[ https://issues.apache.org/jira/browse/DRILL-4699?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=16652750#comment-16652750 ]

ASF GitHub Bot commented on DRILL-4699:
---------------------------------------

kkhatua closed pull request #536: DRILL-4699: Add description column to 
sys.options table
URL: https://github.com/apache/drill/pull/536
 
 
   

This is a PR merged from a forked repository. Because GitHub hides the
original diff of a foreign pull request on merge, it is reproduced below
for the sake of provenance:

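For orientation before the diff: the patch threads an optional description string through the option validators (TypeValidators) and exposes it through a new OptionValidator.getOptionDescription() method, which the sys.options table can then surface as a description column. Below is a minimal, self-contained sketch of that pattern; the class and member names here are hypothetical stand-ins rather than the actual Drill classes, whose real signatures appear in the diff that follows.

// Self-contained sketch (hypothetical names) of the pattern introduced by this patch:
// each validator may carry a description, and omitting it yields a placeholder string.
public final class OptionDescriptionSketch {

  abstract static class OptionValidator {
    private final String name;
    OptionValidator(String name) { this.name = name; }
    String getOptionName() { return name; }
    // Added by the patch: every validator reports a human-readable description.
    abstract String getOptionDescription();
  }

  static class BooleanValidator extends OptionValidator {
    private final String description;
    // The old two-argument constructor is kept (deprecated in the patch) and
    // delegates with a null description.
    BooleanValidator(String name, boolean def) { this(name, def, null); }
    BooleanValidator(String name, boolean def, String description) {
      super(name);
      this.description = description;
    }
    @Override
    String getOptionDescription() {
      return description != null ? description : "A description of this option is unavailable.";
    }
  }

  public static void main(String[] args) {
    OptionValidator described = new BooleanValidator("exec.errors.verbose", false,
        "Toggles verbose output of error messages.");
    OptionValidator undescribed = new BooleanValidator("planner.enable_hep_opt", true);
    System.out.println(described.getOptionName() + ": " + described.getOptionDescription());
    System.out.println(undescribed.getOptionName() + ": " + undescribed.getOptionDescription());
  }
}
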
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java 
b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
index 0d7e0d09bec..9ac59304c1f 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/ExecConstants.java
@@ -33,64 +33,65 @@
 import org.apache.drill.exec.testing.ExecutionControls;
 import org.apache.drill.exec.util.ImpersonationUtil;
 
-public interface ExecConstants {
-  String ZK_RETRY_TIMES = "drill.exec.zk.retry.count";
-  String ZK_RETRY_DELAY = "drill.exec.zk.retry.delay";
-  String ZK_CONNECTION = "drill.exec.zk.connect";
-  String ZK_TIMEOUT = "drill.exec.zk.timeout";
-  String ZK_ROOT = "drill.exec.zk.root";
-  String ZK_REFRESH = "drill.exec.zk.refresh";
-  String BIT_RETRY_TIMES = "drill.exec.rpc.bit.server.retry.count";
-  String BIT_RETRY_DELAY = "drill.exec.rpc.bit.server.retry.delay";
-  String BIT_TIMEOUT = "drill.exec.bit.timeout" ;
-  String SERVICE_NAME = "drill.exec.cluster-id";
-  String INITIAL_BIT_PORT = "drill.exec.rpc.bit.server.port";
-  String BIT_RPC_TIMEOUT = "drill.exec.rpc.bit.timeout";
-  String INITIAL_USER_PORT = "drill.exec.rpc.user.server.port";
-  String USER_RPC_TIMEOUT = "drill.exec.rpc.user.timeout";
-  String METRICS_CONTEXT_NAME = "drill.exec.metrics.context";
-  String USE_IP_ADDRESS = "drill.exec.rpc.use.ip";
-  String CLIENT_RPC_THREADS = "drill.exec.rpc.user.client.threads";
-  String BIT_SERVER_RPC_THREADS = "drill.exec.rpc.bit.server.threads";
-  String USER_SERVER_RPC_THREADS = "drill.exec.rpc.user.server.threads";
-  String TRACE_DUMP_DIRECTORY = "drill.exec.trace.directory";
-  String TRACE_DUMP_FILESYSTEM = "drill.exec.trace.filesystem";
-  String TEMP_DIRECTORIES = "drill.exec.tmp.directories";
-  String TEMP_FILESYSTEM = "drill.exec.tmp.filesystem";
-  String INCOMING_BUFFER_IMPL = "drill.exec.buffer.impl";
+public final class ExecConstants {
+
+  public static final String ZK_RETRY_TIMES = "drill.exec.zk.retry.count";
+  public static final String ZK_RETRY_DELAY = "drill.exec.zk.retry.delay";
+  public static final String ZK_CONNECTION = "drill.exec.zk.connect";
+  public static final String ZK_TIMEOUT = "drill.exec.zk.timeout";
+  public static final String ZK_ROOT = "drill.exec.zk.root";
+  public static final String ZK_REFRESH = "drill.exec.zk.refresh";
+  public static final String BIT_RETRY_TIMES = 
"drill.exec.rpc.bit.server.retry.count";
+  public static final String BIT_RETRY_DELAY = 
"drill.exec.rpc.bit.server.retry.delay";
+  public static final String BIT_TIMEOUT = "drill.exec.bit.timeout";
+  public static final String SERVICE_NAME = "drill.exec.cluster-id";
+  public static final String INITIAL_BIT_PORT = 
"drill.exec.rpc.bit.server.port";
+  public static final String BIT_RPC_TIMEOUT = "drill.exec.rpc.bit.timeout";
+  public static final String INITIAL_USER_PORT = 
"drill.exec.rpc.user.server.port";
+  public static final String USER_RPC_TIMEOUT = "drill.exec.rpc.user.timeout";
+  public static final String METRICS_CONTEXT_NAME = 
"drill.exec.metrics.context";
+  public static final String USE_IP_ADDRESS = "drill.exec.rpc.use.ip";
+  public static final String CLIENT_RPC_THREADS = 
"drill.exec.rpc.user.client.threads";
+  public static final String BIT_SERVER_RPC_THREADS = 
"drill.exec.rpc.bit.server.threads";
+  public static final String USER_SERVER_RPC_THREADS = 
"drill.exec.rpc.user.server.threads";
+  public static final String TRACE_DUMP_DIRECTORY = 
"drill.exec.trace.directory";
+  public static final String TRACE_DUMP_FILESYSTEM = 
"drill.exec.trace.filesystem";
+  public static final String TEMP_DIRECTORIES = "drill.exec.tmp.directories";
+  public static final String TEMP_FILESYSTEM = "drill.exec.tmp.filesystem";
+  public static final String INCOMING_BUFFER_IMPL = "drill.exec.buffer.impl";
   /** incoming buffer size (number of batches) */
-  String INCOMING_BUFFER_SIZE = "drill.exec.buffer.size";
-  String SPOOLING_BUFFER_DELETE = "drill.exec.buffer.spooling.delete";
-  String SPOOLING_BUFFER_MEMORY = "drill.exec.buffer.spooling.size";
-  String BATCH_PURGE_THRESHOLD = "drill.exec.sort.purge.threshold";
-  String EXTERNAL_SORT_TARGET_BATCH_SIZE = 
"drill.exec.sort.external.batch.size";
-  String EXTERNAL_SORT_TARGET_SPILL_BATCH_SIZE = 
"drill.exec.sort.external.spill.batch.size";
-  String EXTERNAL_SORT_SPILL_GROUP_SIZE = 
"drill.exec.sort.external.spill.group.size";
-  String EXTERNAL_SORT_SPILL_THRESHOLD = 
"drill.exec.sort.external.spill.threshold";
-  String EXTERNAL_SORT_SPILL_DIRS = 
"drill.exec.sort.external.spill.directories";
-  String EXTERNAL_SORT_SPILL_FILESYSTEM = "drill.exec.sort.external.spill.fs";
-  String EXTERNAL_SORT_MSORT_MAX_BATCHSIZE = 
"drill.exec.sort.external.msort.batch.maxsize";
-  String TEXT_LINE_READER_BATCH_SIZE = 
"drill.exec.storage.file.text.batch.size";
-  String TEXT_LINE_READER_BUFFER_SIZE = 
"drill.exec.storage.file.text.buffer.size";
-  String HAZELCAST_SUBNETS = "drill.exec.cache.hazel.subnets";
-  String HTTP_ENABLE = "drill.exec.http.enabled";
-  String HTTP_PORT = "drill.exec.http.port";
-  String HTTP_ENABLE_SSL = "drill.exec.http.ssl_enabled";
-  String HTTP_SESSION_MAX_IDLE_SECS = "drill.exec.http.session_max_idle_secs";
-  String HTTP_KEYSTORE_PATH = "javax.net.ssl.keyStore";
-  String HTTP_KEYSTORE_PASSWORD = "javax.net.ssl.keyStorePassword";
-  String HTTP_TRUSTSTORE_PATH = "javax.net.ssl.trustStore";
-  String HTTP_TRUSTSTORE_PASSWORD = "javax.net.ssl.trustStorePassword";
-  String SYS_STORE_PROVIDER_CLASS = "drill.exec.sys.store.provider.class";
-  String SYS_STORE_PROVIDER_LOCAL_PATH = 
"drill.exec.sys.store.provider.local.path";
-  String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = 
"drill.exec.sys.store.provider.local.write";
-  String IMPERSONATION_ENABLED = "drill.exec.impersonation.enabled";
-  String IMPERSONATION_MAX_CHAINED_USER_HOPS = 
"drill.exec.impersonation.max_chained_user_hops";
-  String USER_AUTHENTICATION_ENABLED = "drill.exec.security.user.auth.enabled";
-  String USER_AUTHENTICATOR_IMPL = "drill.exec.security.user.auth.impl";
-  String PAM_AUTHENTICATOR_PROFILES = 
"drill.exec.security.user.auth.pam_profiles";
+  public static final String INCOMING_BUFFER_SIZE = "drill.exec.buffer.size";
+  public static final String SPOOLING_BUFFER_DELETE = 
"drill.exec.buffer.spooling.delete";
+  public static final String SPOOLING_BUFFER_MEMORY = 
"drill.exec.buffer.spooling.size";
+  public static final String BATCH_PURGE_THRESHOLD = 
"drill.exec.sort.purge.threshold";
+  public static final String EXTERNAL_SORT_TARGET_BATCH_SIZE = 
"drill.exec.sort.external.batch.size";
+  public static final String EXTERNAL_SORT_TARGET_SPILL_BATCH_SIZE = 
"drill.exec.sort.external.spill.batch.size";
+  public static final String EXTERNAL_SORT_SPILL_GROUP_SIZE = 
"drill.exec.sort.external.spill.group.size";
+  public static final String EXTERNAL_SORT_SPILL_THRESHOLD = 
"drill.exec.sort.external.spill.threshold";
+  public static final String EXTERNAL_SORT_SPILL_DIRS = 
"drill.exec.sort.external.spill.directories";
+  public static final String EXTERNAL_SORT_SPILL_FILESYSTEM = 
"drill.exec.sort.external.spill.fs";
+  public static final String EXTERNAL_SORT_MSORT_MAX_BATCHSIZE = 
"drill.exec.sort.external.msort.batch.maxsize";
+  public static final String TEXT_LINE_READER_BATCH_SIZE = 
"drill.exec.storage.file.text.batch.size";
+  public static final String TEXT_LINE_READER_BUFFER_SIZE = 
"drill.exec.storage.file.text.buffer.size";
+  public static final String HAZELCAST_SUBNETS = 
"drill.exec.cache.hazel.subnets";
+  public static final String HTTP_ENABLE = "drill.exec.http.enabled";
+  public static final String HTTP_PORT = "drill.exec.http.port";
+  public static final String HTTP_ENABLE_SSL = "drill.exec.http.ssl_enabled";
+  public static final String HTTP_SESSION_MAX_IDLE_SECS = 
"drill.exec.http.session_max_idle_secs";
+  public static final String HTTP_KEYSTORE_PATH = "javax.net.ssl.keyStore";
+  public static final String HTTP_KEYSTORE_PASSWORD = 
"javax.net.ssl.keyStorePassword";
+  public static final String HTTP_TRUSTSTORE_PATH = "javax.net.ssl.trustStore";
+  public static final String HTTP_TRUSTSTORE_PASSWORD = 
"javax.net.ssl.trustStorePassword";
+  public static final String SYS_STORE_PROVIDER_CLASS = 
"drill.exec.sys.store.provider.class";
+  public static final String SYS_STORE_PROVIDER_LOCAL_PATH = 
"drill.exec.sys.store.provider.local.path";
+  public static final String SYS_STORE_PROVIDER_LOCAL_ENABLE_WRITE = 
"drill.exec.sys.store.provider.local.write";
+  public static final String IMPERSONATION_ENABLED = 
"drill.exec.impersonation.enabled";
+  public static final String IMPERSONATION_MAX_CHAINED_USER_HOPS = 
"drill.exec.impersonation.max_chained_user_hops";
+  public static final String USER_AUTHENTICATION_ENABLED = 
"drill.exec.security.user.auth.enabled";
+  public static final String USER_AUTHENTICATOR_IMPL = 
"drill.exec.security.user.auth.impl";
+  public static final String PAM_AUTHENTICATOR_PROFILES = 
"drill.exec.security.user.auth.pam_profiles";
   /** Size of JDBC batch queue (in batches) above which throttling begins. */
-  String JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD =
+  public static final String JDBC_BATCH_QUEUE_THROTTLING_THRESHOLD =
       "drill.jdbc.batch_queue_throttling_threshold";
 
   /**
@@ -98,45 +99,56 @@
    * FINISHED we report the query result as CANCELLED by swallowing the 
failures occurred in fragments. This BOOT
    * setting allows the user to see the query status as failure. Useful for 
developers/testers.
    */
-  String RETURN_ERROR_FOR_FAILURE_IN_CANCELLED_FRAGMENTS =
+  public static final String RETURN_ERROR_FOR_FAILURE_IN_CANCELLED_FRAGMENTS =
       "drill.exec.debug.return_error_for_failure_in_cancelled_fragments";
 
-
-
-
-  String CLIENT_SUPPORT_COMPLEX_TYPES = "drill.client.supports-complex-types";
-
-  String OUTPUT_FORMAT_OPTION = "store.format";
-  OptionValidator OUTPUT_FORMAT_VALIDATOR = new 
StringValidator(OUTPUT_FORMAT_OPTION, "parquet");
-  String PARQUET_BLOCK_SIZE = "store.parquet.block-size";
-  OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new 
LongValidator(PARQUET_BLOCK_SIZE, 512*1024*1024);
-  String PARQUET_PAGE_SIZE = "store.parquet.page-size";
-  OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new 
LongValidator(PARQUET_PAGE_SIZE, 1024*1024);
-  String PARQUET_DICT_PAGE_SIZE = "store.parquet.dictionary.page-size";
-  OptionValidator PARQUET_DICT_PAGE_SIZE_VALIDATOR = new 
LongValidator(PARQUET_DICT_PAGE_SIZE, 1024*1024);
-  String PARQUET_WRITER_COMPRESSION_TYPE = "store.parquet.compression";
-  OptionValidator PARQUET_WRITER_COMPRESSION_TYPE_VALIDATOR = new 
EnumeratedStringValidator(
-      PARQUET_WRITER_COMPRESSION_TYPE, "snappy", "snappy", "gzip", "none");
-  String PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING = 
"store.parquet.enable_dictionary_encoding";
-  OptionValidator PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING_VALIDATOR = new 
BooleanValidator(
+  public static final String CLIENT_SUPPORT_COMPLEX_TYPES = 
"drill.client.supports-complex-types";
+
+  public static final String OUTPUT_FORMAT_OPTION = "store.format";
+  public static final OptionValidator OUTPUT_FORMAT_VALIDATOR = new 
StringValidator(OUTPUT_FORMAT_OPTION, "parquet");
+  public static final String PARQUET_BLOCK_SIZE = "store.parquet.block-size";
+  public static final OptionValidator PARQUET_BLOCK_SIZE_VALIDATOR = new 
LongValidator(PARQUET_BLOCK_SIZE,
+      512 * 1024 * 1024);
+  public static final String PARQUET_PAGE_SIZE = "store.parquet.page-size";
+  public static final OptionValidator PARQUET_PAGE_SIZE_VALIDATOR = new 
LongValidator(PARQUET_PAGE_SIZE,
+      1024 * 1024);
+  public static final String PARQUET_DICT_PAGE_SIZE = 
"store.parquet.dictionary.page-size";
+  public static final OptionValidator PARQUET_DICT_PAGE_SIZE_VALIDATOR = new 
LongValidator(PARQUET_DICT_PAGE_SIZE,
+      1024 * 1024);
+  public static final String PARQUET_WRITER_COMPRESSION_TYPE = 
"store.parquet.compression";
+  public static final OptionValidator 
PARQUET_WRITER_COMPRESSION_TYPE_VALIDATOR = new EnumeratedStringValidator(
+      PARQUET_WRITER_COMPRESSION_TYPE, "snappy",
+      "Compression type for storing Parquet output. Allowed values: snappy 
(default), gzip, none.",
+      "snappy", "gzip", "none");
+  public static final String PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING = 
"store.parquet.enable_dictionary_encoding";
+  public static final OptionValidator 
PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING_VALIDATOR = new BooleanValidator(
       PARQUET_WRITER_ENABLE_DICTIONARY_ENCODING, false);
 
-  String PARQUET_VECTOR_FILL_THRESHOLD = "store.parquet.vector_fill_threshold";
-  OptionValidator PARQUET_VECTOR_FILL_THRESHOLD_VALIDATOR = new 
PositiveLongValidator(PARQUET_VECTOR_FILL_THRESHOLD, 99l, 85l);
-  String PARQUET_VECTOR_FILL_CHECK_THRESHOLD = 
"store.parquet.vector_fill_check_threshold";
-  OptionValidator PARQUET_VECTOR_FILL_CHECK_THRESHOLD_VALIDATOR = new 
PositiveLongValidator(PARQUET_VECTOR_FILL_CHECK_THRESHOLD, 100l, 10l);
-  String PARQUET_NEW_RECORD_READER = "store.parquet.use_new_reader";
-  OptionValidator PARQUET_RECORD_READER_IMPLEMENTATION_VALIDATOR = new 
BooleanValidator(PARQUET_NEW_RECORD_READER, false);
-
-  OptionValidator COMPILE_SCALAR_REPLACEMENT = new 
BooleanValidator("exec.compile.scalar_replacement", false);
-
-  String JSON_ALL_TEXT_MODE = "store.json.all_text_mode";
-  BooleanValidator JSON_READER_ALL_TEXT_MODE_VALIDATOR = new 
BooleanValidator(JSON_ALL_TEXT_MODE, false);
-  BooleanValidator JSON_EXTENDED_TYPES = new 
BooleanValidator("store.json.extended_types", false);
-  BooleanValidator JSON_WRITER_UGLIFY = new 
BooleanValidator("store.json.writer.uglify", false);
-  BooleanValidator JSON_WRITER_SKIPNULLFIELDS = new 
BooleanValidator("store.json.writer.skip_null_fields", true);
-
-  DoubleValidator TEXT_ESTIMATED_ROW_SIZE = new RangeDoubleValidator(
+  public static final String PARQUET_VECTOR_FILL_THRESHOLD = 
"store.parquet.vector_fill_threshold";
+  public static final OptionValidator PARQUET_VECTOR_FILL_THRESHOLD_VALIDATOR 
= new PositiveLongValidator(
+      PARQUET_VECTOR_FILL_THRESHOLD, 99l, 85l);
+  public static final String PARQUET_VECTOR_FILL_CHECK_THRESHOLD = 
"store.parquet.vector_fill_check_threshold";
+  public static final OptionValidator 
PARQUET_VECTOR_FILL_CHECK_THRESHOLD_VALIDATOR = new PositiveLongValidator(
+      PARQUET_VECTOR_FILL_CHECK_THRESHOLD, 100l, 10l);
+  public static final String PARQUET_NEW_RECORD_READER = 
"store.parquet.use_new_reader";
+  public static final OptionValidator 
PARQUET_RECORD_READER_IMPLEMENTATION_VALIDATOR = new BooleanValidator(
+      PARQUET_NEW_RECORD_READER, false,
+      "Enables the text reader that complies with the RFC 4180 standard for 
text/csv files.");
+
+  public static final OptionValidator COMPILE_SCALAR_REPLACEMENT = new 
BooleanValidator(
+      "exec.compile.scalar_replacement", false);
+
+  public static final String JSON_ALL_TEXT_MODE = "store.json.all_text_mode";
+  public static final BooleanValidator JSON_READER_ALL_TEXT_MODE_VALIDATOR = 
new BooleanValidator(JSON_ALL_TEXT_MODE,
+      false, "Drill reads all data from the JSON files as VARCHAR. Prevents 
schema change errors.");
+  public static final BooleanValidator JSON_EXTENDED_TYPES = new 
BooleanValidator("store.json.extended_types", false,
+      "Turns on special JSON structures that Drill serializes for storing more 
type information than the four basic" +
+          " JSON types.");
+  public static final BooleanValidator JSON_WRITER_UGLIFY = new 
BooleanValidator("store.json.writer.uglify", false);
+  public static final BooleanValidator JSON_WRITER_SKIPNULLFIELDS = new 
BooleanValidator(
+      "store.json.writer.skip_null_fields", true);
+
+  public static final DoubleValidator TEXT_ESTIMATED_ROW_SIZE = new 
RangeDoubleValidator(
       "store.text.estimated_row_size_bytes", 1, Long.MAX_VALUE, 100.0);
 
   /**
@@ -146,89 +158,121 @@
    *                |-    bar  -  a.parquet
    *                |-    baz  -  b.parquet
    */
-  String FILESYSTEM_PARTITION_COLUMN_LABEL = 
"drill.exec.storage.file.partition.column.label";
-  OptionValidator FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new 
StringValidator(FILESYSTEM_PARTITION_COLUMN_LABEL, "dir");
+  public static final String FILESYSTEM_PARTITION_COLUMN_LABEL = 
"drill.exec.storage.file.partition.column.label";
+  public static final OptionValidator 
FILESYSTEM_PARTITION_COLUMN_LABEL_VALIDATOR = new StringValidator(
+      FILESYSTEM_PARTITION_COLUMN_LABEL, "dir",
+      "The column label for directory levels in results of queries of files in 
a directory. Accepts a string input.");
 
   /**
    * Implicit file columns
    */
-  String IMPLICIT_FILENAME_COLUMN_LABEL = 
"drill.exec.storage.implicit.filename.column.label";
-  OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR = new 
StringValidator(IMPLICIT_FILENAME_COLUMN_LABEL, "filename");
-  String IMPLICIT_SUFFIX_COLUMN_LABEL = 
"drill.exec.storage.implicit.suffix.column.label";
-  OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR = new 
StringValidator(IMPLICIT_SUFFIX_COLUMN_LABEL, "suffix");
-  String IMPLICIT_FQN_COLUMN_LABEL = 
"drill.exec.storage.implicit.fqn.column.label";
-  OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR = new 
StringValidator(IMPLICIT_FQN_COLUMN_LABEL, "fqn");
-  String IMPLICIT_FILEPATH_COLUMN_LABEL = 
"drill.exec.storage.implicit.filepath.column.label";
-  OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR = new 
StringValidator(IMPLICIT_FILEPATH_COLUMN_LABEL, "filepath");
-
-  String JSON_READ_NUMBERS_AS_DOUBLE = "store.json.read_numbers_as_double";
-  BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new 
BooleanValidator(JSON_READ_NUMBERS_AS_DOUBLE, false);
-
-  String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode";
-  OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR = new 
BooleanValidator(MONGO_ALL_TEXT_MODE, false);
-  String MONGO_READER_READ_NUMBERS_AS_DOUBLE = 
"store.mongo.read_numbers_as_double";
-  OptionValidator MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new 
BooleanValidator(MONGO_READER_READ_NUMBERS_AS_DOUBLE, false);
-  String MONGO_BSON_RECORD_READER = "store.mongo.bson.record.reader";
-  OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new 
BooleanValidator(MONGO_BSON_RECORD_READER, true);
-
-  BooleanValidator ENABLE_UNION_TYPE = new 
BooleanValidator("exec.enable_union_type", false);
+  public static final String IMPLICIT_FILENAME_COLUMN_LABEL = 
"drill.exec.storage.implicit.filename.column.label";
+  public static final OptionValidator IMPLICIT_FILENAME_COLUMN_LABEL_VALIDATOR 
= new StringValidator(
+      IMPLICIT_FILENAME_COLUMN_LABEL, "filename");
+  public static final String IMPLICIT_SUFFIX_COLUMN_LABEL = 
"drill.exec.storage.implicit.suffix.column.label";
+  public static final OptionValidator IMPLICIT_SUFFIX_COLUMN_LABEL_VALIDATOR = 
new StringValidator(
+      IMPLICIT_SUFFIX_COLUMN_LABEL, "suffix");
+  public static final String IMPLICIT_FQN_COLUMN_LABEL = 
"drill.exec.storage.implicit.fqn.column.label";
+  public static final OptionValidator IMPLICIT_FQN_COLUMN_LABEL_VALIDATOR = 
new StringValidator(
+      IMPLICIT_FQN_COLUMN_LABEL, "fqn");
+  public static final String IMPLICIT_FILEPATH_COLUMN_LABEL = 
"drill.exec.storage.implicit.filepath.column.label";
+  public static final OptionValidator IMPLICIT_FILEPATH_COLUMN_LABEL_VALIDATOR 
= new StringValidator(
+      IMPLICIT_FILEPATH_COLUMN_LABEL, "filepath");
+
+  public static final String JSON_READ_NUMBERS_AS_DOUBLE = 
"store.json.read_numbers_as_double";
+  public static final BooleanValidator JSON_READ_NUMBERS_AS_DOUBLE_VALIDATOR = 
new BooleanValidator(
+      JSON_READ_NUMBERS_AS_DOUBLE, false,
+      "Reads numbers with or without a decimal point as DOUBLE. Prevents 
schema change errors.");
+
+  public static final String MONGO_ALL_TEXT_MODE = "store.mongo.all_text_mode";
+  public static final OptionValidator MONGO_READER_ALL_TEXT_MODE_VALIDATOR = 
new BooleanValidator(MONGO_ALL_TEXT_MODE,
+      false);
+  public static final String MONGO_READER_READ_NUMBERS_AS_DOUBLE = 
"store.mongo.read_numbers_as_double";
+  public static final OptionValidator 
MONGO_READER_READ_NUMBERS_AS_DOUBLE_VALIDATOR = new BooleanValidator(
+      MONGO_READER_READ_NUMBERS_AS_DOUBLE, false);
+  public static final String MONGO_BSON_RECORD_READER = 
"store.mongo.bson.record.reader";
+  public static final OptionValidator MONGO_BSON_RECORD_READER_VALIDATOR = new 
BooleanValidator(
+      MONGO_BSON_RECORD_READER, true);
+
+  public static final BooleanValidator ENABLE_UNION_TYPE = new 
BooleanValidator("exec.enable_union_type", false);
 
   // TODO: We need to add a feature that enables storage plugins to add their 
own options. Currently we have to declare
   // in core which is not right. Move this option and above two mongo plugin 
related options once we have the feature.
-  String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS = 
"store.hive.optimize_scan_with_native_readers";
-  OptionValidator HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR =
+  public static final String HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS = 
"store.hive.optimize_scan_with_native_readers";
+  public static final OptionValidator 
HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS_VALIDATOR =
       new BooleanValidator(HIVE_OPTIMIZE_SCAN_WITH_NATIVE_READERS, false);
 
-  String SLICE_TARGET = "planner.slice_target";
-  long SLICE_TARGET_DEFAULT = 100000l;
-  PositiveLongValidator SLICE_TARGET_OPTION = new 
PositiveLongValidator(SLICE_TARGET, Long.MAX_VALUE,
-      SLICE_TARGET_DEFAULT);
+  public static final String SLICE_TARGET = "planner.slice_target";
+  public static final String SLICE_TARGET_DESCRIPTION =
+      "The number of records manipulated within a fragment before Drill 
parallelizes operations.";
+  public static final long SLICE_TARGET_DEFAULT = 100000L;
+  public static final PositiveLongValidator SLICE_TARGET_OPTION = new 
PositiveLongValidator(SLICE_TARGET,
+      Long.MAX_VALUE, SLICE_TARGET_DEFAULT, SLICE_TARGET_DESCRIPTION);
 
-  String CAST_TO_NULLABLE_NUMERIC = 
"drill.exec.functions.cast_empty_string_to_null";
-  OptionValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new 
BooleanValidator(CAST_TO_NULLABLE_NUMERIC, false);
+  public static final String CAST_TO_NULLABLE_NUMERIC = 
"drill.exec.functions.cast_empty_string_to_null";
+  public static final OptionValidator CAST_TO_NULLABLE_NUMERIC_OPTION = new 
BooleanValidator(CAST_TO_NULLABLE_NUMERIC,
+      false, "In a text file, treat empty fields as NULL values instead of 
empty string.");
 
   /**
    * HashTable runtime settings
    */
-  String MIN_HASH_TABLE_SIZE_KEY = "exec.min_hash_table_size";
-  PositiveLongValidator MIN_HASH_TABLE_SIZE = new 
PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY, 
HashTable.DEFAULT_INITIAL_CAPACITY);
-  String MAX_HASH_TABLE_SIZE_KEY = "exec.max_hash_table_size";
-  PositiveLongValidator MAX_HASH_TABLE_SIZE = new 
PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY, HashTable.MAXIMUM_CAPACITY, 
HashTable.MAXIMUM_CAPACITY);
+  public static final String MIN_HASH_TABLE_SIZE_KEY = 
"exec.min_hash_table_size";
+  public static final PositiveLongValidator MIN_HASH_TABLE_SIZE = new 
PositiveLongValidator(MIN_HASH_TABLE_SIZE_KEY,
+      HashTable.MAXIMUM_CAPACITY, HashTable.DEFAULT_INITIAL_CAPACITY,
+      "Starting size in bucketsfor hash tables. Increase according to 
available memory to improve performance." +
+          " Increasing for very large aggregations or joins when you have 
large amounts of memory for Drill to" +
+          " use. Range: 0 - " + HashTable.MAXIMUM_CAPACITY);
+  public static final String MAX_HASH_TABLE_SIZE_KEY = 
"exec.max_hash_table_size";
+  public static final PositiveLongValidator MAX_HASH_TABLE_SIZE = new 
PositiveLongValidator(MAX_HASH_TABLE_SIZE_KEY,
+      HashTable.MAXIMUM_CAPACITY, HashTable.MAXIMUM_CAPACITY,
+      "Ending size in buckets for hash tables. Range: 0 - " + 
HashTable.MAXIMUM_CAPACITY);
 
   /**
    * Limits the maximum level of parallelization to this factor time the 
number of Drillbits
    */
-  String MAX_WIDTH_PER_NODE_KEY = "planner.width.max_per_node";
-  OptionValidator MAX_WIDTH_PER_NODE = new 
PositiveLongValidator(MAX_WIDTH_PER_NODE_KEY, Integer.MAX_VALUE, (long) 
Math.ceil(Runtime.getRuntime().availableProcessors() * 0.70));
+  public static final String MAX_WIDTH_PER_NODE_KEY = 
"planner.width.max_per_node";
+  public static final OptionValidator MAX_WIDTH_PER_NODE = new 
PositiveLongValidator(MAX_WIDTH_PER_NODE_KEY,
+      Integer.MAX_VALUE, (long) 
Math.ceil(Runtime.getRuntime().availableProcessors() * 0.70),
+      "Maximum number of threads that can run in parallel for a query on a 
node. A slice is an individual thread. " +
+          "This number indicates the maximum number of slices per query for 
the query’s major fragment on a node");
 
   /**
   * The maximum level of parallelization any stage of the query can do. Note 
that while this
   * might be the number of active Drillbits, realistically, this could be 
well beyond that
   * number if we want to do things like speed up returning results.
    */
-  String MAX_WIDTH_GLOBAL_KEY = "planner.width.max_per_query";
-  OptionValidator MAX_WIDTH_GLOBAL = new 
PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY, Integer.MAX_VALUE, 1000);
+  public static final String MAX_WIDTH_GLOBAL_KEY = 
"planner.width.max_per_query";
+  public static final OptionValidator MAX_WIDTH_GLOBAL = new 
PositiveLongValidator(MAX_WIDTH_GLOBAL_KEY,
+      Integer.MAX_VALUE, 1000,
+      "Same as max per node but applies to the query as executed by the entire 
cluster. For example, this value might" +
+          " be the number of active Drillbits, or a higher number to return 
results faster.");
 
   /**
    * Factor by which a node with endpoint affinity will be favored while 
creating assignment
    */
-  String AFFINITY_FACTOR_KEY = "planner.affinity_factor";
-  OptionValidator AFFINITY_FACTOR = new DoubleValidator(AFFINITY_FACTOR_KEY, 
1.2d);
+  public static final String AFFINITY_FACTOR_KEY = "planner.affinity_factor";
+  public static final OptionValidator AFFINITY_FACTOR = new 
DoubleValidator(AFFINITY_FACTOR_KEY, 1.2d,
+      "Factor by which a node with endpoint affinity is favored while creating 
assignment." +
+          " Accepts inputs of type DOUBLE.");
 
-  String EARLY_LIMIT0_OPT_KEY = "planner.enable_limit0_optimization";
-  BooleanValidator EARLY_LIMIT0_OPT = new 
BooleanValidator(EARLY_LIMIT0_OPT_KEY, false);
+  public static final String EARLY_LIMIT0_OPT_KEY = 
"planner.enable_limit0_optimization";
+  public static final BooleanValidator EARLY_LIMIT0_OPT = new 
BooleanValidator(EARLY_LIMIT0_OPT_KEY, false);
 
-  String ENABLE_MEMORY_ESTIMATION_KEY = 
"planner.memory.enable_memory_estimation";
-  OptionValidator ENABLE_MEMORY_ESTIMATION = new 
BooleanValidator(ENABLE_MEMORY_ESTIMATION_KEY, false);
+  public static final String ENABLE_MEMORY_ESTIMATION_KEY = 
"planner.memory.enable_memory_estimation";
+  public static final OptionValidator ENABLE_MEMORY_ESTIMATION = new 
BooleanValidator(ENABLE_MEMORY_ESTIMATION_KEY,
+      false);
 
   /**
    * Maximum query memory per node (in MB). Re-plan with cheaper operators if 
memory estimation exceeds this limit.
    * <p/>
    * DEFAULT: 2048 MB
    */
-  String MAX_QUERY_MEMORY_PER_NODE_KEY = 
"planner.memory.max_query_memory_per_node";
-  LongValidator MAX_QUERY_MEMORY_PER_NODE = new RangeLongValidator(
-      MAX_QUERY_MEMORY_PER_NODE_KEY, 1024 * 1024, Long.MAX_VALUE, 2 * 1024 * 
1024 * 1024L);
+  public static final String MAX_QUERY_MEMORY_PER_NODE_KEY = 
"planner.memory.max_query_memory_per_node";
+  public static final LongValidator MAX_QUERY_MEMORY_PER_NODE = new 
RangeLongValidator(
+      MAX_QUERY_MEMORY_PER_NODE_KEY, 1024 * 1024, Long.MAX_VALUE, 2 * 1024 * 
1024 * 1024L,
+      "Sets the maximum amount of direct memory allocated to the sort operator 
in each query on a node. If a query" +
+          " plan contains multiple sort operators, they all share this memory. 
If you encounter memory issues when" +
+          " running queries with sort operators, increase the value of this 
option.");
 
   /**
    * Extra query memory per node for non-blocking operators.
@@ -237,71 +281,81 @@
    * DEFAULT: 64 MB
    * MAXIMUM: 2048 MB
    */
-  String NON_BLOCKING_OPERATORS_MEMORY_KEY = 
"planner.memory.non_blocking_operators_memory";
-  OptionValidator NON_BLOCKING_OPERATORS_MEMORY = new PowerOfTwoLongValidator(
-    NON_BLOCKING_OPERATORS_MEMORY_KEY, 1 << 11, 1 << 6);
+  public static final String NON_BLOCKING_OPERATORS_MEMORY_KEY = 
"planner.memory.non_blocking_operators_memory";
+  public static final OptionValidator NON_BLOCKING_OPERATORS_MEMORY = new 
PowerOfTwoLongValidator(
+      NON_BLOCKING_OPERATORS_MEMORY_KEY, 1 << 11, 1 << 6);
 
-  String HASH_JOIN_TABLE_FACTOR_KEY = "planner.memory.hash_join_table_factor";
-  OptionValidator HASH_JOIN_TABLE_FACTOR = new 
DoubleValidator(HASH_JOIN_TABLE_FACTOR_KEY, 1.1d);
+  public static final String HASH_JOIN_TABLE_FACTOR_KEY = 
"planner.memory.hash_join_table_factor";
+  public static final OptionValidator HASH_JOIN_TABLE_FACTOR = new 
DoubleValidator(HASH_JOIN_TABLE_FACTOR_KEY, 1.1d);
 
-  String HASH_AGG_TABLE_FACTOR_KEY = "planner.memory.hash_agg_table_factor";
-  OptionValidator HASH_AGG_TABLE_FACTOR = new 
DoubleValidator(HASH_AGG_TABLE_FACTOR_KEY, 1.1d);
+  public static final String HASH_AGG_TABLE_FACTOR_KEY = 
"planner.memory.hash_agg_table_factor";
+  public static final OptionValidator HASH_AGG_TABLE_FACTOR = new 
DoubleValidator(HASH_AGG_TABLE_FACTOR_KEY, 1.1d);
 
-  String AVERAGE_FIELD_WIDTH_KEY = "planner.memory.average_field_width";
-  OptionValidator AVERAGE_FIELD_WIDTH = new 
PositiveLongValidator(AVERAGE_FIELD_WIDTH_KEY, Long.MAX_VALUE, 8);
+  public static final String AVERAGE_FIELD_WIDTH_KEY = 
"planner.memory.average_field_width";
+  public static final OptionValidator AVERAGE_FIELD_WIDTH = new 
PositiveLongValidator(AVERAGE_FIELD_WIDTH_KEY,
+      Long.MAX_VALUE, 8);
 
-  BooleanValidator ENABLE_QUEUE = new BooleanValidator("exec.queue.enable", 
false);
-  LongValidator LARGE_QUEUE_SIZE = new 
PositiveLongValidator("exec.queue.large", 1000, 10);
-  LongValidator SMALL_QUEUE_SIZE = new 
PositiveLongValidator("exec.queue.small", 100000, 100);
-  LongValidator QUEUE_THRESHOLD_SIZE = new 
PositiveLongValidator("exec.queue.threshold",
-      Long.MAX_VALUE, 30000000);
-  LongValidator QUEUE_TIMEOUT = new 
PositiveLongValidator("exec.queue.timeout_millis",
-      Long.MAX_VALUE, 60 * 1000 * 5);
+  public static final BooleanValidator ENABLE_QUEUE = new 
BooleanValidator("exec.queue.enable", false);
+  public static final LongValidator LARGE_QUEUE_SIZE = new 
PositiveLongValidator("exec.queue.large", 1000, 10,
+      "Sets the number of large queries that can run concurrently in the 
cluster. Range: 0 - 1000.");
+  public static final LongValidator SMALL_QUEUE_SIZE = new 
PositiveLongValidator("exec.queue.small", 100000, 100,
+      "Sets the number of small queries that can run concurrently in the 
cluster. Range: 0 - 100000.");
+  public static final LongValidator QUEUE_THRESHOLD_SIZE = new 
PositiveLongValidator("exec.queue.threshold",
+      Long.MAX_VALUE, 30000000, "Sets the cost threshold, which depends on the 
complexity of the queries in" +
+      " queue, for determining whether query is large or small. Complex 
queries have higher thresholds." +
+      " Range: 0 - 9223372036854775807.");
+  public static final LongValidator QUEUE_TIMEOUT = new 
PositiveLongValidator("exec.queue.timeout_millis",
+      Long.MAX_VALUE, 60 * 1000 * 5, "Indicates how long a query can wait in 
queue before the query fails." +
+      " Range: 0 - 9223372036854775807.");
 
-  String ENABLE_VERBOSE_ERRORS_KEY = "exec.errors.verbose";
-  OptionValidator ENABLE_VERBOSE_ERRORS = new 
BooleanValidator(ENABLE_VERBOSE_ERRORS_KEY, false);
+  public static final String ENABLE_VERBOSE_ERRORS_KEY = "exec.errors.verbose";
+  public static final OptionValidator ENABLE_VERBOSE_ERRORS = new 
BooleanValidator(ENABLE_VERBOSE_ERRORS_KEY, false,
+      "Toggles verbose output of error messages.");
 
-  String ENABLE_NEW_TEXT_READER_KEY = "exec.storage.enable_new_text_reader";
-  OptionValidator ENABLE_NEW_TEXT_READER = new 
BooleanValidator(ENABLE_NEW_TEXT_READER_KEY, true);
+  public static final String ENABLE_NEW_TEXT_READER_KEY = 
"exec.storage.enable_new_text_reader";
+  public static final OptionValidator ENABLE_NEW_TEXT_READER = new 
BooleanValidator(ENABLE_NEW_TEXT_READER_KEY, true);
 
-  String BOOTSTRAP_STORAGE_PLUGINS_FILE = "bootstrap-storage-plugins.json";
-  String MAX_LOADING_CACHE_SIZE_CONFIG = "drill.exec.compile.cache_max_size";
+  public static final String BOOTSTRAP_STORAGE_PLUGINS_FILE = 
"bootstrap-storage-plugins.json";
+  public static final String MAX_LOADING_CACHE_SIZE_CONFIG = 
"drill.exec.compile.cache_max_size";
 
-  String DRILL_SYS_FILE_SUFFIX = ".sys.drill";
+  public static final String DRILL_SYS_FILE_SUFFIX = ".sys.drill";
 
-  String ENABLE_WINDOW_FUNCTIONS = "window.enable";
-  OptionValidator ENABLE_WINDOW_FUNCTIONS_VALIDATOR = new 
BooleanValidator(ENABLE_WINDOW_FUNCTIONS, true);
+  public static final String ENABLE_WINDOW_FUNCTIONS = "window.enable";
+  public static final OptionValidator ENABLE_WINDOW_FUNCTIONS_VALIDATOR = new 
BooleanValidator(ENABLE_WINDOW_FUNCTIONS,
+      true, "Enable or disable window functions in Drill 1.1 and later.");
 
-  String DRILLBIT_CONTROL_INJECTIONS = "drill.exec.testing.controls";
-  OptionValidator DRILLBIT_CONTROLS_VALIDATOR =
-    new ExecutionControls.ControlsOptionValidator(DRILLBIT_CONTROL_INJECTIONS, 
ExecutionControls.DEFAULT_CONTROLS, 1);
+  public static final String DRILLBIT_CONTROL_INJECTIONS = 
"drill.exec.testing.controls";
+  public static final OptionValidator DRILLBIT_CONTROLS_VALIDATOR =
+      new 
ExecutionControls.ControlsOptionValidator(DRILLBIT_CONTROL_INJECTIONS, 
ExecutionControls.DEFAULT_CONTROLS, 1);
 
-  String NEW_VIEW_DEFAULT_PERMS_KEY = "new_view_default_permissions";
-  OptionValidator NEW_VIEW_DEFAULT_PERMS_VALIDATOR =
-      new StringValidator(NEW_VIEW_DEFAULT_PERMS_KEY, "700");
+  public static final String NEW_VIEW_DEFAULT_PERMS_KEY = 
"new_view_default_permissions";
+  public static final OptionValidator NEW_VIEW_DEFAULT_PERMS_VALIDATOR = new 
StringValidator(
+      NEW_VIEW_DEFAULT_PERMS_KEY, "700", "Sets view permissions using an octal 
code in the Unix tradition.");
 
-  String CTAS_PARTITIONING_HASH_DISTRIBUTE = "store.partition.hash_distribute";
-  BooleanValidator CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR = new 
BooleanValidator(CTAS_PARTITIONING_HASH_DISTRIBUTE, false);
+  public static final String CTAS_PARTITIONING_HASH_DISTRIBUTE = 
"store.partition.hash_distribute";
+  public static final BooleanValidator 
CTAS_PARTITIONING_HASH_DISTRIBUTE_VALIDATOR = new BooleanValidator(
+      CTAS_PARTITIONING_HASH_DISTRIBUTE, false);
 
-  String ENABLE_BULK_LOAD_TABLE_LIST_KEY = "exec.enable_bulk_load_table_list";
-  BooleanValidator ENABLE_BULK_LOAD_TABLE_LIST = new 
BooleanValidator(ENABLE_BULK_LOAD_TABLE_LIST_KEY, false);
+  public static final String ENABLE_BULK_LOAD_TABLE_LIST_KEY = 
"exec.enable_bulk_load_table_list";
+  public static final BooleanValidator ENABLE_BULK_LOAD_TABLE_LIST = new 
BooleanValidator(
+      ENABLE_BULK_LOAD_TABLE_LIST_KEY, false);
 
   /**
    * Option whose value is a comma separated list of admin usernames. Admin 
users are users who have special privileges
    * such as changing system options.
    */
-  String ADMIN_USERS_KEY = "security.admin.users";
-  StringValidator ADMIN_USERS_VALIDATOR =
+  public static final String ADMIN_USERS_KEY = "security.admin.users";
+  public static final StringValidator ADMIN_USERS_VALIDATOR =
       new AdminOptionValidator(ADMIN_USERS_KEY, 
ImpersonationUtil.getProcessUserName());
 
   /**
    * Option whose value is a comma separated list of admin usergroups.
    */
-  String ADMIN_USER_GROUPS_KEY = "security.admin.user_groups";
-  StringValidator ADMIN_USER_GROUPS_VALIDATOR = new 
AdminOptionValidator(ADMIN_USER_GROUPS_KEY, "");
+  public static final String ADMIN_USER_GROUPS_KEY = 
"security.admin.user_groups";
+  public static final StringValidator ADMIN_USER_GROUPS_VALIDATOR = new 
AdminOptionValidator(ADMIN_USER_GROUPS_KEY, "");
 
   /**
-   * Option whose value is a string representing list of inbound impersonation 
policies.
+   * Option whose value is a string representing a list of inbound impersonation 
policies.
    *
    * Impersonation policy format:
    * [
@@ -312,17 +366,22 @@
    *   ...
    * ]
    */
-  String IMPERSONATION_POLICIES_KEY = "exec.impersonation.inbound_policies";
-  StringValidator IMPERSONATION_POLICY_VALIDATOR =
+  public static final String IMPERSONATION_POLICIES_KEY = 
"exec.impersonation.inbound_policies";
+  public static final StringValidator IMPERSONATION_POLICY_VALIDATOR =
       new 
InboundImpersonationManager.InboundImpersonationPolicyValidator(IMPERSONATION_POLICIES_KEY,
 "[]");
 
   /**
    * Web settings
    */
-  String WEB_LOGS_MAX_LINES = "web.logs.max_lines";
-  OptionValidator WEB_LOGS_MAX_LINES_VALIDATOR = new 
PositiveLongValidator(WEB_LOGS_MAX_LINES, Integer.MAX_VALUE, 10000);
+  public static final String WEB_LOGS_MAX_LINES = "web.logs.max_lines";
+  public static final OptionValidator WEB_LOGS_MAX_LINES_VALIDATOR = new 
PositiveLongValidator(
+      WEB_LOGS_MAX_LINES, Integer.MAX_VALUE, 10000);
 
-  String CODE_GEN_EXP_IN_METHOD_SIZE = "exec.java.compiler.exp_in_method_size";
-  LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR = new 
LongValidator(CODE_GEN_EXP_IN_METHOD_SIZE, 50);
+  public static final String CODE_GEN_EXP_IN_METHOD_SIZE = 
"exec.java.compiler.exp_in_method_size";
+  public static final LongValidator CODE_GEN_EXP_IN_METHOD_SIZE_VALIDATOR = 
new LongValidator(
+      CODE_GEN_EXP_IN_METHOD_SIZE, 50);
 
+  // prevent instantiation
+  private ExecConstants() {
+  }
 }
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
index 02323a98592..ba168851b07 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/ClassTransformer.java
@@ -50,7 +50,7 @@
   public final static String SCALAR_REPLACEMENT_OPTION =
       "org.apache.drill.exec.compile.ClassTransformer.scalar_replacement";
   public final static EnumeratedStringValidator SCALAR_REPLACEMENT_VALIDATOR = 
new EnumeratedStringValidator(
-      SCALAR_REPLACEMENT_OPTION, "try", "off", "try", "on");
+      SCALAR_REPLACEMENT_OPTION, "try", null, "off", "try", "on");
 
   @VisibleForTesting // although we need it even if it weren't used in testing
   public enum ScalarReplacementOption {
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/QueryClassLoader.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/QueryClassLoader.java
index 3df8f84806a..5a437f8aa49 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/compile/QueryClassLoader.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/compile/QueryClassLoader.java
@@ -42,7 +42,17 @@
   private static final org.slf4j.Logger logger = 
org.slf4j.LoggerFactory.getLogger(QueryClassLoader.class);
 
   public static final String JAVA_COMPILER_OPTION = "exec.java_compiler";
-  public static final StringValidator JAVA_COMPILER_VALIDATOR = new 
StringValidator(JAVA_COMPILER_OPTION, CompilerPolicy.DEFAULT.toString()) {
+  public static final String JAVA_COMPILER_JANINO_MAXSIZE_OPTION = 
"exec.java_compiler_janino_maxsize";
+
+  public static final OptionValidator JAVA_COMPILER_JANINO_MAXSIZE =
+      new LongValidator(JAVA_COMPILER_JANINO_MAXSIZE_OPTION, 256 * 1024,
+          "See the " + JAVA_COMPILER_OPTION + ". Accepts inputs of type 
LONG.");
+
+  public static final StringValidator JAVA_COMPILER_VALIDATOR = new 
StringValidator(JAVA_COMPILER_OPTION,
+      CompilerPolicy.DEFAULT.toString(),
+      "Switches between DEFAULT, JDK, and JANINO mode for the current session. 
Uses Janino by default for generated" +
+          " source code of less than " + JAVA_COMPILER_JANINO_MAXSIZE_OPTION + 
"; otherwise, switches" +
+          " to the JDK compiler.") {
     @Override
     public void validate(OptionValue v) {
       super.validate(v);
@@ -58,10 +68,8 @@ public void validate(OptionValue v) {
   };
 
   public static final String JAVA_COMPILER_DEBUG_OPTION = 
"exec.java_compiler_debug";
-  public static final OptionValidator JAVA_COMPILER_DEBUG = new 
BooleanValidator(JAVA_COMPILER_DEBUG_OPTION, true);
-
-  public static final String JAVA_COMPILER_JANINO_MAXSIZE_OPTION = 
"exec.java_compiler_janino_maxsize";
-  public static final OptionValidator JAVA_COMPILER_JANINO_MAXSIZE = new 
LongValidator(JAVA_COMPILER_JANINO_MAXSIZE_OPTION, 256*1024);
+  public static final OptionValidator JAVA_COMPILER_DEBUG = new 
BooleanValidator(JAVA_COMPILER_DEBUG_OPTION, true,
+      "Toggles the output of debug-level compiler error messages in runtime 
generated code.");
 
   public static final String JAVA_COMPILER_CONFIG = 
"drill.exec.compile.compiler";
   public static final String JAVA_COMPILER_DEBUG_CONFIG = 
"drill.exec.compile.debug";
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
index ff36d47bc1d..8501bea3538 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/planner/physical/PlannerSettings.java
@@ -46,40 +46,83 @@
   // max off heap memory for planning (16G)
   private static final long MAX_OFF_HEAP_ALLOCATION_IN_BYTES = 16l * 1024 * 
1024 * 1024;
 
-  public static final OptionValidator CONSTANT_FOLDING = new 
BooleanValidator("planner.enable_constant_folding", true);
+  public static final OptionValidator CONSTANT_FOLDING = new 
BooleanValidator("planner.enable_constant_folding", true,
+      "If one side of a filter condition is a constant expression, constant 
folding evaluates the expression in the" +
+          " planning phase and replaces the expression with the constant 
value. For example, Drill can rewrite" +
+          " this clause ' WHERE age + 5 < 42 as WHERE age < 37'.");
+
   public static final OptionValidator EXCHANGE = new 
BooleanValidator("planner.disable_exchanges", false);
+
   public static final OptionValidator HASHAGG = new 
BooleanValidator("planner.enable_hashagg", true);
+
   public static final OptionValidator STREAMAGG = new 
BooleanValidator("planner.enable_streamagg", true);
-  public static final OptionValidator HASHJOIN = new 
BooleanValidator("planner.enable_hashjoin", true);
-  public static final OptionValidator MERGEJOIN = new 
BooleanValidator("planner.enable_mergejoin", true);
+
+  public static final OptionValidator HASHJOIN = new 
BooleanValidator("planner.enable_hashjoin", true,
+      "Enable the memory hungry hash join. Drill assumes that a query with 
have adequate memory to complete and" +
+          " tries to use the fastest operations possible to complete the 
planned inner, left, right, or full outer" +
+          " joins using a hash table. Does not write to disk. Disabling hash 
join allows Drill to manage arbitrarily" +
+          " large data in a small memory footprint.");
+
+  public static final OptionValidator MERGEJOIN = new 
BooleanValidator("planner.enable_mergejoin", true,
+      "Sort-based operation. A merge join is used for inner join, left and 
right outer joins. Inputs to the merge" +
+          " join must be sorted. It reads the sorted input streams from both 
sides and finds matching rows." +
+          " Writes to disk.");
+
   public static final OptionValidator NESTEDLOOPJOIN = new 
BooleanValidator("planner.enable_nestedloopjoin", true);
+
   public static final OptionValidator MULTIPHASE = new 
BooleanValidator("planner.enable_multiphase_agg", true);
-  public static final OptionValidator BROADCAST = new 
BooleanValidator("planner.enable_broadcast_join", true);
-  public static final OptionValidator BROADCAST_THRESHOLD = new 
PositiveLongValidator("planner.broadcast_threshold", MAX_BROADCAST_THRESHOLD, 
10000000);
-  public static final OptionValidator BROADCAST_FACTOR = new 
RangeDoubleValidator("planner.broadcast_factor", 0, Double.MAX_VALUE, 1.0d);
-  public static final OptionValidator NESTEDLOOPJOIN_FACTOR = new 
RangeDoubleValidator("planner.nestedloopjoin_factor", 0, Double.MAX_VALUE, 
100.0d);
-  public static final OptionValidator NLJOIN_FOR_SCALAR = new 
BooleanValidator("planner.enable_nljoin_for_scalar_only", true);
-  public static final OptionValidator JOIN_ROW_COUNT_ESTIMATE_FACTOR = new 
RangeDoubleValidator("planner.join.row_count_estimate_factor", 0, 
Double.MAX_VALUE, 1.0d);
+
+  public static final OptionValidator BROADCAST = new 
BooleanValidator("planner.enable_broadcast_join", true,
+      "The broadcast join can be used for hash join, merge join and nested 
loop join. Use to join a large (fact)" +
+          " table to relatively smaller (dimension) tables. This should be 
enabled.");
+  public static final OptionValidator BROADCAST_THRESHOLD = new 
PositiveLongValidator("planner.broadcast_threshold",
+      MAX_BROADCAST_THRESHOLD, 10000000, "The maximum number of records 
allowed to be broadcast as part of a query." +
+      " If the threshold is exceeded, Drill reshuffles data rather than doing 
a broadcast to one side of the" +
+      " join. Range: 0 - " + MAX_BROADCAST_THRESHOLD + ".");
+  public static final OptionValidator BROADCAST_FACTOR = new 
RangeDoubleValidator("planner.broadcast_factor", 0,
+      Double.MAX_VALUE, 1.0d, "A heuristic parameter for influencing the 
broadcast of records as part of a query.");
+
+  public static final OptionValidator NESTEDLOOPJOIN_FACTOR = new 
RangeDoubleValidator("planner.nestedloopjoin_factor",
+      0, Double.MAX_VALUE, 100.0d);
+
+  public static final OptionValidator NLJOIN_FOR_SCALAR = new 
BooleanValidator("planner.enable_nljoin_for_scalar_only",
+      true);
+
+  public static final OptionValidator JOIN_ROW_COUNT_ESTIMATE_FACTOR = new 
RangeDoubleValidator(
+      "planner.join.row_count_estimate_factor", 0, Double.MAX_VALUE, 1.0d,
+      "The factor for adjusting the estimated row count when considering 
multiple join order sequences during the" +
+          " planning phase.");
+
   public static final OptionValidator MUX_EXCHANGE = new 
BooleanValidator("planner.enable_mux_exchange", true);
   public static final OptionValidator DEMUX_EXCHANGE = new 
BooleanValidator("planner.enable_demux_exchange", false);
-  public static final OptionValidator PARTITION_SENDER_THREADS_FACTOR = new 
LongValidator("planner.partitioner_sender_threads_factor", 2);
-  public static final OptionValidator PARTITION_SENDER_MAX_THREADS = new 
LongValidator("planner.partitioner_sender_max_threads", 8);
-  public static final OptionValidator PARTITION_SENDER_SET_THREADS = new 
LongValidator("planner.partitioner_sender_set_threads", -1);
+  public static final OptionValidator PARTITION_SENDER_THREADS_FACTOR = new 
LongValidator(
+      "planner.partitioner_sender_threads_factor", 2);
+  public static final OptionValidator PARTITION_SENDER_MAX_THREADS = new 
LongValidator(
+      "planner.partitioner_sender_max_threads", 8);
+  public static final OptionValidator PARTITION_SENDER_SET_THREADS = new 
LongValidator(
+      "planner.partitioner_sender_set_threads", -1);
   public static final OptionValidator PRODUCER_CONSUMER = new 
BooleanValidator("planner.add_producer_consumer", false);
-  public static final OptionValidator PRODUCER_CONSUMER_QUEUE_SIZE = new 
LongValidator("planner.producer_consumer_queue_size", 10);
+  public static final OptionValidator PRODUCER_CONSUMER_QUEUE_SIZE = new 
LongValidator(
+      "planner.producer_consumer_queue_size", 10);
   public static final OptionValidator HASH_SINGLE_KEY = new 
BooleanValidator("planner.enable_hash_single_key", true);
   public static final OptionValidator HASH_JOIN_SWAP = new 
BooleanValidator("planner.enable_hashjoin_swap", true);
-  public static final OptionValidator HASH_JOIN_SWAP_MARGIN_FACTOR = new 
RangeDoubleValidator("planner.join.hash_join_swap_margin_factor", 0, 100, 10d);
+  public static final OptionValidator HASH_JOIN_SWAP_MARGIN_FACTOR = new 
RangeDoubleValidator(
+      "planner.join.hash_join_swap_margin_factor", 0, 100, 10d);
   public static final String ENABLE_DECIMAL_DATA_TYPE_KEY = 
"planner.enable_decimal_data_type";
-  public static final OptionValidator ENABLE_DECIMAL_DATA_TYPE = new 
BooleanValidator(ENABLE_DECIMAL_DATA_TYPE_KEY, false);
+  public static final OptionValidator ENABLE_DECIMAL_DATA_TYPE = new 
BooleanValidator(ENABLE_DECIMAL_DATA_TYPE_KEY,
+      false);
   public static final OptionValidator HEP_OPT = new 
BooleanValidator("planner.enable_hep_opt", true);
-  public static final OptionValidator HEP_PARTITION_PRUNING = new 
BooleanValidator("planner.enable_hep_partition_pruning", true);
+  public static final OptionValidator HEP_PARTITION_PRUNING = new 
BooleanValidator(
+      "planner.enable_hep_partition_pruning", true);
   public static final OptionValidator PLANNER_MEMORY_LIMIT = new 
RangeLongValidator("planner.memory_limit",
-      INITIAL_OFF_HEAP_ALLOCATION_IN_BYTES, MAX_OFF_HEAP_ALLOCATION_IN_BYTES, 
DEFAULT_MAX_OFF_HEAP_ALLOCATION_IN_BYTES);
-
-  public static final OptionValidator IDENTIFIER_MAX_LENGTH =
-      new RangeLongValidator("planner.identifier_max_length", 128 /* A minimum 
length is needed because option names are identifiers themselves */,
-                              Integer.MAX_VALUE, 
DEFAULT_IDENTIFIER_MAX_LENGTH);
+      INITIAL_OFF_HEAP_ALLOCATION_IN_BYTES, MAX_OFF_HEAP_ALLOCATION_IN_BYTES, 
DEFAULT_MAX_OFF_HEAP_ALLOCATION_IN_BYTES,
+      "Defines the maximum amount of direct memory allocated to a query for 
planning. When multiple queries run" +
+          " concurrently, each query is allocated the amount of memory set by 
this parameter. Increase the value" +
+          " of this parameter and rerun the query if partition pruning failed 
due to insufficient memory.");
+
+  public static final OptionValidator IDENTIFIER_MAX_LENGTH = new 
RangeLongValidator("planner.identifier_max_length",
+      128 /* A minimum length is needed because option names are identifiers 
themselves */,
+      Integer.MAX_VALUE, DEFAULT_IDENTIFIER_MAX_LENGTH);
 
   public static final String TYPE_INFERENCE_KEY = 
"planner.enable_type_inference";
   public static final BooleanValidator TYPE_INFERENCE = new 
BooleanValidator(TYPE_INFERENCE_KEY, true);
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
index 06c3fe27510..102f36c6e5b 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/rpc/user/InboundImpersonationManager.java
@@ -86,7 +86,7 @@
   public static class InboundImpersonationPolicyValidator extends 
TypeValidators.AdminOptionValidator {
 
     public InboundImpersonationPolicyValidator(String name, String def) {
-      super(name, def);
+      super(name, def, "Defines inbound impersonation policies. See 
configuration documentation for more details.");
     }
 
     @Override
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
index 3b43f9af77e..290d2416e4c 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/OptionValidator.java
@@ -40,6 +40,13 @@ public String getOptionName() {
     return optionName;
   }
 
+  /**
+   * Returns the description of this option.
+   *
+   * @return option description
+   */
+  public abstract String getOptionDescription();
+
   /**
    * This function returns true if and only if this validator is meant for a 
short-lived option.
    *
diff --git 
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
 
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
index ced448c0c3b..69ff307fb34 100644
--- 
a/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
+++ 
b/exec/java-exec/src/main/java/org/apache/drill/exec/server/options/TypeValidators.java
@@ -32,8 +32,13 @@
   public static class PositiveLongValidator extends LongValidator {
     private final long max;
 
+    @Deprecated
     public PositiveLongValidator(String name, long max, long def) {
-      super(name, def);
+      this(name, max, def, null);
+    }
+
+    public PositiveLongValidator(String name, long max, long def, String 
description) {
+      super(name, def, description);
       this.max = max;
     }
 
@@ -50,8 +55,13 @@ public void validate(OptionValue v) {
 
   public static class PowerOfTwoLongValidator extends PositiveLongValidator {
 
+    @Deprecated
     public PowerOfTwoLongValidator(String name, long max, long def) {
-      super(name, max, def);
+      this(name, max, def, null);
+    }
+
+    public PowerOfTwoLongValidator(String name, long max, long def, String 
description) {
+      super(name, max, def, description);
     }
 
     @Override
@@ -73,8 +83,13 @@ private static boolean isPowerOfTwo(long num) {
     private final double min;
     private final double max;
 
+    @Deprecated
     public RangeDoubleValidator(String name, double min, double max, double 
def) {
-      super(name, def);
+      this(name, min, max, def, null);
+    }
+
+    public RangeDoubleValidator(String name, double min, double max, double 
def, String description) {
+      super(name, def, description);
       this.min = min;
       this.max = max;
     }
@@ -91,26 +106,50 @@ public void validate(OptionValue v) {
   }
 
   public static class BooleanValidator extends TypeValidator {
+
+    @Deprecated
     public BooleanValidator(String name, boolean def) {
-      super(name, Kind.BOOLEAN, OptionValue.createBoolean(OptionType.SYSTEM, 
name, def));
+      this(name, def, null);
+    }
+
+    public BooleanValidator(String name, boolean def, String description) {
+      super(name, Kind.BOOLEAN, OptionValue.createBoolean(OptionType.SYSTEM, 
name, def), description);
     }
   }
 
   public static class StringValidator extends TypeValidator {
+
+    @Deprecated
     public StringValidator(String name, String def) {
-      super(name, Kind.STRING, OptionValue.createString(OptionType.SYSTEM, 
name, def));
+      this(name, def, null);
+    }
+
+    public StringValidator(String name, String def, String description) {
+      super(name, Kind.STRING, OptionValue.createString(OptionType.SYSTEM, 
name, def), description);
     }
   }
 
   public static class LongValidator extends TypeValidator {
+
+    @Deprecated
     public LongValidator(String name, long def) {
-      super(name, Kind.LONG, OptionValue.createLong(OptionType.SYSTEM, name, 
def));
+      this(name, def, null);
+    }
+
+    public LongValidator(String name, long def, String description) {
+      super(name, Kind.LONG, OptionValue.createLong(OptionType.SYSTEM, name, 
def), description);
     }
   }
 
   public static class DoubleValidator extends TypeValidator {
+
+    @Deprecated
     public DoubleValidator(String name, double def) {
-      super(name, Kind.DOUBLE, OptionValue.createDouble(OptionType.SYSTEM, 
name, def));
+      this(name, def, null);
+    }
+
+    public DoubleValidator(String name, double def, String description) {
+      super(name, Kind.DOUBLE, OptionValue.createDouble(OptionType.SYSTEM, name, def), description);
     }
   }
 
@@ -118,8 +157,13 @@ public DoubleValidator(String name, double def) {
     private final long min;
     private final long max;
 
+    @Deprecated
     public RangeLongValidator(String name, long min, long max, long def) {
-      super(name, def);
+      this(name, min, max, def, null);
+    }
+
+    public RangeLongValidator(String name, long min, long max, long def, String description) {
+      super(name, def, description);
       this.min = min;
       this.max = max;
     }
@@ -136,8 +180,14 @@ public void validate(OptionValue v) {
   }
 
   public static class AdminOptionValidator extends StringValidator {
+
+    @Deprecated
     public AdminOptionValidator(String name, String def) {
-      super(name, def);
+      this(name, def, null);
+    }
+
+    public AdminOptionValidator(String name, String def, String description) {
+      super(name, def, description);
     }
 
     @Override
@@ -157,8 +207,8 @@ public void validate(OptionValue v) {
   public static class EnumeratedStringValidator extends StringValidator {
     private final Set<String> valuesSet = new HashSet<>();
 
-    public EnumeratedStringValidator(String name, String def, String... values) {
-      super(name, def);
+    public EnumeratedStringValidator(String name, String def, String description, String... values) {
+      super(name, def, description);
       for (String value : values) {
         valuesSet.add(value.toLowerCase());
       }
@@ -178,12 +228,20 @@ public void validate(final OptionValue v) {
   public static abstract class TypeValidator extends OptionValidator {
     private final Kind kind;
     private final OptionValue defaultValue;
+    private final String description;
 
-    public TypeValidator(final String name, final Kind kind, final OptionValue defValue) {
+    public TypeValidator(final String name, final Kind kind, final OptionValue defValue, String description) {
       super(name);
       checkArgument(defValue.type == OptionType.SYSTEM, "Default value must be SYSTEM type.");
       this.kind = kind;
       this.defaultValue = defValue;
+      this.description = description;
+    }
+
+    @Override
+    public String getOptionDescription() {
+      return description != null ? description :
+          "A description of this option is unavailable.";
     }
 
     @Override
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/OptionIterator.java b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/OptionIterator.java
index 5c8a6412dcd..9d6497ae9fe 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/OptionIterator.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/store/sys/OptionIterator.java
@@ -26,6 +26,7 @@
 import org.apache.drill.exec.ops.FragmentContext;
 import org.apache.drill.exec.server.options.DrillConfigIterator;
 import org.apache.drill.exec.server.options.OptionManager;
+import org.apache.drill.exec.server.options.OptionValidator;
 import org.apache.drill.exec.server.options.OptionValue;
 import org.apache.drill.exec.server.options.OptionValue.Kind;
 import org.apache.drill.exec.server.options.OptionValue.OptionType;
@@ -38,12 +39,11 @@
     BOOT, SYS_SESS, BOTH
   };
 
-  private final OptionManager fragmentOptions;
   private final Iterator<OptionValue> mergedOptions;
 
   public OptionIterator(FragmentContext context, Mode mode){
     final DrillConfigIterator configOptions = new DrillConfigIterator(context.getConfig());
-    fragmentOptions = context.getOptions();
+    final OptionManager fragmentOptions = context.getOptions();
     final Iterator<OptionValue> optionList;
     switch(mode){
     case BOOT:
@@ -71,10 +71,11 @@ public boolean hasNext() {
   public OptionValueWrapper next() {
     final OptionValue value = mergedOptions.next();
     final Status status;
+    final OptionValidator validator = SystemOptionManager.getValidator(value.name);
     if (value.type == OptionType.BOOT) {
       status = Status.BOOT;
     } else {
-      final OptionValue def = SystemOptionManager.getValidator(value.name).getDefault();
+      final OptionValue def = validator.getDefault();
       if (value.equalsIgnoreType(def)) {
         status = Status.DEFAULT;
         } else {
@@ -82,10 +83,10 @@ public OptionValueWrapper next() {
         }
       }
     return new OptionValueWrapper(value.name, value.kind, value.type, value.num_val, value.string_val,
-        value.bool_val, value.float_val, status);
+        value.bool_val, value.float_val, status, validator.getOptionDescription());
   }
 
-  public static enum Status {
+  public enum Status {
     BOOT, DEFAULT, CHANGED
   }
 
@@ -102,10 +103,11 @@ public OptionValueWrapper next() {
     public final String string_val;
     public final Boolean bool_val;
     public final Double float_val;
+    public final String description;
 
     public OptionValueWrapper(final String name, final Kind kind, final OptionType type, final Long num_val,
         final String string_val, final Boolean bool_val, final Double float_val,
-        final Status status) {
+        final Status status, final String description) {
       this.name = name;
       this.kind = kind;
       this.type = type;
@@ -114,6 +116,7 @@ public OptionValueWrapper(final String name, final Kind kind, final OptionType t
       this.bool_val = bool_val;
       this.float_val = float_val;
       this.status = status;
+      this.description = description;
     }
   }
 
diff --git a/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java b/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
index 9673394bd55..457ecb91a09 100644
--- a/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
+++ b/exec/java-exec/src/main/java/org/apache/drill/exec/testing/ExecutionControls.java
@@ -80,7 +80,8 @@
      * @param ttl  the number of queries for which this option should be valid
      */
     public ControlsOptionValidator(final String name, final String def, final int ttl) {
-      super(name, OptionValue.Kind.STRING, OptionValue.createString(OptionType.SYSTEM, name, def));
+      super(name, OptionValue.Kind.STRING, OptionValue.createString(OptionType.SYSTEM, name, def),
+          "For internal testing purposes.");
       assert ttl > 0;
       this.ttl = ttl;
     }
diff --git a/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestOptions.java b/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestOptions.java
index 2761faa2370..6ab23dc14a9 100644
--- a/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestOptions.java
+++ b/exec/java-exec/src/test/java/org/apache/drill/exec/server/TestOptions.java
@@ -45,6 +45,18 @@ public void testOptions() throws Exception{
     );
   }
 
+  @Test
+  public void checkDescriptionColumn() throws Exception {
+    testBuilder()
+        .sqlQuery("SELECT * FROM sys.options WHERE name = '%s'", SLICE_TARGET)
+        .unOrdered()
+        .baselineColumns("name", "kind", "type", "status", "num_val", 
"string_val", "bool_val", "float_val",
+            "description")
+        .baselineValues(SLICE_TARGET, "LONG", "SYSTEM", "DEFAULT", 
ExecConstants.SLICE_TARGET_DEFAULT, null, null,
+            null, ExecConstants.SLICE_TARGET_DESCRIPTION)
+        .go();
+  }
+
   @Test
   public void checkValidationException() throws Exception {
     thrownException.expect(new UserExceptionMatcher(VALIDATION));
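
For illustration only, here is a small sketch of how the new description-aware constructors are meant to be used. The option names and description strings below are invented; only the constructor and getOptionDescription() signatures come from the patch above:

    import org.apache.drill.exec.server.options.TypeValidators.BooleanValidator;

    public class DescriptionExample {
      // New style: a description is supplied and later surfaced through sys.options.
      static final BooleanValidator DOCUMENTED = new BooleanValidator(
          "exec.example.documented_flag", false,
          "Example option; this text would appear in the sys.options description column.");

      // Old style: the two-argument constructor is now @Deprecated, so the
      // description falls back to the placeholder text.
      @SuppressWarnings("deprecation")
      static final BooleanValidator LEGACY =
          new BooleanValidator("exec.example.legacy_flag", false);

      public static void main(String[] args) {
        System.out.println(DOCUMENTED.getOptionDescription());
        // -> Example option; this text would appear in the sys.options description column.
        System.out.println(LEGACY.getOptionDescription());
        // -> A description of this option is unavailable.
      }
    }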


 



> Add Description Column in sys.options
> -------------------------------------
>
>                 Key: DRILL-4699
>                 URL: https://issues.apache.org/jira/browse/DRILL-4699
>             Project: Apache Drill
>          Issue Type: Improvement
>          Components:  Server, Documentation
>    Affects Versions: 1.6.0
>            Reporter: John Omernik
>            Assignee: Kunal Khatua
>            Priority: Major
>             Fix For: 1.14.0
>
>
> select * from sys.options provides a user with a strong understanding of what 
> options are available in Drill, but these options are not well documented. Some 
> options are "experimental", while other options have a function only in specific 
> cases (writers vs. readers, for example). If we had a large text field for a 
> description, we could enforce documentation of a setting at option creation 
> time, and the description of the setting could change as versions change (i.e., 
> when an option graduates from experimental to supported, its description would 
> be updated in the version the user is running). When users run select * from 
> sys.options, they then know the exact state of each option every time they 
> query. It could also facilitate better self-documentation via QA on pull 
> requests ("Did you update sys.options.desc?"). This makes Drill easier for 
> users and admins to use in an enterprise.
> The first step is adding the field, and then going back and filling in the 
> desc for each option (another JIRA once the column is available).
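
Once the column exists, the name, current status, and documentation of every option can be read in a single query. Below is a minimal JDBC sketch, assuming a Drillbit reachable on localhost and the column names used by the new checkDescriptionColumn test (name, status, description); the connection URL and output formatting are placeholders, not part of the patch:

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class SysOptionsDescriptions {
      public static void main(String[] args) throws Exception {
        // Placeholder URL for a local Drillbit; requires the Drill JDBC driver on the classpath.
        try (Connection conn = DriverManager.getConnection("jdbc:drill:drillbit=localhost");
             Statement stmt = conn.createStatement();
             ResultSet rs = stmt.executeQuery(
                 "SELECT name, status, description FROM sys.options ORDER BY name")) {
          while (rs.next()) {
            System.out.printf("%-60s %-8s %s%n",
                rs.getString("name"), rs.getString("status"), rs.getString("description"));
          }
        }
      }
    }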


