diff --git a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java index 4f35a6da60..0c4049fed2 100644 --- a/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java +++ b/common/src/java/org/apache/hadoop/hive/common/metrics/metrics2/CodahaleMetrics.java @@ -18,11 +18,9 @@ package org.apache.hadoop.hive.common.metrics.metrics2; -import com.codahale.metrics.ConsoleReporter; import com.codahale.metrics.Counter; import com.codahale.metrics.ExponentiallyDecayingReservoir; import com.codahale.metrics.Gauge; -import com.codahale.metrics.JmxReporter; import com.codahale.metrics.Meter; import com.codahale.metrics.Metric; import com.codahale.metrics.MetricRegistry; @@ -35,7 +33,6 @@ import com.codahale.metrics.jvm.MemoryUsageGaugeSet; import com.codahale.metrics.jvm.ThreadStatesGaugeSet; import com.fasterxml.jackson.databind.ObjectMapper; -import com.github.joshelser.dropwizard.metrics.hadoop.HadoopMetrics2Reporter; import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; import com.google.common.base.Splitter; @@ -46,29 +43,20 @@ import java.lang.reflect.Constructor; import java.lang.reflect.InvocationTargetException; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.permission.FsPermission; import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; import org.apache.hadoop.hive.common.metrics.common.MetricsScope; import org.apache.hadoop.hive.common.metrics.common.MetricsVariable; import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem; import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import java.io.BufferedWriter; import java.io.Closeable; -import java.io.IOException; -import java.io.OutputStreamWriter; import java.lang.management.ManagementFactory; -import 
java.net.URI; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; -import java.util.TimerTask; import java.util.concurrent.ConcurrentHashMap; import java.util.concurrent.ExecutionException; import java.util.concurrent.TimeUnit; @@ -388,16 +376,15 @@ public String dumpJson() throws Exception { } /** - * Initializes reporters from HIVE_CODAHALE_METRICS_REPORTER_CLASSES or HIVE_METRICS_REPORTER if the former is not defined. - * Note: if both confs are defined, only HIVE_CODAHALE_METRICS_REPORTER_CLASSES will be used. + * Initializes reporters from HIVE_CODAHALE_METRICS_REPORTER_CLASSES. */ private void initReporting() { - if (!(initCodahaleMetricsReporterClasses() || initMetricsReporter())) { + if (!(initCodahaleMetricsReporterClasses())) { LOGGER.warn("Unable to initialize metrics reporting"); } if (reporters.isEmpty()) { - // log a warning incase no reporters were successfully added + // log a warning in case no reporters were successfully added LOGGER.warn("No reporters configured for codahale metrics!"); } } @@ -439,49 +426,4 @@ private boolean initCodahaleMetricsReporterClasses() { } return true; } - - /** - * Initializes reporting using HIVE_METRICS+REPORTER. - * @return whether initialization was successful or not - */ - private boolean initMetricsReporter() { - - List metricsReporterNames = Lists.newArrayList(Splitter.on(",").trimResults(). 
- omitEmptyStrings().split(conf.getVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER))); - if (metricsReporterNames.isEmpty()) { - return false; - } - - MetricsReporting reporter = null; - for (String metricsReportingName : metricsReporterNames) { - try { - reporter = MetricsReporting.valueOf(metricsReportingName.trim().toUpperCase()); - } catch (IllegalArgumentException e) { - LOGGER.error("Invalid reporter name " + metricsReportingName, e); - throw e; - } - CodahaleReporter codahaleReporter = null; - switch (reporter) { - case CONSOLE: - codahaleReporter = new ConsoleMetricsReporter(metricRegistry, conf); - break; - case JMX: - codahaleReporter = new JmxMetricsReporter(metricRegistry, conf); - break; - case JSON_FILE: - codahaleReporter = new JsonFileMetricsReporter(metricRegistry, conf); - break; - case HADOOP2: - codahaleReporter = new Metrics2Reporter(metricRegistry, conf); - break; - default: - LOGGER.warn("Unhandled reporter " + reporter + " provided."); - } - if (codahaleReporter != null) { - codahaleReporter.start(); - reporters.add(codahaleReporter); - } - } - return true; - } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index c5e46e910e..38e083e133 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -90,7 +90,6 @@ private static byte[] confVarByteArray = null; private static final Map vars = new HashMap(); - private static final Map metaConfs = new HashMap(); private final List restrictList = new ArrayList(); private final Set hiddenSet = new HashSet(); private final List rscList = new ArrayList<>(); @@ -214,105 +213,17 @@ private static URL checkConfigFile(File f) { * be recreated so that the change will take effect. 
*/ public static final HiveConf.ConfVars[] metaVars = { - HiveConf.ConfVars.METASTOREWAREHOUSE, HiveConf.ConfVars.REPLDIR, - HiveConf.ConfVars.METASTOREURIS, - HiveConf.ConfVars.METASTORESELECTION, - HiveConf.ConfVars.METASTORE_SERVER_PORT, - HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, - HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, - HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, - HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, - HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, - HiveConf.ConfVars.METASTOREPWD, - HiveConf.ConfVars.METASTORECONNECTURLHOOK, - HiveConf.ConfVars.METASTORECONNECTURLKEY, - HiveConf.ConfVars.METASTORESERVERMINTHREADS, - HiveConf.ConfVars.METASTORESERVERMAXTHREADS, - HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE, HiveConf.ConfVars.METASTORE_INT_ORIGINAL, HiveConf.ConfVars.METASTORE_INT_ARCHIVED, HiveConf.ConfVars.METASTORE_INT_EXTRACTED, - HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE, - HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, - HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, - HiveConf.ConfVars.METASTORE_TOKEN_SIGNATURE, - HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES, - HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, - HiveConf.ConfVars.METASTORE_VALIDATE_TABLES, - HiveConf.ConfVars.METASTORE_DATANUCLEUS_INIT_COL_INFO, - HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS, - HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS, - HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE, - HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL, - HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION, - HiveConf.ConfVars.METASTORE_CACHE_LEVEL2, - HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE, - HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY, - HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK, - HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS, - HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX, - HiveConf.ConfVars.METASTORE_EVENT_LISTENERS, - HiveConf.ConfVars.METASTORE_TRANSACTIONAL_EVENT_LISTENERS, - 
HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ, - HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION, - HiveConf.ConfVars.METASTORE_EVENT_MESSAGE_FACTORY, - HiveConf.ConfVars.METASTORE_FILTER_HOOK, - HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, - HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS, - HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS, - HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX, - HiveConf.ConfVars.METASTORE_INIT_HOOKS, - HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS, - HiveConf.ConfVars.HMSHANDLERATTEMPTS, - HiveConf.ConfVars.HMSHANDLERINTERVAL, - HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, - HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, - HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS, - HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, HiveConf.ConfVars.USERS_IN_ADMIN_ROLE, HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, HiveConf.ConfVars.HIVE_TXN_MANAGER, - HiveConf.ConfVars.HIVE_TXN_TIMEOUT, HiveConf.ConfVars.HIVE_TXN_OPERATIONAL_PROPERTIES, - HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE, - HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH, - HiveConf.ConfVars.HIVE_TXN_RETRYABLE_SQLEX_REGEX, - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_TUNER, - HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL, - HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL, HiveConf.ConfVars.METASTORE_FASTPATH, - 
HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS, - HiveConf.ConfVars.METASTORE_WM_DEFAULT_POOL_SIZE }; - /** - * User configurable Metastore vars - */ - public static final HiveConf.ConfVars[] metaConfVars = { - HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL, - HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL, - HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, - HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, - HiveConf.ConfVars.METASTORE_CAPABILITY_CHECK, - HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES - }; - - static { - for (ConfVars confVar : metaConfVars) { - metaConfs.put(confVar.varname, confVar); - } - } - public static final String HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME = "hive.llap.daemon.service.principal"; public static final String HIVE_SERVER2_AUTHENTICATION_LDAP_USERMEMBERSHIPKEY_NAME = "hive.server2.authentication.ldap.userMembershipKey"; @@ -329,7 +240,6 @@ private static URL checkConfigFile(File f) { */ public static final HiveConf.ConfVars[] dbVars = { HiveConf.ConfVars.HADOOPBIN, - HiveConf.ConfVars.METASTOREWAREHOUSE, HiveConf.ConfVars.SCRATCHDIR }; @@ -708,40 +618,11 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal // Metastore stuff. Be sure to update HiveConf.metaVars when you add something here! METASTOREDBTYPE("hive.metastore.db.type", "DERBY", new StringSet("DERBY", "ORACLE", "MYSQL", "MSSQL", "POSTGRES"), "Type of database used by the metastore. Information schema & JDBCStorageHandler depend on it."), - /** - * @deprecated Use MetastoreConf.WAREHOUSE - */ - @Deprecated - METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse", - "location of default database for the warehouse"), HIVE_METASTORE_WAREHOUSE_EXTERNAL("hive.metastore.warehouse.external.dir", null, "Default location for external tables created in the warehouse. 
" + "If not set or null, then the normal warehouse location will be used as the default location."), - /** - * @deprecated Use MetastoreConf.THRIFT_URIS - */ - @Deprecated - METASTOREURIS("hive.metastore.uris", "", - "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."), - - /** - * @deprecated Use MetastoreConf.THRIFT_URI_SELECTION - */ - @Deprecated - METASTORESELECTION("hive.metastore.uri.selection", "RANDOM", - new StringSet("SEQUENTIAL", "RANDOM"), - "Determines the selection mechanism used by metastore client to connect to remote " + - "metastore. SEQUENTIAL implies that the first valid metastore from the URIs specified " + - "as part of hive.metastore.uris will be picked. RANDOM implies that the metastore " + - "will be picked randomly"), - /** - * @deprecated Use MetastoreConf.CAPABILITY_CHECK - */ - @Deprecated - METASTORE_CAPABILITY_CHECK("hive.metastore.client.capability.check", true, - "Whether to check client capabilities for potentially breaking API usage."), METASTORE_CLIENT_CAPABILITIES("hive.metastore.client.capabilities", "EXTWRITE,EXTREAD,HIVEBUCKET2," + "HIVEFULLACIDREAD,HIVEFULLACIDWRITE,HIVECACHEINVALIDATE,HIVEMANAGESTATS," + "HIVEMANAGEDINSERTWRITE,HIVEMANAGEDINSERTREAD," @@ -760,153 +641,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Used to avoid all of the proxies and object copies in the metastore. 
Note, if this is " + "set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " + "undefined and most likely undesired behavior will result"), - /** - * @deprecated Use MetastoreConf.FS_HANDLER_THREADS_COUNT - */ - @Deprecated - METASTORE_FS_HANDLER_THREADS_COUNT("hive.metastore.fshandler.threads", 15, - "Number of threads to be allocated for metastore handler for fs operations."), - /** - * @deprecated Use MetastoreConf.FILE_METADATA_THREADS - */ - @Deprecated - METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1, - "Number of threads to use to read file metadata in background to cache it."), - - /** - * @deprecated Use MetastoreConf.URI_RESOLVER - */ - @Deprecated - METASTORE_URI_RESOLVER("hive.metastore.uri.resolver", "", - "If set, fully qualified class name of resolver for hive metastore uri's"), - - /** - * @deprecated Use MetastoreConf.THRIFT_CONNECTION_RETRIES - */ - @Deprecated - METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3, - "Number of retries while opening a connection to metastore"), - /** - * @deprecated Use MetastoreConf.THRIFT_FAILURE_RETRIES - */ - @Deprecated - METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1, - "Number of retries upon failure of Thrift metastore calls"), - /** - * @deprecated Use MetastoreConf.SERVER_PORT - */ - @Deprecated - METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"), - /** - * @deprecated Use MetastoreConf.CLIENT_CONNECT_RETRY_DELAY - */ - @Deprecated - METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s", - new TimeValidator(TimeUnit.SECONDS), - "Number of seconds for the client to wait between consecutive connection attempts"), - /** - * @deprecated Use MetastoreConf.CLIENT_SOCKET_TIMEOUT - */ - @Deprecated - METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s", - new TimeValidator(TimeUnit.SECONDS), - "MetaStore Client socket 
timeout in seconds"), - /** - * @deprecated Use MetastoreConf.CLIENT_SOCKET_LIFETIME - */ - @Deprecated - METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s", - new TimeValidator(TimeUnit.SECONDS), - "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" + - "reconnects on the next MetaStore operation. A value of 0s means the connection\n" + - "has an infinite lifetime."), - /** - * @deprecated Use MetastoreConf.PWD - */ - @Deprecated - METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine", - "password to use against metastore database"), - /** - * @deprecated Use MetastoreConf.CONNECT_URL_HOOK - */ - @Deprecated - METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "", - "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"), - /** - * @deprecated Use MetastoreConf.MULTITHREADED - */ - @Deprecated - METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true, - "Set this to true if multiple threads access metastore through JDO concurrently."), - /** - * @deprecated Use MetastoreConf.CONNECT_URL_KEY - */ - @Deprecated - METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL", - "jdbc:derby:;databaseName=metastore_db;create=true", - "JDBC connect string for a JDBC metastore.\n" + - "To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" + - "For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."), - /** - * @deprecated Use MetastoreConf.DBACCESS_SSL_PROPS - */ - @Deprecated - METASTORE_DBACCESS_SSL_PROPS("hive.metastore.dbaccess.ssl.properties", "", - "Comma-separated SSL properties for metastore to access database when JDO connection URL\n" + - "enables SSL access. e.g. 
javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."), - /** - * @deprecated Use MetastoreConf.HMS_HANDLER_ATTEMPTS - */ - @Deprecated - HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10, - "The number of times to retry a HMSHandler call if there were a connection error."), - /** - * @deprecated Use MetastoreConf.HMS_HANDLER_INTERVAL - */ - @Deprecated - HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms", - new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."), - /** - * @deprecated Use MetastoreConf.HMS_HANDLER_FORCE_RELOAD_CONF - */ - @Deprecated - HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false, - "Whether to force reloading of the HMSHandler configuration (including\n" + - "the connection URL, before the next metastore query that accesses the\n" + - "datastore. Once reloaded, this value is reset to false. Used for\n" + - "testing only."), - /** - * @deprecated Use MetastoreConf.SERVER_MAX_MESSAGE_SIZE - */ - @Deprecated - METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024L, - "Maximum message size in bytes a HMS will accept."), - /** - * @deprecated Use MetastoreConf.SERVER_MIN_THREADS - */ - @Deprecated - METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200, - "Minimum number of worker threads in the Thrift server's pool."), - /** - * @deprecated Use MetastoreConf.SERVER_MAX_THREADS - */ - @Deprecated - METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000, - "Maximum number of worker threads in the Thrift server's pool."), - /** - * @deprecated Use MetastoreConf.TCP_KEEP_ALIVE - */ - @Deprecated - METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true, - "Whether to enable TCP keepalive for the metastore server. 
Keepalive will prevent accumulation of half-open connections."), - - /** - * @deprecated Use MetastoreConf.WM_DEFAULT_POOL_SIZE - */ - @Deprecated - METASTORE_WM_DEFAULT_POOL_SIZE("hive.metastore.wm.default.pool.size", 4, - "The size of a default pool to create when creating an empty resource plan;\n" + - "If not positive, no default pool will be created."), METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original", "_INTERMEDIATE_ORIGINAL", @@ -916,59 +650,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "_INTERMEDIATE_ARCHIVED", ""), METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted", "_INTERMEDIATE_EXTRACTED", ""), - /** - * @deprecated Use MetastoreConf.KERBEROS_KEYTAB_FILE - */ - @Deprecated - METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "", - "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."), - /** - * @deprecated Use MetastoreConf.KERBEROS_PRINCIPAL - */ - @Deprecated - METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal", - "hive-metastore/_HOST@EXAMPLE.COM", - "The service principal for the metastore Thrift server. \n" + - "The special string _HOST will be replaced automatically with the correct host name."), - /** - * @deprecated Use MetastoreConf.CLIENT_KERBEROS_PRINCIPAL - */ - @Deprecated - METASTORE_CLIENT_KERBEROS_PRINCIPAL("hive.metastore.client.kerberos.principal", - "", // E.g. "hive-metastore/_HOST@EXAMPLE.COM". - "The Kerberos principal associated with the HA cluster of hcat_servers."), - /** - * @deprecated Use MetastoreConf.USE_THRIFT_SASL - */ - @Deprecated - METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false, - "If true, the metastore Thrift interface will be secured with SASL. 
Clients must authenticate with Kerberos."), - /** - * @deprecated Use MetastoreConf.USE_THRIFT_FRAMED_TRANSPORT - */ - @Deprecated - METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false, - "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."), - /** - * @deprecated Use MetastoreConf.USE_THRIFT_COMPACT_PROTOCOL - */ - @Deprecated - METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false, - "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" + - "Setting it to true will break compatibility with older clients running TBinaryProtocol."), - /** - * @deprecated Use MetastoreConf.TOKEN_SIGNATURE - */ - @Deprecated - METASTORE_TOKEN_SIGNATURE("hive.metastore.token.signature", "", - "The delegation token service name to match when selecting a token from the current user's tokens."), - /** - * @deprecated Use MetastoreConf.DELEGATION_TOKEN_STORE_CLS - */ - @Deprecated - METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class", - "org.apache.hadoop.hive.thrift.MemoryTokenStore", - "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."), + METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR( "hive.cluster.delegation.token.store.zookeeper.connectString", "", "The ZooKeeper token store connect string. You can re-use the configuration value\n" + @@ -984,554 +666,23 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "ACL for token store entries. Comma separated list of ACL entries. 
For example:\n" + "sasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa\n" + "Defaults to all permissions for the hiveserver2/metastore process user."), - /** - * @deprecated Use MetastoreConf.CACHE_PINOBJTYPES - */ - @Deprecated - METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order", - "List of comma separated metastore object types that should be pinned in the cache"), - /** - * @deprecated Use MetastoreConf.CONNECTION_POOLING_TYPE - */ - @Deprecated - METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "HikariCP", new StringSet("BONECP", "DBCP", - "HikariCP", "NONE"), - "Specify connection pool library for datanucleus"), - /** - * @deprecated Use MetastoreConf.CONNECTION_POOLING_MAX_CONNECTIONS - */ - @Deprecated - METASTORE_CONNECTION_POOLING_MAX_CONNECTIONS("datanucleus.connectionPool.maxPoolSize", 10, - "Specify the maximum number of connections in the connection pool. Note: The configured size will be used by\n" + - "2 connection pools (TxnHandler and ObjectStore). When configuring the max connection pool size, it is\n" + - "recommended to take into account the number of metastore instances and the number of HiveServer2 instances\n" + - "configured with embedded metastore. 
To get optimal performance, set config to meet the following condition\n"+ - "(2 * pool_size * metastore_instances + 2 * pool_size * HS2_instances_with_embedded_metastore) = \n" + - "(2 * physical_core_count + hard_disk_count)."), - // Workaround for DN bug on Postgres: - // http://www.datanucleus.org/servlet/forum/viewthread_thread,7985_offset - /** - * @deprecated Use MetastoreConf.DATANUCLEUS_INIT_COL_INFO - */ - @Deprecated - METASTORE_DATANUCLEUS_INIT_COL_INFO("datanucleus.rdbms.initializeColumnInfo", "NONE", - "initializeColumnInfo setting for DataNucleus; set to NONE at least on Postgres."), - /** - * @deprecated Use MetastoreConf.VALIDATE_TABLES - */ - @Deprecated - METASTORE_VALIDATE_TABLES("datanucleus.schema.validateTables", false, - "validates existing schema against code. turn this on if you want to verify existing schema"), - /** - * @deprecated Use MetastoreConf.VALIDATE_COLUMNS - */ - @Deprecated - METASTORE_VALIDATE_COLUMNS("datanucleus.schema.validateColumns", false, - "validates existing schema against code. turn this on if you want to verify existing schema"), - /** - * @deprecated Use MetastoreConf.VALIDATE_CONSTRAINTS - */ - @Deprecated - METASTORE_VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints", false, - "validates existing schema against code. turn this on if you want to verify existing schema"), - /** - * @deprecated Use MetastoreConf.STORE_MANAGER_TYPE - */ - @Deprecated - METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"), - /** - * @deprecated Use MetastoreConf.AUTO_CREATE_ALL - */ - @Deprecated - METASTORE_AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", false, - "Auto creates necessary schema on a startup if one doesn't exist. Set this to false, after creating it once." - + "To enable auto create also set hive.metastore.schema.verification=false. Auto creation is not " - + "recommended for production use cases, run schematool command instead." 
), - /** - * @deprecated Use MetastoreConf.SCHEMA_VERIFICATION - */ - @Deprecated - METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", true, - "Enforce metastore schema version consistency.\n" + - "True: Verify that version information stored in is compatible with one from Hive jars. Also disable automatic\n" + - " schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" + - " proper metastore schema migration. (Default)\n" + - "False: Warn if the version information stored in metastore doesn't match with one from in Hive jars."), - /** - * @deprecated Use MetastoreConf.SCHEMA_VERIFICATION_RECORD_VERSION - */ - @Deprecated - METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", false, - "When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" + - " enabled the MS will be unusable."), - /** - * @deprecated Use MetastoreConf.SCHEMA_INFO_CLASS - */ - @Deprecated - METASTORE_SCHEMA_INFO_CLASS("hive.metastore.schema.info.class", - "org.apache.hadoop.hive.metastore.MetaStoreSchemaInfo", - "Fully qualified class name for the metastore schema information class \n" - + "which is used by schematool to fetch the schema information.\n" - + " This class should implement the IMetaStoreSchemaInfo interface"), - /** - * @deprecated Use MetastoreConf.DATANUCLEUS_TRANSACTION_ISOLATION - */ - @Deprecated - METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed", - "Default transaction isolation level for identity generation."), - /** - * @deprecated Use MetastoreConf.DATANUCLEUS_CACHE_LEVEL2 - */ - @Deprecated - METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false, - "Use a level 2 cache. 
Turn this off if metadata is changed independently of Hive metastore server"), - METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""), - /** - * @deprecated Use MetastoreConf.IDENTIFIER_FACTORY - */ - @Deprecated - METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1", - "Name of the identifier factory to use when generating table/column names etc. \n" + - "'datanucleus1' is used for backward compatibility with DataNucleus v1"), - /** - * @deprecated Use MetastoreConf.DATANUCLEUS_USE_LEGACY_VALUE_STRATEGY - */ - @Deprecated - METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""), - /** - * @deprecated Use MetastoreConf.DATANUCLEUS_PLUGIN_REGISTRY_BUNDLE_CHECK - */ - @Deprecated - METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG", - "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"), - /** - * @deprecated Use MetastoreConf.BATCH_RETRIEVE_MAX - */ - @Deprecated - METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300, - "Maximum number of objects (tables/partitions) can be retrieved from metastore in one batch. \n" + - "The higher the number, the less the number of round trips is needed to the Hive metastore server, \n" + - "but it may also cause higher memory requirement at the client side."), - /** - * @deprecated Use MetastoreConf.BATCH_RETRIEVE_OBJECTS_MAX - */ - @Deprecated - METASTORE_BATCH_RETRIEVE_OBJECTS_MAX( - "hive.metastore.batch.retrieve.table.partition.max", 1000, - "Maximum number of objects that metastore internally retrieves in one batch."), - - /** - * @deprecated Use MetastoreConf.INIT_HOOKS - */ - @Deprecated - METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "", - "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. 
\n" + - "An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."), - /** - * @deprecated Use MetastoreConf.PRE_EVENT_LISTENERS - */ - @Deprecated - METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "", - "List of comma separated listeners for metastore events."), - /** - * @deprecated Use MetastoreConf.EVENT_LISTENERS - */ - @Deprecated - METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", - "A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" + - " interface. The metastore event and corresponding listener method will be invoked in separate JDO transactions. " + - "Alternatively, configure hive.metastore.transactional.event.listeners to ensure both are invoked in same JDO transaction."), - /** - * @deprecated Use MetastoreConf.TRANSACTIONAL_EVENT_LISTENERS - */ - @Deprecated - METASTORE_TRANSACTIONAL_EVENT_LISTENERS("hive.metastore.transactional.event.listeners", "", - "A comma separated list of Java classes that implement the org.apache.hadoop.hive.metastore.MetaStoreEventListener" + - " interface. 
Both the metastore event and corresponding listener method will be invoked in the same JDO transaction."), - /** - * @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES - */ - @Deprecated - NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES("hive.notification.sequence.lock.max.retries", 10, - "Number of retries required to acquire a lock when getting the next notification sequential ID for entries " - + "in the NOTIFICATION_LOG table."), - /** - * @deprecated Use MetastoreConf.NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL - */ - @Deprecated - NOTIFICATION_SEQUENCE_LOCK_RETRY_SLEEP_INTERVAL("hive.notification.sequence.lock.retry.sleep.interval", 10L, - new TimeValidator(TimeUnit.SECONDS), - "Sleep interval between retries to acquire a notification lock as described part of property " - + NOTIFICATION_SEQUENCE_LOCK_MAX_RETRIES.name()), - /** - * @deprecated Use MetastoreConf.EVENT_DB_LISTENER_TTL - */ - @Deprecated - METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s", - new TimeValidator(TimeUnit.SECONDS), - "time after which events will be removed from the database listener queue"), - - /** - * @deprecated Use MetastoreConf.EVENT_DB_NOTIFICATION_API_AUTH - */ - @Deprecated - METASTORE_EVENT_DB_NOTIFICATION_API_AUTH("hive.metastore.event.db.notification.api.auth", true, - "Should metastore do authorization against database notification related APIs such as get_next_notification.\n" + - "If set to true, then only the superusers in proxy settings have the permission"), - - /** - * @deprecated Use MetastoreConf.AUTHORIZATION_STORAGE_AUTH_CHECKS - */ - @Deprecated - METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false, - "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" + - "for operations like drop-partition (disallow the drop-partition if the user in\n" + - "question doesn't have permissions to delete the corresponding directory\n" + - 
"on the storage)."), METASTORE_AUTHORIZATION_EXTERNALTABLE_DROP_CHECK("hive.metastore.authorization.storage.check.externaltable.drop", true, "Should StorageBasedAuthorization check permission of the storage before dropping external table.\n" + "StorageBasedAuthorization already does this check for managed table. For external table however,\n" + "anyone who has read permission of the directory could drop external table, which is surprising.\n" + "The flag is set to false by default to maintain backward compatibility."), - /** - * @deprecated Use MetastoreConf.EVENT_CLEAN_FREQ - */ - @Deprecated - METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s", - new TimeValidator(TimeUnit.SECONDS), - "Frequency at which timer task runs to purge expired events in metastore."), - /** - * @deprecated Use MetastoreConf.EVENT_EXPIRY_DURATION - */ - @Deprecated - METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s", - new TimeValidator(TimeUnit.SECONDS), - "Duration after which events expire from events table"), - /** - * @deprecated Use MetastoreConf.EVENT_MESSAGE_FACTORY - */ - @Deprecated - METASTORE_EVENT_MESSAGE_FACTORY("hive.metastore.event.message.factory", - "org.apache.hadoop.hive.metastore.messaging.json.gzip.GzipJSONMessageEncoder", - "Factory class for making encoding and decoding messages in the events generated."), - /** - * @deprecated Use MetastoreConf.EXECUTE_SET_UGI - */ - @Deprecated - METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true, - "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" + - "the client's reported user and group permissions. Note that this property must be set on \n" + - "both the client and server sides. Further note that its best effort. 
\n" + - "If client sets its to true and server sets it to false, client setting will be ignored."), - /** - * @deprecated Use MetastoreConf.PARTITION_NAME_WHITELIST_PATTERN - */ - @Deprecated - METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "", - "Partition names will be checked against this regex pattern and rejected if not matched."), - /** - * @deprecated Use MetastoreConf.INTEGER_JDO_PUSHDOWN - */ - @Deprecated - METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false, - "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" + - "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" + - "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" + - "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" + - "is also irrelevant."), - /** - * @deprecated Use MetastoreConf.TRY_DIRECT_SQL - */ - @Deprecated - METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true, - "Whether the Hive metastore should try to use direct SQL queries instead of the\n" + - "DataNucleus for certain read paths. This can improve metastore performance when\n" + - "fetching many partitions or column statistics by orders of magnitude; however, it\n" + - "is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" + - "the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" + - "work for all queries on your datastore. 
If all SQL queries fail (for example, your\n" + - "metastore is backed by MongoDB), you might want to disable this to save the\n" + - "try-and-fall-back cost."), - /** - * @deprecated Use MetastoreConf.DIRECT_SQL_PARTITION_BATCH_SIZE - */ - @Deprecated - METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0, - "Batch size for partition and other object retrieval from the underlying DB in direct\n" + - "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" + - "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" + - "may impede performance. -1 means no batching, 0 means automatic batching."), - /** - * @deprecated Use MetastoreConf.TRY_DIRECT_SQL_DDL - */ - @Deprecated - METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true, - "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" + - "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" + - "select query has incorrect syntax or something similar inside a transaction, the\n" + - "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" + - "should disable the usage of direct SQL inside transactions if that happens in your case."), - /** - * @deprecated Use MetastoreConf.DIRECT_SQL_MAX_QUERY_LENGTH - */ - @Deprecated - METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" + - " size of a query string (in KB)."), - /** - * @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE - */ - @Deprecated - METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000, - "The maximum number of values in a IN clause. 
Once exceeded, it will be broken into\n" + - " multiple OR separated IN clauses."), - /** - * @deprecated Use MetastoreConf.DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE - */ - @Deprecated - METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause", - 1000, "The maximum number of values in a VALUES clause for INSERT statement."), - /** - * @deprecated Use MetastoreConf.ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS - */ - @Deprecated - METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings",false, - "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " + - "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " + - "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " + - "pruning is the correct behaviour"), - /** - * @deprecated Use MetastoreConf.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES - */ - @Deprecated - METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES( - "hive.metastore.disallow.incompatible.col.type.changes", true, - "If true (default is false), ALTER TABLE operations which change the type of a\n" + - "column (say STRING) to an incompatible type (say MAP) are disallowed.\n" + - "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" + - "datatypes can be converted from string to any type. The map is also serialized as\n" + - "a string, which can be read as a string as well. However, with any binary\n" + - "serialization, this is not true. 
Blocking the ALTER TABLE prevents ClassCastExceptions\n" + - "when subsequently trying to access old partitions.\n" + - "\n" + - "Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" + - "not blocked.\n" + - "\n" + - "See HIVE-4409 for more details."), - /** - * @deprecated Use MetastoreConf.LIMIT_PARTITION_REQUEST - */ - @Deprecated - METASTORE_LIMIT_PARTITION_REQUEST("hive.metastore.limit.partition.request", -1, - "This limits the number of partitions that can be requested from the metastore for a given table.\n" + - "The default value \"-1\" means no limit."), NEWTABLEDEFAULTPARA("hive.table.parameters.default", "", "Default property values for newly created tables"), DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "", "Table Properties to copy over when executing a Create Table Like."), - /** - * @deprecated Use MetastoreConf.RAW_STORE_IMPL - */ - @Deprecated - METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore", - "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. \n" + - "This class is used to store and retrieval of raw metadata objects such as table, database"), - /** - * @deprecated Use MetastoreConf.TXN_STORE_IMPL - */ - @Deprecated - METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl", - "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler", - "Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. 
This " + - "class is used to store and retrieve transactions and locks"), - /** - * @deprecated Use MetastoreConf.CONNECTION_DRIVER - */ - @Deprecated - METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver", - "Driver class name for a JDBC metastore"), - /** - * @deprecated Use MetastoreConf.MANAGER_FACTORY_CLASS - */ - @Deprecated - METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass", - "org.datanucleus.api.jdo.JDOPersistenceManagerFactory", - "class implementing the jdo persistence"), - /** - * @deprecated Use MetastoreConf.EXPRESSION_PROXY_CLASS - */ - @Deprecated - METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy", - "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""), - /** - * @deprecated Use MetastoreConf.DETACH_ALL_ON_COMMIT - */ - @Deprecated - METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true, - "Detaches all objects from session so that they can be used after transaction is committed"), - /** - * @deprecated Use MetastoreConf.NON_TRANSACTIONAL_READ - */ - @Deprecated - METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true, - "Reads outside of transactions"), - /** - * @deprecated Use MetastoreConf.CONNECTION_USER_NAME - */ - @Deprecated - METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP", - "Username to use against metastore database"), - /** - * @deprecated Use MetastoreConf.END_FUNCTION_LISTENERS - */ - @Deprecated - METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "", - "List of comma separated listeners for the end of metastore functions."), - /** - * @deprecated Use MetastoreConf.PART_INHERIT_TBL_PROPS - */ - @Deprecated - METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "", - "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. 
\n" + - "* implies all the keys will get inherited."), - /** - * @deprecated Use MetastoreConf.FILTER_HOOK - */ - @Deprecated - METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl", - "Metastore hook class for filtering the metadata read results. If hive.security.authorization.manager" - + "is set to instance of HiveAuthorizerFactory, then this value is ignored."), + FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" + " to fire events for DML operations"), METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true, "Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " + "or drops partitions iteratively"), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_ENABLED - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", false, - "Whether aggregate stats caching is enabled or not."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_SIZE - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000, - "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_PARTITIONS - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000, - "Maximum number of partitions that are aggregated per cache node."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_FPP - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01, - "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."), - /** - * @deprecated Use 
MetastoreConf.AGGREGATE_STATS_CACHE_MAX_VARIANCE - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01, - "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_TTL - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS), - "Number of seconds for a cached node to be active in the cache before they become stale."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms", - new TimeValidator(TimeUnit.MILLISECONDS), - "Number of milliseconds a writer will wait to acquire the writelock before giving up."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_READER_WAIT - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms", - new TimeValidator(TimeUnit.MILLISECONDS), - "Number of milliseconds a reader will wait to acquire the readlock before giving up."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_MAX_FULL - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9, - "Maximum cache full % after which the cache cleaner thread kicks in."), - /** - * @deprecated Use MetastoreConf.AGGREGATE_STATS_CACHE_CLEAN_UNTIL - */ - @Deprecated - METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8, - "The cleaner thread cleans until cache reaches this % full size."), - /** - * @deprecated Use MetastoreConf.METRICS_ENABLED - */ - @Deprecated - METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the 
metastore."), - /** - * @deprecated Use MetastoreConf.INIT_METADATA_COUNT_ENABLED - */ - @Deprecated - METASTORE_INIT_METADATA_COUNT_ENABLED("hive.metastore.initial.metadata.count.enabled", true, - "Enable a metadata count at metastore startup for metrics."), - - // Metastore SSL settings - /** - * @deprecated Use MetastoreConf.USE_SSL - */ - @Deprecated - HIVE_METASTORE_USE_SSL("hive.metastore.use.SSL", false, - "Set this to true for using SSL encryption in HMS server."), - /** - * @deprecated Use MetastoreConf.SSL_KEYSTORE_PATH - */ - @Deprecated - HIVE_METASTORE_SSL_KEYSTORE_PATH("hive.metastore.keystore.path", "", - "Metastore SSL certificate keystore location."), - /** - * @deprecated Use MetastoreConf.SSL_KEYSTORE_PASSWORD - */ - @Deprecated - HIVE_METASTORE_SSL_KEYSTORE_PASSWORD("hive.metastore.keystore.password", "", - "Metastore SSL certificate keystore password."), - /** - * @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PATH - */ - @Deprecated - HIVE_METASTORE_SSL_TRUSTSTORE_PATH("hive.metastore.truststore.path", "", - "Metastore SSL certificate truststore location."), - /** - * @deprecated Use MetastoreConf.SSL_TRUSTSTORE_PASSWORD - */ - @Deprecated - HIVE_METASTORE_SSL_TRUSTSTORE_PASSWORD("hive.metastore.truststore.password", "", - "Metastore SSL certificate truststore password."), - - // Parameters for exporting metadata on table drop (requires the use of the) - // org.apache.hadoop.hive.ql.parse.MetaDataExportListener preevent listener - /** - * @deprecated Use MetastoreConf.METADATA_EXPORT_LOCATION - */ - @Deprecated - METADATA_EXPORT_LOCATION("hive.metadata.export.location", "", - "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" + - "it is the location to which the metadata will be exported. 
The default is an empty string, which results in the \n" + - "metadata being exported to the current user's home directory on HDFS."), - /** - * @deprecated Use MetastoreConf.MOVE_EXPORTED_METADATA_TO_TRASH - */ - @Deprecated - MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true, - "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" + - "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" + - "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."), - // CLI CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""), CLIPRINTCURRENTDB("hive.cli.print.current.db", false, @@ -1539,11 +690,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal CLIPROMPT("hive.cli.prompt", "hive", "Command line prompt configuration value. Other hiveconf can be used in this configuration value. 
\n" + "Variable substitution will only be invoked at the Hive CLI startup."), - /** - * @deprecated Use MetastoreConf.FS_HANDLER_CLS - */ - @Deprecated - HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""), // Things we log in the jobconf @@ -1636,9 +782,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_LOAD_DATA_OWNER("hive.load.data.owner", "", "Set the owner of files loaded using load data in managed tables."), - @Deprecated - HIVEMAPREDMODE("hive.mapred.mode", null, - "Deprecated; use hive.strict.checks.* settings instead."), HIVEALIAS("hive.alias", "", ""), HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"), HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"), @@ -1792,10 +935,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Whether to enable using Column Position Alias in Group By"), HIVE_ORDERBY_POSITION_ALIAS("hive.orderby.position.alias", true, "Whether to enable using Column Position Alias in Order By"), - @Deprecated - HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false, - "Whether to enable using Column Position Alias in Group By or Order By (deprecated).\n" + - "Use " + HIVE_ORDERBY_POSITION_ALIAS.varname + " or " + HIVE_GROUPBY_POSITION_ALIAS.varname + " instead"), HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30, "Whether a new map-reduce job should be launched for grouping sets/rollups/cubes.\n" + "For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" + @@ -1843,27 +982,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", "The default SerDe Hive will use for storage formats that do not specify a SerDe."), - /** - * @deprecated Use 
MetastoreConf.SERDES_USING_METASTORE_FOR_SCHEMA - */ - @Deprecated - SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema", - "org.apache.hadoop.hive.ql.io.orc.OrcSerde," + - "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + - "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," + - "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," + - "org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," + - "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," + - "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," + - "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe", - "SerDes retrieving schema from metastore. This is an internal parameter."), - - @Deprecated - HIVE_LEGACY_SCHEMA_FOR_ALL_SERDES("hive.legacy.schema.for.all.serdes", - false, - "A backward compatibility setting for external metastore users that do not handle \n" + - SERDESUSINGMETASTOREFORSCHEMA.varname + " correctly. This may be removed at any time."), - HIVEHISTORYFILELOC("hive.querylog.location", "${system:java.io.tmpdir}" + File.separator + "${system:user.name}", "Location of Hive run time structured log file"), @@ -2305,9 +1423,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Currently it only works with Apache Tez. This should always be set to true. \n" + "Since it is a new feature, it has been made configurable."), - @Deprecated - HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false, - "Deprecated. Use hive.optimize.sort.dynamic.partition.threshold instead."), HIVEOPTSORTDYNAMICPARTITIONTHRESHOLD("hive.optimize.sort.dynamic.partition.threshold", 0, "When enabled dynamic partitioning column will be globally sorted.\n" + "This way we can keep only one record writer open for each partition value\n" + @@ -2435,18 +1550,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "The storage that stores temporary Hive statistics. 
In filesystem based statistics collection ('fs'), \n" + "each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" + "after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB - /** - * @deprecated Use MetastoreConf.STATS_DEFAULT_PUBLISHER - */ - @Deprecated - HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "", - "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."), - /** - * @deprecated Use MetastoreConf.STATS_DEFAULT_AGGRETATOR - */ - @Deprecated - HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "", - "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."), CLIENT_STATS_COUNTERS("hive.client.stats.counters", "", "Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \n" + "Non-display names should be used"), @@ -2471,31 +1574,12 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed."), HIVE_STATS_NDV_ALGO("hive.stats.ndv.algo", "hll", new PatternSet("hll", "fm"), "hll and fm stand for HyperLogLog and FM-sketch, respectively for computing ndv."), - /** - * @deprecated Use MetastoreConf.STATS_FETCH_BITVECTOR - */ - @Deprecated - HIVE_STATS_FETCH_BITVECTOR("hive.stats.fetch.bitvector", false, - "Whether we fetch bitvector when we compute ndv. Users can turn it off if they want to use old schema"), + // standard error allowed for ndv estimates for FM-sketch. A lower value indicates higher accuracy and a // higher compute cost. HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0, "Standard error expressed in percentage. 
Provides a tradeoff between accuracy and compute cost. \n" + "A lower value for error indicates higher accuracy and a higher compute cost."), - /** - * @deprecated Use MetastoreConf.STATS_NDV_TUNER - */ - @Deprecated - HIVE_METASTORE_STATS_NDV_TUNER("hive.metastore.stats.ndv.tuner", (float)0.0, - "Provides a tunable parameter between the lower bound and the higher bound of ndv for aggregate ndv across all the partitions. \n" + - "The lower bound is equal to the maximum of ndv of all the partitions. The higher bound is equal to the sum of ndv of all the partitions.\n" + - "Its value should be between 0.0 (i.e., choose lower bound) and 1.0 (i.e., choose higher bound)"), - /** - * @deprecated Use MetastoreConf.STATS_NDV_DENSITY_FUNCTION - */ - @Deprecated - HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false, - "Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"), HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only // if length of variable length data type cannot be determined this length will be used. HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100, @@ -2645,18 +1729,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "are not hidden by the INSERT OVERWRITE."), HIVE_TXN_STATS_ENABLED("hive.txn.stats.enabled", true, "Whether Hive supports transactional stats (accurate stats for transactional tables)"), - /** - * @deprecated Use MetastoreConf.TXN_TIMEOUT - */ - @Deprecated - HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS), - "time after which transactions are declared aborted if the client has not sent a heartbeat."), - /** - * @deprecated Use MetastoreConf.TXN_HEARTBEAT_THREADPOOL_SIZE - */ - @Deprecated - HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE("hive.txn.heartbeat.threadpool.size", 5, "The number of " + - "threads to use for heartbeating. For Hive CLI, 1 is enough. 
For HiveServer2, we need a few"), + TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT("hive.txn.manager.dump.lock.state.on.acquire.timeout", false, "Set this to true so that when attempt to acquire a lock on resource times out, the current state" + " of the lock manager is dumped to log file. This is for debugging. See also " + @@ -2667,64 +1740,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "4: Make the table 'quarter-acid' as it only supports insert. But it doesn't require ORC or bucketing.\n" + "This is intended to be used as an internal property for future versions of ACID. (See\n" + "HIVE-14035 for details. User sets it tblproperites via transactional_properties.)", true), - /** - * @deprecated Use MetastoreConf.MAX_OPEN_TXNS - */ - @Deprecated - HIVE_MAX_OPEN_TXNS("hive.max.open.txns", 100000, "Maximum number of open transactions. If \n" + - "current open transactions reach this limit, future open transaction requests will be \n" + - "rejected, until this number goes below the limit."), - /** - * @deprecated Use MetastoreConf.COUNT_OPEN_TXNS_INTERVAL - */ - @Deprecated - HIVE_COUNT_OPEN_TXNS_INTERVAL("hive.count.open.txns.interval", "1s", - new TimeValidator(TimeUnit.SECONDS), "Time in seconds between checks to count open transactions."), - /** - * @deprecated Use MetastoreConf.TXN_MAX_OPEN_BATCH - */ - @Deprecated - HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000, - "Maximum number of transactions that can be fetched in one call to open_txns().\n" + - "This controls how many transactions streaming agents such as Flume or Storm open\n" + - "simultaneously. The streaming agent then writes that number of entries into a single\n" + - "file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" + - "of delta files created by streaming agents. 
But it also increases the number of open\n" + - "transactions that Hive has to track at any given time, which may negatively affect\n" + - "read performance."), - /** - * @deprecated Use MetastoreConf.TXN_RETRYABLE_SQLEX_REGEX - */ - @Deprecated - HIVE_TXN_RETRYABLE_SQLEX_REGEX("hive.txn.retryable.sqlex.regex", "", "Comma separated list\n" + - "of regular expression patterns for SQL state, error code, and error message of\n" + - "retryable SQLExceptions, that's suitable for the metastore DB.\n" + - "For example: Can't serialize.*,40001$,^Deadlock,.*ORA-08176.*\n" + - "The string that the regex will be matched against is of the following form, where ex is a SQLException:\n" + - "ex.getMessage() + \" (SQLState=\" + ex.getSQLState() + \", ErrorCode=\" + ex.getErrorCode() + \")\""), - /** - * @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_ON - */ - @Deprecated - HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false, - "Whether to run the initiator and cleaner threads on this metastore instance or not.\n" + - "Set this to true on one instance of the Thrift metastore service as part of turning\n" + - "on Hive transactions. For a complete list of parameters required for turning on\n" + - "transactions, see hive.txn.manager."), - /** - * @deprecated Use MetastoreConf.COMPACTOR_WORKER_THREADS - */ - @Deprecated - HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0, - "How many compactor worker threads to run on this metastore instance. Set this to a\n" + - "positive number on one or more instances of the Thrift metastore service as part of\n" + - "turning on Hive transactions. For a complete list of parameters required for turning\n" + - "on transactions, see hive.txn.manager.\n" + - "Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" + - "themselves. 
Increasing the number of worker threads will decrease the time it takes\n" + - "tables or partitions to be compacted once they are determined to need compaction.\n" + - "It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" + - "will be running in the background."), HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s", new TimeValidator(TimeUnit.SECONDS), @@ -2755,14 +1770,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000, "Number of aborted transactions involving a given table or partition that will trigger\n" + "a major compaction."), - /** - * @deprecated Use MetastoreConf.COMPACTOR_INITIATOR_FAILED_THRESHOLD - */ - @Deprecated - COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2, - new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " + - "after which automatic compactions will not be scheduled any more. Note that this must be less " + - "than hive.compactor.history.retention.failed."), HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"), @@ -2781,52 +1788,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "This is set to compactor from within the query based compactor. 
This enables the Tez SplitGrouper " + "to group splits based on their bucket number, so that all rows from different bucket files " + " for the same bucket number can end up in the same bucket file after the compaction."), - /** - * @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_SUCCEEDED - */ - @Deprecated - COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3, - new RangeValidator(0, 100), "Determines how many successful compaction records will be " + - "retained in compaction history for a given table/partition."), - /** - * @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_FAILED - */ - @Deprecated - COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3, - new RangeValidator(0, 100), "Determines how many failed compaction records will be " + - "retained in compaction history for a given table/partition."), - /** - * @deprecated Use MetastoreConf.COMPACTOR_HISTORY_RETENTION_ATTEMPTED - */ - @Deprecated - COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2, - new RangeValidator(0, 100), "Determines how many attempted compaction records will be " + - "retained in compaction history for a given table/partition."), - /** - * @deprecated Use MetastoreConf.COMPACTOR_HISTORY_REAPER_INTERVAL - */ - @Deprecated - COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m", - new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often compaction history reaper runs"), - /** - * @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_START - */ - @Deprecated - HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s", - new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of 1st reaper run after metastore start"), - /** - * @deprecated Use MetastoreConf.TIMEDOUT_TXN_REAPER_INTERVAL - */ - @Deprecated - HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s", - new TimeValidator(TimeUnit.MILLISECONDS), "Time 
interval describing how often the reaper runs"), - /** - * @deprecated Use MetastoreConf.WRITE_SET_REAPER_INTERVAL - */ - @Deprecated - WRITE_SET_REAPER_INTERVAL("hive.writeset.reaper.interval", "60s", - new TimeValidator(TimeUnit.MILLISECONDS), "Frequency of WriteSet reaper runs"), - MERGE_CARDINALITY_VIOLATION_CHECK("hive.merge.cardinality.check", true, "Set to true to ensure that each SQL Merge statement ensures that for each row in the target\n" + "table there is at most 1 matching row in the source table per SQL Specification."), @@ -3142,13 +2103,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Comma separated list of reporter implementation classes for metric class " + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics. Overrides " + "HIVE_METRICS_REPORTER conf if present"), - @Deprecated - HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "", - "Reporter implementations for metric class " - + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics;" + - "Deprecated, use HIVE_CODAHALE_METRICS_REPORTER_CLASSES instead. This configuraiton will be" - + " overridden by HIVE_CODAHALE_METRICS_REPORTER_CLASSES if present. " + - "Comma separated list of JMX, CONSOLE, JSON_FILE, HADOOP2"), HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json", "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. 
" + "This file will get overwritten at every interval."), @@ -3444,9 +2398,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Domain for the HS2 generated cookies"), HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null, "Path for the HS2 generated cookies"), - @Deprecated - HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true, - "Deprecated: Secure attribute of the HS2 generated cookie (this is automatically enabled for SSL enabled HiveServer2)."), HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true, "HttpOnly attribute of the HS2 generated cookie."), @@ -3941,15 +2892,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal " none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" + " column: implies column names can contain any character." ), - /** - * @deprecated Use MetastoreConf.SUPPORT_SPECIAL_CHARACTERS_IN_TABLE_NAMES - */ - @Deprecated - HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true, - "This flag should be set to true to enable support for special characters in table names.\n" - + "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n" - + "The only supported special character right now is '/'. This flag applies only to quoted table names.\n" - + "The default value is true."), HIVE_CREATE_TABLES_AS_INSERT_ONLY("hive.create.as.insert.only", false, "Whether the eligible tables should be created as ACID insert-only by default. 
Does \n" + "not apply to external tables, the ones using storage handlers, etc."), @@ -4646,35 +3588,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Merge adjacent joins into a single n-way join"), HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null), "If value is greater than 0 logs in fixed intervals of size n rather than exponentially."), - /** - * @deprecated Use MetastoreConf.MSCK_PATH_VALIDATION - */ - @Deprecated - HIVE_MSCK_PATH_VALIDATION("hive.msck.path.validation", "throw", - new StringSet("throw", "skip", "ignore"), "The approach msck should take with HDFS " + - "directories that are partition-like but contain unsupported characters. 'throw' (an " + - "exception) is the default; 'skip' will skip the invalid directories and still repair the" + - " others; 'ignore' will skip the validation (legacy behavior, causes bugs in many cases)"), - /** - * @deprecated Use MetastoreConf.MSCK_REPAIR_BATCH_SIZE - */ - @Deprecated - HIVE_MSCK_REPAIR_BATCH_SIZE( - "hive.msck.repair.batch.size", 3000, - "Batch size for the msck repair command. If the value is greater than zero,\n " - + "it will execute batch wise with the configured batch size. In case of errors while\n" - + "adding unknown partitions the batch size is automatically reduced by half in the subsequent\n" - + "retry attempt. 
The default value is 3000 which means it will execute in the batches of 3000."), - /** - * @deprecated Use MetastoreConf.MSCK_REPAIR_BATCH_MAX_RETRIES - */ - @Deprecated - HIVE_MSCK_REPAIR_BATCH_MAX_RETRIES("hive.msck.repair.batch.max.retries", 4, - "Maximum number of retries for the msck repair command when adding unknown partitions.\n " - + "If the value is greater than zero it will retry adding unknown partitions until the maximum\n" - + "number of attempts is reached or batch size is reduced to 0, whichever is earlier.\n" - + "In each retry attempt it will reduce the batch size by a factor of 2 until it reaches zero.\n" - + "If the value is set to zero it will retry until the batch size becomes zero as described above."), HIVE_SERVER2_LLAP_CONCURRENT_QUERIES("hive.server2.llap.concurrent.queries", -1, "The number of queries allowed in parallel via llap. Negative number implies 'infinite'."), HIVE_TEZ_ENABLE_MEMORY_MANAGER("hive.tez.enable.memory.manager", true, @@ -4726,7 +3639,7 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "hive.driver.parallel.compilation.global.limit", "Comma separated list of configuration options which are immutable at runtime"), HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list", - METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "," + DRUID_METADATA_DB_PASSWORD.varname // Adding the S3 credentials from Hadoop config to be hidden + ",fs.s3.awsAccessKeyId" @@ -5474,10 +4387,6 @@ public static ConfVars getConfVars(String name) { return vars.get(name); } - public static ConfVars getMetaConf(String name) { - return metaConfs.get(name); - } - public String getVar(ConfVars var) { return getVar(this, var); } @@ -5579,21 +4488,6 @@ private void initialize(Class cls) { addResource(hiveSiteURL); } - // if embedded metastore is to be used as per config so far - // then this is considered like the metastore server case - String msUri = 
this.getVar(HiveConf.ConfVars.METASTOREURIS); - // This is hackery, but having hive-common depend on standalone-metastore is really bad - // because it will pull all of the metastore code into every module. We need to check that - // we aren't using the standalone metastore. If we are, we should treat it the same as a - // remote metastore situation. - if (msUri == null || msUri.isEmpty()) { - msUri = this.get("metastore.thrift.uris"); - } - LOG.debug("Found metastore URI of " + msUri); - if(HiveConfUtil.isEmbeddedMetaStore(msUri)){ - setLoadMetastoreConfig(true); - } - // load hivemetastore-site.xml if this is metastore and file exists if (isLoadMetastoreConfig() && hivemetastoreSiteUrl != null) { addResource(hivemetastoreSiteUrl); @@ -5638,10 +4532,6 @@ private void initialize(Class cls) { auxJars = StringUtils.join(FileUtils.getJarFilesByPath(this.get(ConfVars.HIVEAUXJARS.varname), this), ','); } - if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) { - setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false); - } - if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { List trimmed = new ArrayList(); for (Map.Entry entry : this) { @@ -5737,7 +4627,6 @@ public ZoneId getLocalTimeZone() { ConfVars.HIVEHASHTABLETHRESHOLD.varname, ConfVars.HIVEIGNOREMAPJOINHINT.varname, ConfVars.HIVELIMITMAXROWSIZE.varname, - ConfVars.HIVEMAPREDMODE.varname, ConfVars.HIVEMAPSIDEAGGREGATE.varname, ConfVars.HIVEOPTIMIZEMETADATAQUERIES.varname, ConfVars.HIVEROWOFFSET.varname, @@ -5765,7 +4654,6 @@ public ZoneId getLocalTimeZone() { ConfVars.HIVE_SCHEMA_EVOLUTION.varname, ConfVars.HIVE_SERVER2_LOGGING_OPERATION_LEVEL.varname, ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_SERIALIZE_IN_TASKS.varname, - ConfVars.HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES.varname, ConfVars.JOB_DEBUG_CAPTURE_STACKTRACES.varname, ConfVars.JOB_DEBUG_TIMEOUT.varname, ConfVars.LLAP_IO_ENABLED.varname, @@ -6164,10 +5052,13 @@ public static void setLoadHiveServer2Config(boolean loadHiveServer2Config) { "Load into 
bucketed tables", ConfVars.HIVE_STRICT_CHECKS_BUCKETING); private static String makeMessage(String what, ConfVars setting) { - return what + " are disabled for safety reasons. If you know what you are doing, please set " - + setting.varname + " to false and make sure that " + ConfVars.HIVEMAPREDMODE.varname + - " is not set to 'strict' to proceed. Note that you may get errors or incorrect " + - "results if you make a mistake while using some of the unsafe features."; + return what + + " are disabled for safety reasons. If you know what you are " + + "doing, please set " + setting.varname + + " to false and make sure that hive.strict.checks.*" + + " allow this action to proceed. Note that you may get errors " + + "or incorrect results if you make a mistake while using some " + + "of the unsafe features."; } public static String checkNoLimit(Configuration conf) { @@ -6192,8 +5083,7 @@ public static String checkBucketing(Configuration conf) { } private static boolean isAllowed(Configuration conf, ConfVars setting) { - String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null); - return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting); + return !HiveConf.getBoolVar(conf, setting); } } diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java index ebe64234c0..c45b2f5ee3 100644 --- a/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConfUtil.java @@ -52,16 +52,6 @@ public class HiveConfUtil { private static final String CLASS_NAME = HiveConfUtil.class.getName(); private static final Logger LOG = LoggerFactory.getLogger(CLASS_NAME); - /** - * Check if metastore is being used in embedded mode. 
- * This utility function exists so that the logic for determining the mode is same - * in HiveConf and HiveMetaStoreClient - * @param msUri - metastore server uri - * @return - */ - public static boolean isEmbeddedMetaStore(String msUri) { - return (msUri == null) ? true : msUri.trim().isEmpty(); - } /** * Dumps all HiveConf for debugging. Convenient to dump state at process start up and log it diff --git a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java b/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java deleted file mode 100644 index e89a605bb3..0000000000 --- a/common/src/test/org/apache/hadoop/hive/common/metrics/metrics2/TestCodahaleReportersConf.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package org.apache.hadoop.hive.common.metrics.metrics2; - -import com.fasterxml.jackson.databind.JsonNode; -import com.fasterxml.jackson.databind.ObjectMapper; -import java.lang.reflect.InvocationTargetException; -import org.apache.hadoop.fs.CommonConfigurationKeysPublic; -import org.apache.hadoop.hive.common.metrics.MetricsTestUtils; -import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; -import org.apache.hadoop.hive.conf.HiveConf; -import org.junit.After; -import org.junit.Assert; -import org.junit.Test; - -import java.io.File; - -/** - * Unit tests for Codahale reporter config backward compatibility - */ -public class TestCodahaleReportersConf { - - private static File workDir = new File(System.getProperty("test.tmp.dir")); - private static File jsonReportFile; - - @After - public void after() throws Exception { - MetricsFactory.close(); - } - - /** - * Tests that the deprecated HIVE_METRICS_REPORTER config is used if the HIVE_CODAHALE_METRICS_REPORTER_CLASSES is missing. 
- */ - @Test - public void testFallbackToDeprecatedConfig() throws Exception { - - HiveConf conf = new HiveConf(); - - jsonReportFile = new File(workDir, "json_reporting"); - jsonReportFile.delete(); - - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local"); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName()); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, "JMX, JSON"); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString()); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms"); - - MetricsFactory.init(conf); - - int runs = 5; - for (int i = 0; i < runs; i++) { - MetricsFactory.getInstance().incrementCounter("count2"); - } - - // we expect json file to be updated - byte[] jsonData = MetricsTestUtils.getFileData(jsonReportFile.getAbsolutePath(), 2000, 3); - ObjectMapper objectMapper = new ObjectMapper(); - - JsonNode rootNode = objectMapper.readTree(jsonData); - JsonNode countersNode = rootNode.path("counters"); - JsonNode methodCounterNode = countersNode.path("count2"); - JsonNode countNode = methodCounterNode.path("count"); - Assert.assertEquals(countNode.asInt(), 5); - } - - /** - * Tests that the deprecated HIVE_METRICS_REPORTER config is not used if - * HIVE_CODAHALE_METRICS_REPORTER_CLASSES is present. - * - * The deprecated config specifies json reporters whereas the newer one doesn't. Validates that - * the JSON file is not created. 
- */ - @Test - public void testNoFallback() throws Exception { - - HiveConf conf = new HiveConf(); - - jsonReportFile = new File(workDir, "json_reporting"); - jsonReportFile.delete(); - - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local"); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName()); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, "JMX, JSON"); - conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, - "org.apache.hadoop.hive.common.metrics.metrics2.JmxMetricsReporter"); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString()); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms"); - - MetricsFactory.init(conf); - - int runs = 5; - for (int i = 0; i < runs; i++) { - MetricsFactory.getInstance().incrementCounter("count2"); - } - - Assert.assertFalse(jsonReportFile.exists()); - } - - /** - * Tests that the deprecated HIVE_METRICS_REPORTER config is not used if - * HIVE_CODAHALE_METRICS_REPORTER_CLASSES is present but incorrect. - * - * The deprecated config specifies json reporters whereas the newer one doesn't. Validates that - * the JSON file is not created. 
- */ - @Test - public void testNoFallbackOnIncorrectConf() throws Exception { - - HiveConf conf = new HiveConf(); - - jsonReportFile = new File(workDir, "json_reporting"); - jsonReportFile.delete(); - - conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, "local"); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_CLASS, CodahaleMetrics.class.getCanonicalName()); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_REPORTER, "JMX, JSON"); - conf.setVar(HiveConf.ConfVars.HIVE_CODAHALE_METRICS_REPORTER_CLASSES, - "org.apache.hadoop.hive.common.metrics.NonExistentReporter"); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_LOCATION, jsonReportFile.toString()); - conf.setVar(HiveConf.ConfVars.HIVE_METRICS_JSON_FILE_INTERVAL, "100ms"); - - try { - MetricsFactory.init(conf); - } catch (InvocationTargetException expectedException) { - - } - - Assert.assertFalse(jsonReportFile.exists()); - } -} diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java index b82b4133f1..43141b556a 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConf.java @@ -146,7 +146,6 @@ public void testHiddenConfig() throws Exception { } ArrayList hiddenList = Lists.newArrayList( - HiveConf.ConfVars.METASTOREPWD.varname, HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname, "fs.s3.awsSecretAccessKey", "fs.s3n.awsSecretAccessKey", diff --git a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java index fd41c5bb98..6edee5d68c 100644 --- a/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java +++ b/common/src/test/org/apache/hadoop/hive/conf/TestHiveConfRestrictList.java @@ -76,15 +76,13 @@ public void testRestrictList() throws Exception { @Test public void testAppendRestriction() throws Exception { String appendListStr = 
ConfVars.SCRATCHDIR.varname + "," + - ConfVars.LOCALSCRATCHDIR.varname + "," + - ConfVars.METASTOREURIS.varname; + ConfVars.LOCALSCRATCHDIR.varname; conf.addToRestrictList(appendListStr); // check if the new configs are added to HIVE_CONF_RESTRICTED_LIST String newRestrictList = conf.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST); assertTrue(newRestrictList.contains(ConfVars.SCRATCHDIR.varname)); assertTrue(newRestrictList.contains(ConfVars.LOCALSCRATCHDIR.varname)); - assertTrue(newRestrictList.contains(ConfVars.METASTOREURIS.varname)); // check if the old values are still there in HIVE_CONF_RESTRICTED_LIST assertTrue(newRestrictList.contains(ConfVars.HIVETESTMODEPREFIX.varname)); @@ -93,7 +91,6 @@ public void testAppendRestriction() throws Exception { verifyRestriction(ConfVars.HIVETESTMODEPREFIX.varname, "foo"); verifyRestriction(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname, "foo"); verifyRestriction(ConfVars.LOCALSCRATCHDIR.varname, "foo"); - verifyRestriction(ConfVars.METASTOREURIS.varname, "foo"); } private void verifyRestriction(String varName, String newVal) { diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java index 3e18e91423..70275b9e08 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/cli/TestSemanticAnalysis.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.ErrorMsg; @@ -69,7 +70,8 @@ public void setUpHCatDriver() throws IOException { "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe"); 
hcatConf.set(HiveConf.ConfVars.SEMANTIC_ANALYZER_HOOK.varname, HCatSemanticAnalyzer.class.getName()); - hcatConf.setBoolVar(HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false); + MetastoreConf.setBoolVar(hcatConf, + MetastoreConf.ConfVars.DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, false); hcatDriver = DriverFactory.newDriver(hcatConf); SessionState.start(new CliSessionState(hcatConf)); } diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java index c955aa5021..ef79c333b3 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/mapreduce/TestPassProperties.java @@ -30,6 +30,7 @@ import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.session.SessionState; @@ -61,7 +62,8 @@ public void Initialize() throws Exception { hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, + TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java index 
ae292eb78c..43ff4d8f78 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestE2EScenarios.java @@ -29,6 +29,7 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.hive.cli.CliSessionState; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import org.apache.hadoop.hive.ql.session.SessionState; @@ -87,7 +88,8 @@ public void setUp() throws Exception { hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, + TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java index beb4fe9f4b..ebc650042e 100644 --- a/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java +++ b/hcatalog/hcatalog-pig-adapter/src/test/java/org/apache/hive/hcatalog/pig/TestHCatLoaderEncryption.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.common.io.SessionStream; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.Warehouse; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.DriverFactory; import org.apache.hadoop.hive.ql.IDriver; import 
org.apache.hadoop.hive.ql.io.StorageFormats; @@ -162,7 +163,8 @@ public void setup() throws Exception { hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, + TEST_WAREHOUSE_DIR); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java index e249b7775e..edbf8d0355 100644 --- a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/HiveEndPoint.java @@ -44,6 +44,7 @@ import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.TxnToWriteId; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hive.hcatalog.common.HCatUtil; import org.apache.hadoop.security.UserGroupInformation; @@ -470,10 +471,12 @@ private static IMetaStoreClient getMetaStoreClient(HiveEndPoint endPoint, HiveCo throws ConnectionError { if (endPoint.metaStoreUri!= null) { - conf.setVar(HiveConf.ConfVars.METASTOREURIS, endPoint.metaStoreUri); + MetastoreConf.setVar(conf, MetastoreConf.ConfVars.THRIFT_URIS, + endPoint.metaStoreUri); } if(secureMode) { - conf.setBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL,true); + MetastoreConf.setBoolVar(conf, MetastoreConf.ConfVars.USE_THRIFT_SASL, + true); } try { return HCatUtil.getHiveMetastoreClient(conf); @@ -1028,7 +1031,8 @@ private static LockRequest createLockRequest(final 
HiveEndPoint hiveEndPoint, static HiveConf createHiveConf(Class clazz, String metaStoreUri) { HiveConf conf = new HiveConf(clazz); if (metaStoreUri!= null) { - setHiveConf(conf, HiveConf.ConfVars.METASTOREURIS, metaStoreUri); + setHiveConf(conf, MetastoreConf.ConfVars.THRIFT_URIS.getHiveName(), + metaStoreUri); } HiveEndPoint.overrideConfSettings(conf); return conf; @@ -1038,11 +1042,19 @@ private static void overrideConfSettings(HiveConf conf) { setHiveConf(conf, HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); setHiveConf(conf, HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, true); - setHiveConf(conf, HiveConf.ConfVars.METASTORE_EXECUTE_SET_UGI, true); + setHiveConf(conf, MetastoreConf.ConfVars.EXECUTE_SET_UGI.getHiveName(), + Boolean.TRUE.toString()); // Avoids creating Tez Client sessions internally as it takes much longer currently setHiveConf(conf, HiveConf.ConfVars.HIVE_EXECUTION_ENGINE, "mr"); } + private static void setHiveConf(HiveConf conf, String prop, String value) { + if( LOG.isDebugEnabled() ) { + LOG.debug("Overriding HiveConf setting : " + prop + " = " + value); + } + conf.set(prop, value); + } + private static void setHiveConf(HiveConf conf, HiveConf.ConfVars var, String value) { if( LOG.isDebugEnabled() ) { LOG.debug("Overriding HiveConf setting : " + var + " = " + value); diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java index 6c33f63298..aac3647bd5 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveClientCache.java @@ -36,6 +36,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.annotation.NoReconnect; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.shims.Utils; import 
org.apache.hadoop.security.UserGroupInformation; import org.apache.hive.common.util.ShutdownHookManager; @@ -322,7 +323,8 @@ public ICacheableMetaStoreClient call() throws MetaException { final private int threadId; private HiveClientCacheKey(HiveConf hiveConf, final int threadId) throws IOException, LoginException { - this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS); + this.metaStoreURIs = + MetastoreConf.getVar(hiveConf, MetastoreConf.ConfVars.THRIFT_URIS); ugi = Utils.getUGI(); this.hiveConf = hiveConf; this.threadId = threadId; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index 943aa383bb..2665b6e7c4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -34,6 +34,7 @@ Licensed to the Apache Software Foundation (ASF) under one import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; import org.apache.hadoop.hive.metastore.api.TxnAbortedException; import org.apache.hadoop.hive.metastore.api.TxnToWriteId; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; import org.apache.hadoop.hive.metastore.txn.TxnCommonUtils; import org.apache.hadoop.hive.ql.Context; @@ -811,7 +812,8 @@ private synchronized void initHeartbeatExecutorService() { } heartbeatExecutorService = Executors.newScheduledThreadPool( - conf.getIntVar(HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE), + MetastoreConf.getIntVar(conf, + MetastoreConf.ConfVars.TXN_HEARTBEAT_THREADPOOL_SIZE), new ThreadFactory() { private final AtomicInteger threadCounter = new AtomicInteger(); @@ -927,11 +929,11 @@ public void replAllocateTableWriteIdsBatch(String dbName, String tableName, Stri public static long getHeartbeatInterval(Configuration conf) throws LockException { // Retrieve HIVE_TXN_TIMEOUT in MILLISECONDS (it's 
defined as SECONDS), // then divide it by 2 to give us a safety factor. - long interval = - HiveConf.getTimeVar(conf, HiveConf.ConfVars.HIVE_TXN_TIMEOUT, TimeUnit.MILLISECONDS) / 2; + long interval = MetastoreConf.getTimeVar(conf, + MetastoreConf.ConfVars.TXN_TIMEOUT, TimeUnit.MILLISECONDS) / 2; if (interval == 0) { - throw new LockException(HiveConf.ConfVars.HIVE_TXN_TIMEOUT.toString() + " not set," + - " heartbeats won't be sent"); + throw new LockException(MetastoreConf.ConfVars.TXN_TIMEOUT + " not set," + + " heartbeats won't be sent"); } return interval; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 6143e85664..2e89be7c89 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -2868,8 +2868,9 @@ private void constructOneLBLocationMap(FileStatus fSta, // fetch all the partitions matching the part spec using the partition iterable // this way the maximum batch size configuration parameter is considered - PartitionIterable partitionIterable = new PartitionIterable(Hive.get(), tbl, partSpec, - conf.getInt(MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX.getVarname(), 300)); + PartitionIterable partitionIterable = + new PartitionIterable(Hive.get(), tbl, partSpec, MetastoreConf + .getIntVar(conf, MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX)); Iterator iterator = partitionIterable.iterator(); // Match valid partition path to partitions @@ -3746,7 +3747,8 @@ public boolean dropPartition(String dbName, String tableName, List parti } List partitions = new ArrayList(partNames.size()); - int batchSize = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + int batchSize = MetastoreConf.getIntVar(conf, + MetastoreConf.ConfVars.BATCH_RETRIEVE_MAX); // TODO: might want to increase the default batch size. 1024 is viable; MS gets OOM if too high. 
int nParts = partNames.size(); int nBatches = nParts / batchSize; @@ -5080,7 +5082,8 @@ public synchronized IMetaStoreClient getMSC( } throw ex; } - String metaStoreUris = conf.getVar(HiveConf.ConfVars.METASTOREURIS); + String metaStoreUris = + MetastoreConf.getAsString(conf, MetastoreConf.ConfVars.THRIFT_URIS); if (!org.apache.commons.lang3.StringUtils.isEmpty(metaStoreUris)) { // get a synchronized wrapper if the meta store is remote. metaStoreClient = HiveMetaStoreClient.newSynchronizedClient(metaStoreClient); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java index ef2ebac82c..ce4baba454 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java @@ -4007,9 +4007,7 @@ private RelNode genOBLogicalPlan(QB qb, Pair selPair, Map astToExprNDescMap = null; ExprNodeDesc obExprNDesc = null; - boolean isBothByPos = HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS); - boolean isObyByPos = isBothByPos - || HiveConf.getBoolVar(conf, ConfVars.HIVE_ORDERBY_POSITION_ALIAS); + boolean isObyByPos = HiveConf.getBoolVar(conf, ConfVars.HIVE_ORDERBY_POSITION_ALIAS); // replace each of the position alias in ORDERBY with the actual column if (ref != null && ref.getToken().getType() == HiveParser.Number) { if (isObyByPos) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 281025ff67..74117ac893 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -1790,7 +1790,6 @@ boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1, PlannerContext plannerCtx) qb.getParseInfo().setNoScanAnalyzeCommand(this.noscan); // Allow analyze the whole table and dynamic partitions HiveConf.setVar(conf, 
HiveConf.ConfVars.DYNAMICPARTITIONINGMODE, "nonstrict"); - HiveConf.setVar(conf, HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); break; @@ -13991,11 +13990,10 @@ private void validateCreateView() // Process the position alias in GROUPBY and ORDERBY void processPositionAlias(ASTNode ast) throws SemanticException { - boolean isBothByPos = HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_ORDERBY_POSITION_ALIAS); - boolean isGbyByPos = isBothByPos - || HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_POSITION_ALIAS); - boolean isObyByPos = isBothByPos - || HiveConf.getBoolVar(conf, ConfVars.HIVE_ORDERBY_POSITION_ALIAS); + boolean isGbyByPos = + HiveConf.getBoolVar(conf, ConfVars.HIVE_GROUPBY_POSITION_ALIAS); + boolean isObyByPos = + HiveConf.getBoolVar(conf, ConfVars.HIVE_ORDERBY_POSITION_ALIAS); Deque stack = new ArrayDeque(); stack.push(ast); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java index 563d0530d1..c85c65816c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.ddl.table.creation.CreateTableDesc; import org.apache.hadoop.hive.ql.ddl.view.create.CreateViewDesc; @@ -1018,7 +1019,8 @@ public static String stripQuotes(String val) { * @return */ public static String removePrefixFromWarehouseConfig(String origiKey) { - String prefix = SessionState.get().getConf().getVar(HiveConf.ConfVars.METASTOREWAREHOUSE); + String prefix = MetastoreConf.getAsString(SessionState.get().getConf(), + MetastoreConf.ConfVars.WAREHOUSE); if ((prefix != null) && 
(prefix.length() > 0)) { //Local file system is using pfile:/// {@link ProxyLocalFileSystem} prefix = prefix.replace("pfile:///", "pfile:/"); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index e224f2c348..0d73c6aa20 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -63,7 +63,6 @@ import org.apache.hadoop.hive.common.type.TimestampTZUtil; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; -import org.apache.hadoop.hive.conf.HiveConfUtil; import org.apache.hadoop.hive.metastore.ObjectStore; import org.apache.hadoop.hive.metastore.PersistenceManagerProvider; import org.apache.hadoop.hive.metastore.Warehouse; @@ -962,14 +961,15 @@ private void setAuthorizerV2Config() throws HiveException { if (sessionConf.get(CONFIG_AUTHZ_SETTINGS_APPLIED_MARKER, "").equals(Boolean.TRUE.toString())) { return; } - String metastoreHook = sessionConf.getVar(ConfVars.METASTORE_FILTER_HOOK); - if (!ConfVars.METASTORE_FILTER_HOOK.getDefaultValue().equals(metastoreHook) && + String metastoreHook = MetastoreConf.getAsString(sessionConf, + MetastoreConf.ConfVars.FILTER_HOOK); + if (!MetastoreConf.ConfVars.FILTER_HOOK.getDefaultVal().equals(metastoreHook) && !AuthorizationMetaStoreFilterHook.class.getName().equals(metastoreHook)) { - LOG.warn(ConfVars.METASTORE_FILTER_HOOK.varname + + LOG.warn(MetastoreConf.ConfVars.FILTER_HOOK + " will be ignored, since hive.security.authorization.manager" + " is set to instance of HiveAuthorizerFactory."); } - sessionConf.setVar(ConfVars.METASTORE_FILTER_HOOK, + MetastoreConf.setVar(sessionConf, MetastoreConf.ConfVars.FILTER_HOOK, AuthorizationMetaStoreFilterHook.class.getName()); authorizerV2.applyAuthorizationConfigPolicy(sessionConf); @@ -1812,7 +1812,7 @@ public void close() throws IOException { private void 
unCacheDataNucleusClassLoaders() { try { - boolean isLocalMetastore = HiveConfUtil.isEmbeddedMetaStore( + boolean isLocalMetastore = MetastoreConf.isEmbeddedMetaStore( MetastoreConf.getVar(sessionConf, MetastoreConf.ConfVars.THRIFT_URIS)); if (isLocalMetastore) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index be527095c3..814e1f27f0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -46,6 +46,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Decimal; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.utils.MetaStoreUtils; import org.apache.hadoop.hive.ql.exec.ColumnInfo; import org.apache.hadoop.hive.ql.exec.FunctionRegistry; @@ -722,7 +723,8 @@ public static long getFileSizeForTable(HiveConf conf, Table table) { LOG.info("Number of partitions : " + parts.size()); ArrayList> futures = new ArrayList<>(); - int threads = Math.max(1, conf.getIntVar(ConfVars.METASTORE_FS_HANDLER_THREADS_COUNT)); + int threads = Math.max(1, MetastoreConf.getIntVar(conf, + MetastoreConf.ConfVars.FS_HANDLER_THREADS_COUNT)); final ExecutorService pool = Executors.newFixedThreadPool(threads, new ThreadFactoryBuilder() .setDaemon(true) diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java index 3270175a80..3a78a378ca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/Worker.java @@ -337,7 +337,6 @@ void gatherStats() { } sb.setLength(sb.length() - 1); //remove trailing , LOG.info(ci + ": running '" + sb.toString() + "'"); - 
conf.setVar(HiveConf.ConfVars.METASTOREURIS,""); //todo: use DriverUtils.runOnDriver() here QueryState queryState = new QueryState.Builder().withGenerateNewQueryId(true).withHiveConf(conf).build(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java index 1da01be7c0..030e2dafe6 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands.java @@ -725,7 +725,8 @@ public void testTimeOutReaper() throws Exception { runStatementOnDriver("start transaction"); runStatementOnDriver("delete from " + Table.ACIDTBL + " where a = 5"); //make sure currently running txn is considered aborted by housekeeper - hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 2, TimeUnit.MILLISECONDS); + MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.TXN_TIMEOUT, 2, + TimeUnit.MILLISECONDS); MetastoreTaskThread houseKeeperService = new AcidHouseKeeperService(); houseKeeperService.setConf(hiveConf); //this will abort the txn @@ -737,7 +738,8 @@ public void testTimeOutReaper() throws Exception { //now test that we don't timeout locks we should not //heartbeater should be running in the background every 1/2 second - hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_TXN_TIMEOUT, 1, TimeUnit.SECONDS); + MetastoreConf.setTimeVar(hiveConf, MetastoreConf.ConfVars.TXN_TIMEOUT, 1, + TimeUnit.SECONDS); // Have to reset the conf when we change it so that the change takes affect houseKeeperService.setConf(hiveConf); runStatementOnDriver("start transaction"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java index cbc72b47ea..6c1e0f5662 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommands2.java @@ -47,6 +47,7 @@ import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import 
org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowCompactResponseElement; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.AcidCompactionHistoryService; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.metastore.txn.TxnStore; @@ -137,7 +138,8 @@ void setUpWithTableProperties(String tableProperties) throws Exception { + File.separator + "mapred" + File.separator + "temp"); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, TEST_WAREHOUSE_DIR); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, + TEST_WAREHOUSE_DIR); hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); hiveConf .setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, @@ -1028,7 +1030,8 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb } hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, true); - int numFailedCompactions = hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); + int numFailedCompactions = MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_INITIATOR_FAILED_THRESHOLD); TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf); AtomicBoolean stop = new AtomicBoolean(true); //create failed compactions @@ -1044,9 +1047,14 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb init.init(stop, new AtomicBoolean()); init.run(); int numAttemptedCompactions = 1; - checkCompactionState(new CompactionsByState(numAttemptedCompactions,numFailedCompactions,0,0,0,0,numFailedCompactions + numAttemptedCompactions), countCompacts(txnHandler)); - - hiveConf.setTimeVar(HiveConf.ConfVars.COMPACTOR_HISTORY_REAPER_INTERVAL, 10, TimeUnit.MILLISECONDS); + checkCompactionState( + new 
CompactionsByState(numAttemptedCompactions, numFailedCompactions, 0, + 0, 0, 0, numFailedCompactions + numAttemptedCompactions), + countCompacts(txnHandler)); + + MetastoreConf.setTimeVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_REAPER_INTERVAL, 10L, + TimeUnit.MILLISECONDS); AcidCompactionHistoryService compactionHistoryService = new AcidCompactionHistoryService(); compactionHistoryService.setConf(hiveConf); compactionHistoryService.run(); @@ -1063,35 +1071,69 @@ void testInitiatorWithMultipleFailedCompactionsForVariousTblProperties(String tb checkCompactionState(new CompactionsByState(numAttemptedCompactions,numFailedCompactions + 2,0,0,0,0,numFailedCompactions + 2 + numAttemptedCompactions), countCompacts(txnHandler)); compactionHistoryService.run(); - //COMPACTOR_HISTORY_RETENTION_FAILED failed compacts left (and no other since we only have failed ones here) + // COMPACTOR_HISTORY_RETENTION_FAILED failed compacts left (and no other + // since we only have failed ones here) checkCompactionState(new CompactionsByState( - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED),0,0,0,0, - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)), countCompacts(txnHandler)); + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), + 0, 0, 0, 0, + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + ), + countCompacts(txnHandler)); hiveConf.setBoolVar(HiveConf.ConfVars.HIVETESTMODEFAILCOMPACTION, false); - txnHandler.compact(new CompactionRequest("default", tblName,
CompactionType.MINOR)); - //at this point "show compactions" should have (COMPACTOR_HISTORY_RETENTION_FAILED) failed + 1 initiated (explicitly by user) + txnHandler.compact( + new CompactionRequest("default", tblName, CompactionType.MINOR)); + // at this point "show compactions" should have + // (COMPACTOR_HISTORY_RETENTION_FAILED) failed + 1 initiated (explicitly by + // user) checkCompactionState(new CompactionsByState( - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED),1,0,0,0, - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)+ 1), countCompacts(txnHandler)); - - runWorker(hiveConf);//will succeed and transition to Initiated->Working->Ready for Cleaning + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), + 1, 0, 0, 0, + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + + 1), + countCompacts(txnHandler)); + + runWorker(hiveConf);// will succeed and transition to + // Initiated->Working->Ready for Cleaning checkCompactionState(new CompactionsByState( - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED),0,1,0,0, - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)+ 1), countCompacts(txnHandler)); + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), + MetastoreConf.getIntVar(hiveConf, + 
MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), + 0, 1, 0, 0, + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + + 1), + countCompacts(txnHandler)); runCleaner(hiveConf); // transition to Success state compactionHistoryService.run(); checkCompactionState(new CompactionsByState( - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED),0,0,1,0, - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + - hiveConf.getIntVar(HiveConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED)+ 1), countCompacts(txnHandler)); + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED), + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED), + 0, 0, 1, 0, + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_FAILED) + + MetastoreConf.getIntVar(hiveConf, + MetastoreConf.ConfVars.COMPACTOR_HISTORY_RETENTION_ATTEMPTED) + + 1), + countCompacts(txnHandler)); } /** @@ -1275,8 +1317,10 @@ public void testFailHeartbeater() throws Exception { @Test public void testOpenTxnsCounter() throws Exception { - hiveConf.setIntVar(HiveConf.ConfVars.HIVE_MAX_OPEN_TXNS, 3); - hiveConf.setTimeVar(HiveConf.ConfVars.HIVE_COUNT_OPEN_TXNS_INTERVAL, 10, TimeUnit.MILLISECONDS); + MetastoreConf.setIntVar(hiveConf, MetastoreConf.ConfVars.MAX_OPEN_TXNS, 3); + MetastoreConf.setTimeVar(hiveConf, + MetastoreConf.ConfVars.COUNT_OPEN_TXNS_INTERVAL, 10, + TimeUnit.MILLISECONDS); TxnStore txnHandler = TxnUtils.getTxnStore(hiveConf); OpenTxnsResponse openTxnsResponse = txnHandler.openTxns(new OpenTxnRequest(3, "me", "localhost")); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java 
b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java index d4c9121c9f..e16d7671db 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TestTxnCommandsForMmTable.java @@ -83,7 +83,6 @@ public void setUp() throws Exception { void setUpInternalExtended(boolean isOrcFormat) throws Exception { hiveConf.setBoolVar(HiveConf.ConfVars.DYNAMICPARTITIONING, true); - hiveConf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHTASKCONVERSION, "none"); hiveConf.set(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "true"); hiveConf.setVar(HiveConf.ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager"); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java index f01a07e591..8513ea2bab 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/TxnCommandsBaseForTests.java @@ -31,6 +31,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.RemoteIterator; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.ql.io.HiveInputFormat; import org.apache.hadoop.hive.ql.processors.CommandProcessorException; @@ -94,7 +95,8 @@ void setUpInternal() throws Exception { + File.separator + "mapred" + File.separator + "temp"); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getWarehouseDir()); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, + getWarehouseDir()); hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); hiveConf 
.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java index dc00ceb398..7b15fb0ca3 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java @@ -413,7 +413,6 @@ public void testFetchOperatorContextQuoting() throws Exception { public void testFetchOperatorContext() throws Exception { HiveConf conf = new HiveConf(); conf.set("hive.support.concurrency", "false"); - conf.setVar(HiveConf.ConfVars.HIVEMAPREDMODE, "nonstrict"); conf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); SessionState.start(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java index 5626dbefbf..f4d602fe86 100755 --- a/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/metadata/TestHive.java @@ -799,8 +799,10 @@ public void testHiveRefreshOnConfChange() throws Throwable{ prevHiveObj.getDatabaseCurrent(); //change value of a metavar config param in new hive conf newHconf = new HiveConf(hiveConf); - newHconf.setIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES, - newHconf.getIntVar(ConfVars.METASTORETHRIFTCONNECTIONRETRIES) + 1); + MetastoreConf.setIntVar(newHconf, + MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES, + MetastoreConf.getIntVar(newHconf, + MetastoreConf.ConfVars.THRIFT_CONNECTION_RETRIES) + 1); newHiveObj = Hive.get(newHconf); assertTrue(prevHiveObj != newHiveObj); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java index ddbbef0b01..a6d7164253 100644 --- 
a/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/processors/TestSetProcessor.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.common.io.SessionStream; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.SystemVariables; +import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.apache.hadoop.hive.ql.session.SessionState; import org.junit.Assert; import org.junit.Before; @@ -65,13 +66,15 @@ public void setupTest() throws Exception { public void testHiddenConfig() throws Exception { runSetProcessor(""); String output = baos.toString(); - Assert.assertFalse(output.contains(HiveConf.ConfVars.METASTOREPWD.varname + "=")); - Assert.assertFalse(output.contains(HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "=")); + Assert.assertFalse( + output.contains(MetastoreConf.ConfVars.PWD.getHiveName() + "=")); + Assert.assertFalse(output.contains( + HiveConf.ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname + "=")); } @Test public void testHiddenConfigSetVarName() throws CommandProcessorException { - runSetProcessor(HiveConf.ConfVars.METASTOREPWD.varname); + runSetProcessor(MetastoreConf.ConfVars.PWD.getHiveName()); String output = baos.toString(); Assert.assertTrue(output.contains("hidden")); } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java index e6576d18cf..622f298a89 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java @@ -75,7 +75,6 @@ public static void beforeTest() throws Exception { conf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false); 
conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true); conf.setVar(ConfVars.HIVE_TXN_MANAGER, DbTxnManager.class.getName()); - conf.setVar(ConfVars.HIVEMAPREDMODE, "nonstrict"); SessionState.start(conf); driver = new Driver(conf); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java index 84827d1604..d3dcb4f373 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/stats/TestStatsUpdaterThread.java @@ -68,13 +68,13 @@ String getTestDataDir() { return TEST_DATA_DIR; } - @SuppressWarnings("deprecation") @Before public void setUp() throws Exception { this.hiveConf = new HiveConf(TestStatsUpdaterThread.class); hiveConf.set(HiveConf.ConfVars.PREEXECHOOKS.varname, ""); hiveConf.set(HiveConf.ConfVars.POSTEXECHOOKS.varname, ""); - hiveConf.set(HiveConf.ConfVars.METASTOREWAREHOUSE.varname, getTestDataDir()); + MetastoreConf.setVar(hiveConf, MetastoreConf.ConfVars.WAREHOUSE, + getTestDataDir()); hiveConf.setVar(HiveConf.ConfVars.HIVEINPUTFORMAT, HiveInputFormat.class.getName()); hiveConf.setVar(HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory"); diff --git a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java index a496a593c2..69ba8fd776 100644 --- a/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java +++ b/standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/metastore/conf/MetastoreConf.java @@ -1112,6 +1112,9 @@ public static ConfVars getMetaConf(String name) { "select query has incorrect syntax or something similar inside a transaction, the\n" + "entire 
transaction will fail and fall-back to DataNucleus will not be possible. You\n" + "should disable the usage of direct SQL inside transactions if that happens in your case."), + TXN_HEARTBEAT_THREADPOOL_SIZE("metastore.txn.heartbeat.threadpool.size", + "hive.txn.heartbeat.threadpool.size", 5, + "The number of threads to use for heartbeating. For Hive CLI, 1 is enough. For HiveServer2, we need a few"), TXN_MAX_OPEN_BATCH("metastore.txn.max.open.batch", "hive.txn.max.open.batch", 1000, "Maximum number of transactions that can be fetched in one call to open_txns().\n" + "This controls how many transactions streaming agents such as Flume or Storm open\n" + @@ -1670,6 +1673,18 @@ public static int getIntVar(Configuration conf, ConfVars var) { return (int)val; } + /** + * Set the variable as an int + * @param conf configuration file to set it in + * @param var variable to set + * @param val value to set it to + */ + public static void setIntVar(Configuration conf, ConfVars var, int val) { + assert var.defaultVal.getClass() == Integer.class; + // write under the canonical metastore key; getIntVar reads var.varname first + conf.setInt(var.varname, val); + } + /** * Get the variable as a long * @param conf configuration to retrieve it from @@ -2043,4 +2058,5 @@ static String dumpConfig(Configuration conf) { buf.append("Finished MetastoreConf object.\n"); return buf.toString(); } + }