- List<Task<? extends Serializable>> rootTasks) throws SemanticException {
-
- if (!HiveConf.getBoolVar(context.getConf(),
- HiveConf.ConfVars.HIVE_AUTHORIZATION_ENABLED)) {
- return;
- }
-
- Hive hive;
- try {
- hive = context.getHive();
-
- for (Task<? extends Serializable> task : rootTasks) {
- if (task.getWork() instanceof DDLWork) {
- DDLWork work = (DDLWork) task.getWork();
- if (work != null) {
- authorizeDDLWork(context, hive, work);
- }
- }
- }
- } catch (SemanticException ex) {
- throw ex;
- } catch (AuthorizationException ex) {
- throw ex;
- } catch (Exception ex) {
- throw new SemanticException(ex);
- }
- }
-
- /**
- * Authorizes the given DDLWork. Does nothing by default. Override this
- * and delegate to the relevant method in HiveAuthorizationProvider obtained by
- * getAuthProvider().
- */
- protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
- Hive hive, DDLWork work) throws HiveException {
- }
-
- protected void authorize(Privilege[] inputPrivs, Privilege[] outputPrivs)
- throws AuthorizationException, SemanticException {
- try {
- getAuthProvider().authorize(inputPrivs, outputPrivs);
- } catch (HiveException ex) {
- throw new SemanticException(ex);
- }
- }
-
- protected void authorize(Database db, Privilege priv)
- throws AuthorizationException, SemanticException {
- try {
- getAuthProvider().authorize(db, null, new Privilege[]{priv});
- } catch (HiveException ex) {
- throw new SemanticException(ex);
- }
- }
-
- protected void authorizeTable(Hive hive, String tableName, Privilege priv)
- throws AuthorizationException, HiveException {
- Table table;
- try {
- table = hive.getTable(tableName);
- } catch (InvalidTableException ite) {
- // Table itself doesn't exist in metastore, nothing to validate.
- return;
- }
-
- authorize(table, priv);
- }
-
- protected void authorize(Table table, Privilege priv)
- throws AuthorizationException, SemanticException {
- try {
- getAuthProvider().authorize(table, new Privilege[]{priv}, null);
- } catch (HiveException ex) {
- throw new SemanticException(ex);
- }
- }
-
- protected void authorize(Partition part, Privilege priv)
- throws AuthorizationException, SemanticException {
- try {
- getAuthProvider().authorize(part, new Privilege[]{priv}, null);
- } catch (HiveException ex) {
- throw new SemanticException(ex);
- }
- }
-}
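
The hook above authorizes DDL by walking the root tasks and handing each DDLWork to authorizeDDLWork(), a no-op by default. A minimal sketch of a subclass that turns on one concrete check, reusing the helpers above (the subclass and base class names here are hypothetical, and DDLWork.getDropTblDesc() is assumed from Hive APIs of this era):

    public class DropTableAuthHook extends HCatSemanticAnalyzerBase {
      @Override
      protected void authorizeDDLWork(HiveSemanticAnalyzerHookContext context,
          Hive hive, DDLWork work) throws HiveException {
        // Only DROP TABLE statements carry a DropTableDesc (assumed accessor).
        if (work.getDropTblDesc() != null) {
          // authorizeTable() above silently skips tables missing from the metastore.
          authorizeTable(hive, work.getDropTblDesc().getTableName(), Privilege.DROP);
        }
      }
    }
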
diff --git a/hcatalog/core/src/main/java/org/apache/hcatalog/common/ErrorType.java b/hcatalog/core/src/main/java/org/apache/hcatalog/common/ErrorType.java
deleted file mode 100644
index 2e16b47..0000000
--- a/hcatalog/core/src/main/java/org/apache/hcatalog/common/ErrorType.java
+++ /dev/null
@@ -1,138 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hcatalog.common;
-
-/**
- * Enum type representing the various errors thrown by HCat.
- * @deprecated Use/modify {@link org.apache.hive.hcatalog.common.ErrorType} instead
- */
-public enum ErrorType {
-
- /* HCat Input Format related errors 1000 - 1999 */
- ERROR_DB_INIT (1000, "Error initializing database session"),
- ERROR_EXCEED_MAXPART (1001, "Query result exceeded maximum number of partitions allowed"),
-
- ERROR_SET_INPUT (1002, "Error setting input information"),
-
- /* HCat Output Format related errors 2000 - 2999 */
- ERROR_INVALID_TABLE (2000, "Table specified does not exist"),
- ERROR_SET_OUTPUT (2001, "Error setting output information"),
- ERROR_DUPLICATE_PARTITION (2002, "Partition already present with given partition key values"),
- ERROR_NON_EMPTY_TABLE (2003, "Non-partitioned table already contains data"),
- ERROR_NOT_INITIALIZED (2004, "HCatOutputFormat not initialized, setOutput has to be called"),
- ERROR_INIT_STORAGE_HANDLER (2005, "Error initializing storage handler instance"),
- ERROR_PUBLISHING_PARTITION (2006, "Error adding partition to metastore"),
- ERROR_SCHEMA_COLUMN_MISMATCH (2007, "Invalid column position in partition schema"),
- ERROR_SCHEMA_PARTITION_KEY (2008, "Partition key cannot be present in the partition data"),
- ERROR_SCHEMA_TYPE_MISMATCH (2009, "Invalid column type in partition schema"),
- ERROR_INVALID_PARTITION_VALUES (2010, "Invalid partition values specified"),
- ERROR_MISSING_PARTITION_KEY (2011, "Partition key value not provided for publish"),
- ERROR_MOVE_FAILED (2012, "Moving of data failed during commit"),
- ERROR_TOO_MANY_DYNAMIC_PTNS (2013, "Attempt to create too many dynamic partitions"),
- ERROR_INIT_LOADER (2014, "Error initializing Pig loader"),
- ERROR_INIT_STORER (2015, "Error initializing Pig storer"),
- ERROR_NOT_SUPPORTED (2016, "Error operation not supported"),
-
- /* Authorization Errors 3000 - 3999 */
- ERROR_ACCESS_CONTROL (3000, "Permission denied"),
-
- /* Miscellaneous errors, range 9000 - 9998 */
- ERROR_UNIMPLEMENTED (9000, "Functionality currently unimplemented"),
- ERROR_INTERNAL_EXCEPTION (9001, "Exception occurred while processing HCat request");
-
- /** The error code. */
- private int errorCode;
-
- /** The error message. */
- private String errorMessage;
-
- /** Should the causal exception message be appended to the error message, yes by default*/
- private boolean appendCauseMessage = true;
-
- /** Is this a retriable error, no by default. */
- private boolean isRetriable = false;
-
- /**
- * Instantiates a new error type.
- * @param errorCode the error code
- * @param errorMessage the error message
- */
- private ErrorType(int errorCode, String errorMessage) {
- this.errorCode = errorCode;
- this.errorMessage = errorMessage;
- }
-
- /**
- * Instantiates a new error type.
- * @param errorCode the error code
- * @param errorMessage the error message
- * @param appendCauseMessage should causal exception message be appended to error message
- */
- private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage) {
- this.errorCode = errorCode;
- this.errorMessage = errorMessage;
- this.appendCauseMessage = appendCauseMessage;
- }
-
- /**
- * Instantiates a new error type.
- * @param errorCode the error code
- * @param errorMessage the error message
- * @param appendCauseMessage should causal exception message be appended to error message
- * @param isRetriable is this a retriable error
- */
- private ErrorType(int errorCode, String errorMessage, boolean appendCauseMessage, boolean isRetriable) {
- this.errorCode = errorCode;
- this.errorMessage = errorMessage;
- this.appendCauseMessage = appendCauseMessage;
- this.isRetriable = isRetriable;
- }
-
- /**
- * Gets the error code.
- * @return the error code
- */
- public int getErrorCode() {
- return errorCode;
- }
-
- /**
- * Gets the error message.
- * @return the error message
- */
- public String getErrorMessage() {
- return errorMessage;
- }
-
- /**
- * Checks if this is a retriable error.
- * @return true, if is a retriable error, false otherwise
- */
- public boolean isRetriable() {
- return isRetriable;
- }
-
- /**
- * Whether the cause of the exception should be added to the error message.
- * @return true, if the cause should be added to the message, false otherwise
- */
- public boolean appendCauseMessage() {
- return appendCauseMessage;
- }
-}
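
Each constant above pairs a stable numeric code with a message; the two boolean flags control message building and retry behavior and default to true and false respectively. A quick illustration using only the accessors defined above:

    ErrorType t = ErrorType.ERROR_EXCEED_MAXPART;
    int code = t.getErrorCode();            // 1001
    String msg = t.getErrorMessage();       // "Query result exceeded maximum number of partitions allowed"
    boolean retry = t.isRetriable();        // false: no constant above opts in to retries
    boolean cause = t.appendCauseMessage(); // true: every constant above keeps the default
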
diff --git a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatConstants.java b/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatConstants.java
deleted file mode 100644
index a7e5b91..0000000
--- a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatConstants.java
+++ /dev/null
@@ -1,189 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hcatalog.common;
-
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.mapred.SequenceFileInputFormat;
-import org.apache.hadoop.mapred.SequenceFileOutputFormat;
-
-/**
- * @deprecated Use/modify {@link org.apache.hive.hcatalog.common.HCatConstants} instead
- */
-public final class HCatConstants {
-
- public static final String HIVE_RCFILE_IF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileInputFormat";
- public static final String HIVE_RCFILE_OF_CLASS = "org.apache.hadoop.hive.ql.io.RCFileOutputFormat";
-
- public static final String SEQUENCEFILE_INPUT = SequenceFileInputFormat.class.getName();
- public static final String SEQUENCEFILE_OUTPUT = SequenceFileOutputFormat.class.getName();
-
- public static final String HCAT_PIG_STORAGE_CLASS = "org.apache.pig.builtin.PigStorage";
- public static final String HCAT_PIG_LOADER = "hcat.pig.loader";
- public static final String HCAT_PIG_LOADER_LOCATION_SET = HCAT_PIG_LOADER + ".location.set";
- public static final String HCAT_PIG_LOADER_ARGS = "hcat.pig.loader.args";
- public static final String HCAT_PIG_STORER = "hcat.pig.storer";
- public static final String HCAT_PIG_STORER_ARGS = "hcat.pig.storer.args";
- public static final String HCAT_PIG_ARGS_DELIMIT = "hcat.pig.args.delimiter";
- public static final String HCAT_PIG_ARGS_DELIMIT_DEFAULT = ",";
- public static final String HCAT_PIG_STORER_LOCATION_SET = HCAT_PIG_STORER + ".location.set";
- public static final String HCAT_PIG_INNER_TUPLE_NAME = "hcat.pig.inner.tuple.name";
- public static final String HCAT_PIG_INNER_TUPLE_NAME_DEFAULT = "innertuple";
- public static final String HCAT_PIG_INNER_FIELD_NAME = "hcat.pig.inner.field.name";
- public static final String HCAT_PIG_INNER_FIELD_NAME_DEFAULT = "innerfield";
-
- /**
- * {@value} (default: null)
- * When the property is set in the UDFContext of the org.apache.hcatalog.pig.HCatStorer, HCatStorer writes
- * to the location it specifies instead of the default HCatalog location format. An example can be found
- * in org.apache.hcatalog.pig.HCatStorerWrapper.
- */
- public static final String HCAT_PIG_STORER_EXTERNAL_LOCATION = HCAT_PIG_STORER + ".external.location";
-
- //The keys used to store info into the job Configuration
- public static final String HCAT_KEY_BASE = "mapreduce.lib.hcat";
-
- public static final String HCAT_KEY_OUTPUT_SCHEMA = HCAT_KEY_BASE + ".output.schema";
-
- public static final String HCAT_KEY_JOB_INFO = HCAT_KEY_BASE + ".job.info";
-
- // hcatalog specific configurations, that can be put in hive-site.xml
- public static final String HCAT_HIVE_CLIENT_EXPIRY_TIME = "hcatalog.hive.client.cache.expiry.time";
-
- private HCatConstants() { // restrict instantiation
- }
-
- public static final String HCAT_TABLE_SCHEMA = "hcat.table.schema";
-
- public static final String HCAT_METASTORE_URI = HiveConf.ConfVars.METASTOREURIS.varname;
-
- public static final String HCAT_PERMS = "hcat.perms";
-
- public static final String HCAT_GROUP = "hcat.group";
-
- public static final String HCAT_CREATE_TBL_NAME = "hcat.create.tbl.name";
-
- public static final String HCAT_CREATE_DB_NAME = "hcat.create.db.name";
-
- public static final String HCAT_METASTORE_PRINCIPAL
- = HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL.varname;
-
- /**
- * The desired number of input splits produced for each partition. When the
- * input files are large and few, we want to split them into many splits,
- * so as to increase the parallelism of loading the splits. Also try the two
- * other parameters, mapred.min.split.size and mapred.max.split.size, to
- * control the number of input splits.
- */
- public static final String HCAT_DESIRED_PARTITION_NUM_SPLITS =
- "hcat.desired.partition.num.splits";
-
- // IMPORTANT IMPORTANT IMPORTANT!!!!!
- //The keys used to store info into the job Configuration.
- //If any new keys are added, the HCatStorer needs to be updated. The HCatStorer
- //updates the job configuration in the backend to insert these keys to avoid
- //having to call setOutput from the backend (which would cause a metastore call
- //from the map jobs)
- public static final String HCAT_KEY_OUTPUT_BASE = "mapreduce.lib.hcatoutput";
- public static final String HCAT_KEY_OUTPUT_INFO = HCAT_KEY_OUTPUT_BASE + ".info";
- public static final String HCAT_KEY_HIVE_CONF = HCAT_KEY_OUTPUT_BASE + ".hive.conf";
- public static final String HCAT_KEY_TOKEN_SIGNATURE = HCAT_KEY_OUTPUT_BASE + ".token.sig";
-
- public static final String[] OUTPUT_CONFS_TO_SAVE = {
- HCAT_KEY_OUTPUT_INFO,
- HCAT_KEY_HIVE_CONF,
- HCAT_KEY_TOKEN_SIGNATURE
- };
-
-
- public static final String HCAT_MSG_CLEAN_FREQ = "hcat.msg.clean.freq";
- public static final String HCAT_MSG_EXPIRY_DURATION = "hcat.msg.expiry.duration";
-
- public static final String HCAT_MSGBUS_TOPIC_NAME = "hcat.msgbus.topic.name";
- public static final String HCAT_MSGBUS_TOPIC_NAMING_POLICY = "hcat.msgbus.topic.naming.policy";
- public static final String HCAT_MSGBUS_TOPIC_PREFIX = "hcat.msgbus.topic.prefix";
-
- public static final String HCAT_DYNAMIC_PTN_JOBID = HCAT_KEY_OUTPUT_BASE + "dynamic.jobid";
- public static final boolean HCAT_IS_DYNAMIC_MAX_PTN_CHECK_ENABLED = false;
-
- // Message Bus related properties.
- public static final String HCAT_DEFAULT_TOPIC_PREFIX = "hcat";
- public static final String HCAT_EVENT = "HCAT_EVENT";
- public static final String HCAT_ADD_PARTITION_EVENT = "ADD_PARTITION";
- public static final String HCAT_DROP_PARTITION_EVENT = "DROP_PARTITION";
- public static final String HCAT_PARTITION_DONE_EVENT = "PARTITION_DONE";
- public static final String HCAT_CREATE_TABLE_EVENT = "CREATE_TABLE";
- public static final String HCAT_DROP_TABLE_EVENT = "DROP_TABLE";
- public static final String HCAT_CREATE_DATABASE_EVENT = "CREATE_DATABASE";
- public static final String HCAT_DROP_DATABASE_EVENT = "DROP_DATABASE";
- public static final String HCAT_MESSAGE_VERSION = "HCAT_MESSAGE_VERSION";
- public static final String HCAT_MESSAGE_FORMAT = "HCAT_MESSAGE_FORMAT";
- public static final String CONF_LABEL_HCAT_MESSAGE_FACTORY_IMPL_PREFIX = "hcatalog.message.factory.impl.";
- public static final String CONF_LABEL_HCAT_MESSAGE_FORMAT = "hcatalog.message.format";
- public static final String DEFAULT_MESSAGE_FACTORY_IMPL = "org.apache.hcatalog.messaging.json.JSONMessageFactory";
-
- // System environment variables
- public static final String SYSENV_HADOOP_TOKEN_FILE_LOCATION = "HADOOP_TOKEN_FILE_LOCATION";
-
- // Hadoop Conf Var Names
- public static final String CONF_MAPREDUCE_JOB_CREDENTIALS_BINARY = "mapreduce.job.credentials.binary";
-
- //***************************************************************************
- // Data-related configuration properties.
- //***************************************************************************
-
- /**
- * {@value} (default: {@value #HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT}).
- * Pig < 0.10.0 does not have boolean support, and scripts written for pre-boolean Pig versions
- * will not expect boolean values when upgrading Pig. For compatibility, this Hadoop
- * configuration key offers the option to convert boolean fields to integers.
- */
- public static final String HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER =
- "hcat.data.convert.boolean.to.integer";
- public static final boolean HCAT_DATA_CONVERT_BOOLEAN_TO_INTEGER_DEFAULT = false;
-
- /**
- * {@value} (default: {@value #HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT}).
- * Hive tables support tinyint and smallint columns, while not all processing frameworks support
- * these types (Pig only has integer for example). Enable this property to promote tinyint and
- * smallint columns to integer at runtime. Note that writes to tinyint and smallint columns
- * enforce bounds checking and jobs will fail if attempting to write values outside the column
- * bounds.
- */
- public static final String HCAT_DATA_TINY_SMALL_INT_PROMOTION =
- "hcat.data.tiny.small.int.promotion";
- public static final boolean HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT = false;
-
- /**
- * {@value} (default: {@value #HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT}).
- * Threshold for the ratio of bad records that will be silently skipped without causing a task
- * failure. This is useful when processing large data sets with corrupt records, when it's
- * acceptable to skip some bad records.
- */
- public static final String HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY = "hcat.input.bad.record.threshold";
- public static final float HCAT_INPUT_BAD_RECORD_THRESHOLD_DEFAULT = 0.0001f;
-
- /**
- * {@value} (default: {@value #HCAT_INPUT_BAD_RECORD_MIN_DEFAULT}).
- * Number of bad records that will be accepted before applying
- * {@value #HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY}. This is necessary to prevent an initial bad
- * record from causing a task failure.
- */
- public static final String HCAT_INPUT_BAD_RECORD_MIN_KEY = "hcat.input.bad.record.min";
- public static final int HCAT_INPUT_BAD_RECORD_MIN_DEFAULT = 2;
-}
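
These are ordinary Configuration keys, so the optional behaviors documented above are enabled by setting them on the job configuration. A small sketch (the job variable is assumed to be a Hadoop Job):

    Configuration conf = job.getConfiguration();
    // Promote tinyint/smallint columns to int for frameworks without those types.
    conf.setBoolean(HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION, true);
    // Tolerate a small ratio of bad records instead of failing the task.
    conf.setFloat(HCatConstants.HCAT_INPUT_BAD_RECORD_THRESHOLD_KEY, 0.001f);
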
diff --git a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatContext.java b/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatContext.java
deleted file mode 100644
index 4d2c3cb..0000000
--- a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatContext.java
+++ /dev/null
@@ -1,91 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hcatalog.common;
-
-import com.google.common.base.Optional;
-import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hive.common.classification.InterfaceAudience;
-import org.apache.hadoop.hive.common.classification.InterfaceStability;
-
-import java.util.Map;
-
-/**
- * HCatContext is a singleton that provides global access to configuration data.
- *
- * HCatalog provides a variety of functionality that users can configure at runtime through
- * configuration properties. Available configuration properties are defined in
- * {@link HCatConstants}. HCatContext allows users to enable optional functionality by
- * setting properties in a provided configuration.
- *
- * HCatalog users (MR apps, processing framework adapters) should set properties
- * in a configuration that has been provided to
- * {@link #setConf(org.apache.hadoop.conf.Configuration)} to enable optional functionality.
- * The job configuration must be used to ensure properties are passed to the backend MR tasks.
- *
- * HCatalog developers should enable optional functionality by checking properties
- * from {@link #getConf()}. Since users are not obligated to set a configuration, optional
- * functionality must provide a sensible default.
- *
- * @deprecated Use/modify {@link org.apache.hive.hcatalog.common.HCatContext} instead
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public enum HCatContext {
- INSTANCE;
-
- private Configuration conf = null;
-
- /**
- * Use the given configuration for optional behavior. Keys exclusive to an existing config
- * are set in the new conf. The job conf must be used to ensure properties are passed to
- * backend MR tasks.
- */
- public synchronized HCatContext setConf(Configuration newConf) {
- Preconditions.checkNotNull(newConf, "Required parameter 'newConf' must not be null.");
-
- if (conf == null) {
- conf = newConf;
- return this;
- }
-
- if (conf != newConf) {
- synchronized (conf) {
- for (Map.Entry<String, String> entry : conf) {
- if ((entry.getKey().matches("hcat.*")) && (newConf.get(entry.getKey()) == null)) {
- newConf.set(entry.getKey(), entry.getValue());
- }
- }
- }
- conf = newConf;
- }
- return this;
- }
-
- /**
- * Get the configuration, if there is one. Users are not required to setup HCatContext
- * unless they wish to override default behavior, so the configuration may not be present.
- *
- * @return an Optional that might contain a Configuration
- */
- public Optional<Configuration> getConf() {
- return Optional.fromNullable(conf);
- }
-}
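
Following the javadoc above, front-end code registers the job configuration once and library code treats the context as optional, falling back to documented defaults. A minimal sketch:

    // Front end (e.g. an MR driver): make hcat.* settings visible to backend tasks.
    HCatContext.INSTANCE.setConf(job.getConfiguration());

    // Library code: read an optional setting with a sensible default.
    Optional<Configuration> conf = HCatContext.INSTANCE.getConf();
    boolean promote = conf.isPresent() && conf.get().getBoolean(
        HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION,
        HCatConstants.HCAT_DATA_TINY_SMALL_INT_PROMOTION_DEFAULT);
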
diff --git a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatException.java b/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatException.java
deleted file mode 100644
index a2bb2ab..0000000
--- a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatException.java
+++ /dev/null
@@ -1,160 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.hcatalog.common;
-
-import java.io.IOException;
-
-/**
- * Class representing exceptions thrown by HCat.
- * @deprecated Use/modify {@link org.apache.hive.hcatalog.common.HCatException} instead
- */
-public class HCatException extends IOException {
-
- private static final long serialVersionUID = 1L;
-
- /** The error type enum for this exception. */
- private final ErrorType errorType;
-
- /**
- * Instantiates a new hcat exception.
- * @param errorType the error type
- */
- public HCatException(ErrorType errorType) {
- this(errorType, null, null);
- }
-
-
- /**
- * Instantiates a new hcat exception.
- * @param errorType the error type
- * @param cause the cause
- */
- public HCatException(ErrorType errorType, Throwable cause) {
- this(errorType, null, cause);
- }
-
- /**
- * Instantiates a new hcat exception.
- * @param errorType the error type
- * @param extraMessage extra messages to add to the message string
- */
- public HCatException(ErrorType errorType, String extraMessage) {
- this(errorType, extraMessage, null);
- }
-
- /**
- * Instantiates a new hcat exception.
- * @param errorType the error type
- * @param extraMessage extra messages to add to the message string
- * @param cause the cause
- */
- public HCatException(ErrorType errorType, String extraMessage, Throwable cause) {
- super(buildErrorMessage(
- errorType,
- extraMessage,
- cause), cause);
- this.errorType = errorType;
- }
-
-
- //TODO : remove default error type constructors after all exceptions
- //are changed to use error types
-
- /**
- * Instantiates a new hcat exception.
- * @param message the error message
- */
- public HCatException(String message) {
- this(ErrorType.ERROR_INTERNAL_EXCEPTION, message, null);
- }
-
- /**
- * Instantiates a new hcat exception.
- * @param message the error message
- * @param cause the cause
- */
- public HCatException(String message, Throwable cause) {
- this(ErrorType.ERROR_INTERNAL_EXCEPTION, message, cause);
- }
-
-
- /**
- * Builds the error message string. The error type message is appended with the extra message. If appendCause
- * is true for the error type, then the message of the cause also is added to the message.
- * @param type the error type
- * @param extraMessage the extra message string
- * @param cause the cause for the exception
- * @return the exception message string
- */
- public static String buildErrorMessage(ErrorType type, String extraMessage, Throwable cause) {
-
- //Initial message is just the error type message
- StringBuffer message = new StringBuffer(HCatException.class.getName());
- message.append(" : " + type.getErrorCode());
- message.append(" : " + type.getErrorMessage());
-
- if (extraMessage != null) {
- //Add the extra message value to buffer
- message.append(" : " + extraMessage);
- }
-
- if (type.appendCauseMessage()) {
- if (cause != null) {
- //Add the cause message to buffer
- message.append(". Cause : " + cause.toString());
- }
- }
-
- return message.toString();
- }
-
-
- /**
- * Is this a retriable error.
- * @return is it retriable
- */
- public boolean isRetriable() {
- return errorType.isRetriable();
- }
-
- /**
- * Gets the error type.
- * @return the error type enum
- */
- public ErrorType getErrorType() {
- return errorType;
- }
-
- /**
- * Gets the error code.
- * @return the error code
- */
- public int getErrorCode() {
- return errorType.getErrorCode();
- }
-
- /* (non-Javadoc)
- * @see java.lang.Throwable#toString()
- */
- @Override
- public String toString() {
- return getMessage();
- }
-
-}
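
buildErrorMessage() above concatenates the class name, error code, type message, optional extra detail, and (when the type allows) the cause, so a thrown HCatException carries everything needed for diagnosis and retry decisions:

    try {
      throw new HCatException(ErrorType.ERROR_DUPLICATE_PARTITION, "ds=2013-01-01");
    } catch (HCatException e) {
      // e.getMessage() yields something like:
      // "org.apache.hcatalog.common.HCatException : 2002 : Partition already
      //  present with given partition key values : ds=2013-01-01"
      if (e.isRetriable()) {
        // retry; ERROR_DUPLICATE_PARTITION is not retriable, so not taken here
      }
    }
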
diff --git a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatUtil.java b/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatUtil.java
deleted file mode 100644
index 15433e3..0000000
--- a/hcatalog/core/src/main/java/org/apache/hcatalog/common/HCatUtil.java
+++ /dev/null
@@ -1,631 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied. See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-
-package org.apache.hcatalog.common;
-
-import java.io.ByteArrayInputStream;
-import java.io.ByteArrayOutputStream;
-import java.io.IOException;
-import java.io.ObjectInputStream;
-import java.io.ObjectOutputStream;
-import java.io.Serializable;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsAction;
-import org.apache.hadoop.hive.common.JavaUtils;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
-import org.apache.hadoop.hive.metastore.MetaStoreUtils;
-import org.apache.hadoop.hive.metastore.api.FieldSchema;
-import org.apache.hadoop.hive.metastore.api.MetaException;
-import org.apache.hadoop.hive.metastore.api.NoSuchObjectException;
-import org.apache.hadoop.hive.ql.io.IgnoreKeyTextOutputFormat;
-import org.apache.hadoop.hive.ql.metadata.Partition;
-import org.apache.hadoop.hive.ql.metadata.Table;
-import org.apache.hadoop.hive.ql.plan.TableDesc;
-import org.apache.hadoop.hive.serde.serdeConstants;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
-import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
-import org.apache.hadoop.hive.thrift.DelegationTokenIdentifier;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapreduce.JobContext;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
-import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hcatalog.data.Pair;
-import org.apache.hcatalog.data.schema.HCatFieldSchema;
-import org.apache.hcatalog.data.schema.HCatSchema;
-import org.apache.hcatalog.data.schema.HCatSchemaUtils;
-import org.apache.hcatalog.mapreduce.FosterStorageHandler;
-import org.apache.hcatalog.mapreduce.HCatOutputFormat;
-import org.apache.hcatalog.mapreduce.HCatStorageHandler;
-import org.apache.hcatalog.mapreduce.InputJobInfo;
-import org.apache.hcatalog.mapreduce.OutputJobInfo;
-import org.apache.hcatalog.mapreduce.PartInfo;
-import org.apache.hcatalog.mapreduce.StorerInfo;
-import org.apache.thrift.TException;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.security.auth.login.LoginException;
-
-/**
- * @deprecated Use/modify {@link org.apache.hive.hcatalog.common.HCatUtil} instead
- */
-public class HCatUtil {
-
- private static final Logger LOG = LoggerFactory.getLogger(HCatUtil.class);
- private static volatile HiveClientCache hiveClientCache;
- private final static int DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS = 2 * 60;
-
- public static boolean checkJobContextIfRunningFromBackend(JobContext j) {
- if (j.getConfiguration().get("mapred.task.id", "").equals("") &&
- !("true".equals(j.getConfiguration().get("pig.illustrating")))) {
- return false;
- }
- return true;
- }
-
- public static String serialize(Serializable obj) throws IOException {
- if (obj == null) {
- return "";
- }
- try {
- ByteArrayOutputStream serialObj = new ByteArrayOutputStream();
- ObjectOutputStream objStream = new ObjectOutputStream(serialObj);
- objStream.writeObject(obj);
- objStream.close();
- return encodeBytes(serialObj.toByteArray());
- } catch (Exception e) {
- throw new IOException("Serialization error: " + e.getMessage(), e);
- }
- }
-
- public static Object deserialize(String str) throws IOException {
- if (str == null || str.length() == 0) {
- return null;
- }
- try {
- ByteArrayInputStream serialObj = new ByteArrayInputStream(
- decodeBytes(str));
- ObjectInputStream objStream = new ObjectInputStream(serialObj);
- return objStream.readObject();
- } catch (Exception e) {
- throw new IOException("Deserialization error: " + e.getMessage(), e);
- }
- }
-
- public static String encodeBytes(byte[] bytes) {
- StringBuffer strBuf = new StringBuffer();
-
- for (int i = 0; i < bytes.length; i++) {
- strBuf.append((char) (((bytes[i] >> 4) & 0xF) + ('a')));
- strBuf.append((char) (((bytes[i]) & 0xF) + ('a')));
- }
-
- return strBuf.toString();
- }
-
- public static byte[] decodeBytes(String str) {
- byte[] bytes = new byte[str.length() / 2];
- for (int i = 0; i < str.length(); i += 2) {
- char c = str.charAt(i);
- bytes[i / 2] = (byte) ((c - 'a') << 4);
- c = str.charAt(i + 1);
- bytes[i / 2] += (c - 'a');
- }
- return bytes;
- }
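
encodeBytes() above maps each nibble to a character in 'a'..'p' (base 16 over a letter alphabet), so every byte becomes two letters that survive storage in a Configuration string, and decodeBytes() inverts the mapping. For example:

    byte[] raw = {0x00, (byte) 0xFF, 0x41};
    String s = HCatUtil.encodeBytes(raw);   // "aa" + "pp" + "eb" = "aappeb"
    byte[] back = HCatUtil.decodeBytes(s);  // {0x00, (byte) 0xFF, 0x41} again
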
-
- public static List<HCatFieldSchema> getHCatFieldSchemaList(
- FieldSchema... fields) throws HCatException {
- List<HCatFieldSchema> result = new ArrayList<HCatFieldSchema>(
- fields.length);
-
- for (FieldSchema f : fields) {
- result.add(HCatSchemaUtils.getHCatFieldSchema(f));
- }
-
- return result;
- }
-
- public static List<HCatFieldSchema> getHCatFieldSchemaList(
- List<FieldSchema> fields) throws HCatException {
- if (fields == null) {
- return null;
- } else {
- List<HCatFieldSchema> result = new ArrayList<HCatFieldSchema>();
- for (FieldSchema f : fields) {
- result.add(HCatSchemaUtils.getHCatFieldSchema(f));
- }
- return result;
- }
- }
-
- public static HCatSchema extractSchema(Table table) throws HCatException {
- return new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
- }
-
- public static HCatSchema extractSchema(Partition partition) throws HCatException {
- return new HCatSchema(HCatUtil.getHCatFieldSchemaList(partition.getCols()));
- }
-
- public static List<FieldSchema> getFieldSchemaList(
- List<HCatFieldSchema> hcatFields) {
- if (hcatFields == null) {
- return null;
- } else {
- List<FieldSchema> result = new ArrayList<FieldSchema>();
- for (HCatFieldSchema f : hcatFields) {
- result.add(HCatSchemaUtils.getFieldSchema(f));
- }
- return result;
- }
- }
-
- public static Table getTable(HiveMetaStoreClient client, String dbName, String tableName)
- throws NoSuchObjectException, TException, MetaException {
- return new Table(client.getTable(dbName, tableName));
- }
-
- public static HCatSchema getTableSchemaWithPtnCols(Table table) throws IOException {
- HCatSchema tableSchema = new HCatSchema(HCatUtil.getHCatFieldSchemaList(table.getCols()));
-
- if (table.getPartitionKeys().size() != 0) {
-
- // add partition keys to table schema
- // NOTE : this assumes that we do not ever have ptn keys as columns
- // inside the table schema as well!
- for (FieldSchema fs : table.getPartitionKeys()) {
- tableSchema.append(HCatSchemaUtils.getHCatFieldSchema(fs));
- }
- }
- return tableSchema;
- }
-
- /**
- * return the partition columns from a table instance
- *
- * @param table the instance to extract partition columns from
- * @return HCatSchema instance which contains the partition columns
- * @throws IOException
- */
- public static HCatSchema getPartitionColumns(Table table) throws IOException {
- HCatSchema cols = new HCatSchema(new LinkedList<HCatFieldSchema>());
- if (table.getPartitionKeys().size() != 0) {
- for (FieldSchema fs : table.getPartitionKeys()) {
- cols.append(HCatSchemaUtils.getHCatFieldSchema(fs));
- }
- }
- return cols;
- }
-
- /**
- * Validate partition schema, checks if the column types match between the
- * partition and the existing table schema. Returns the list of columns
- * present in the partition but not in the table.
- *
- * @param table the table
- * @param partitionSchema the partition schema
- * @return the list of newly added fields
- * @throws IOException Signals that an I/O exception has occurred.
- */
- public static List<FieldSchema> validatePartitionSchema(Table table,
- HCatSchema partitionSchema) throws IOException {
- Map<String, FieldSchema> partitionKeyMap = new HashMap<String, FieldSchema>();
-
- for (FieldSchema field : table.getPartitionKeys()) {
- partitionKeyMap.put(field.getName().toLowerCase(), field);
- }
-
- List<FieldSchema> tableCols = table.getCols();
- List<FieldSchema> newFields = new ArrayList<FieldSchema>();
-
- for (int i = 0; i < partitionSchema.getFields().size(); i++) {
-
- FieldSchema field = HCatSchemaUtils.getFieldSchema(partitionSchema
- .getFields().get(i));
-
- FieldSchema tableField;
- if (i < tableCols.size()) {
- tableField = tableCols.get(i);
-
- if (!tableField.getName().equalsIgnoreCase(field.getName())) {
- throw new HCatException(
- ErrorType.ERROR_SCHEMA_COLUMN_MISMATCH,
- "Expected column <" + tableField.getName()
- + "> at position " + (i + 1)
- + ", found column <" + field.getName()
- + ">");
- }
- } else {
- tableField = partitionKeyMap.get(field.getName().toLowerCase());
-
- if (tableField != null) {
- throw new HCatException(
- ErrorType.ERROR_SCHEMA_PARTITION_KEY, "Key <"
- + field.getName() + ">");
- }
- }
-
- if (tableField == null) {
- // field present in partition but not in table
- newFields.add(field);
- } else {
- // field present in both. validate type has not changed
- TypeInfo partitionType = TypeInfoUtils
- .getTypeInfoFromTypeString(field.getType());
- TypeInfo tableType = TypeInfoUtils
- .getTypeInfoFromTypeString(tableField.getType());
-
- if (!partitionType.equals(tableType)) {
- throw new HCatException(
- ErrorType.ERROR_SCHEMA_TYPE_MISMATCH, "Column <"
- + field.getName() + ">, expected <"
- + tableType.getTypeName() + ">, got <"
- + partitionType.getTypeName() + ">");
- }
- }
- }
-
- return newFields;
- }
-
- /**
- * Test if the first FsAction is more permissive than the second. This is
- * useful in cases where we want to ensure that a file owner has more
- * permissions than the group they belong to. More completely (but
- * potentially more cryptically):
- * owner-r >= group-r >= world-r : bitwise and-masked with 0444 => 444 >= 440 >= 400 >= 000
- * owner-w >= group-w >= world-w : bitwise and-masked with 0222 => 222 >= 220 >= 200 >= 000
- * owner-x >= group-x >= world-x : bitwise and-masked with 0111 => 111 >= 110 >= 100 >= 000
- *
- * @return true if first FsAction is more permissive than the second, false
- * if not.
- */
- public static boolean validateMorePermissive(FsAction first, FsAction second) {
- if ((first == FsAction.ALL) || (second == FsAction.NONE)
- || (first == second)) {
- return true;
- }
- switch (first) {
- case READ_EXECUTE:
- return ((second == FsAction.READ) || (second == FsAction.EXECUTE));
- case READ_WRITE:
- return ((second == FsAction.READ) || (second == FsAction.WRITE));
- case WRITE_EXECUTE:
- return ((second == FsAction.WRITE) || (second == FsAction.EXECUTE));
- }
- return false;
- }
-
- /**
- * Ensure that read or write permissions are not granted without also
- * granting execute permissions. Essentially, r-- , rw- and -w- are invalid,
- * r-x, -wx, rwx, ---, --x are valid
- *
- * @param perms The FsAction to verify
- * @return true if the presence of read or write permission is accompanied
- * by execute permissions
- */
- public static boolean validateExecuteBitPresentIfReadOrWrite(FsAction perms) {
- if ((perms == FsAction.READ) || (perms == FsAction.WRITE)
- || (perms == FsAction.READ_WRITE)) {
- return false;
- }
- return true;
- }
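
Together the two validators above encode the permission ordering described in the javadoc. A few concrete cases:

    HCatUtil.validateMorePermissive(FsAction.READ_WRITE, FsAction.READ);    // true: rw- covers r--
    HCatUtil.validateMorePermissive(FsAction.READ, FsAction.WRITE);         // false: r-- does not cover -w-
    HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ);         // false: r-- lacks x
    HCatUtil.validateExecuteBitPresentIfReadOrWrite(FsAction.READ_EXECUTE); // true
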
-
- public static Token<org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier> getJobTrackerDelegationToken(
- Configuration conf, String userName) throws Exception {
- // LOG.info("getJobTrackerDelegationToken("+conf+","+userName+")");
- JobClient jcl = new JobClient(new JobConf(conf, HCatOutputFormat.class));
- Token<org.apache.hadoop.mapreduce.security.token.delegation.DelegationTokenIdentifier> t = jcl
- .getDelegationToken(new Text(userName));
- // LOG.info("got "+t);
- return t;
-
- // return null;
- }
-
- public static Token<? extends AbstractDelegationTokenIdentifier> extractThriftToken(
- String tokenStrForm, String tokenSignature) throws MetaException,
- TException, IOException {
- // LOG.info("extractThriftToken("+tokenStrForm+","+tokenSignature+")");
- Token<? extends AbstractDelegationTokenIdentifier> t = new Token<DelegationTokenIdentifier>();
- t.decodeFromUrlString(tokenStrForm);
- t.setService(new Text(tokenSignature));
- // LOG.info("returning "+t);
- return t;
- }
-
- /**
- * Create an instance of a storage handler defined in storerInfo. If one cannot be found
- * then FosterStorageHandler is used to encapsulate the InputFormat, OutputFormat and SerDe.
- * This StorageHandler assumes the other supplied storage artifacts are for a file-based storage system.
- * @param conf job's configuration will be used to configure the Configurable StorageHandler
- * @param storerInfo StorerInfo defining the StorageHandler and the InputFormat, OutputFormat and SerDe
- * @return storageHandler instance
- * @throws IOException
- */
- public static HCatStorageHandler getStorageHandler(Configuration conf, StorerInfo storerInfo) throws IOException {
- return getStorageHandler(conf,
- storerInfo.getStorageHandlerClass(),
- storerInfo.getSerdeClass(),
- storerInfo.getIfClass(),
- storerInfo.getOfClass());
- }
-
- public static HCatStorageHandler getStorageHandler(Configuration conf, PartInfo partitionInfo) throws IOException {
- return HCatUtil.getStorageHandler(
- conf,
- partitionInfo.getStorageHandlerClassName(),
- partitionInfo.getSerdeClassName(),
- partitionInfo.getInputFormatClassName(),
- partitionInfo.getOutputFormatClassName());
- }
-
- /**
- * Create an instance of a storage handler. If storageHandler == null,
- * then a surrogate StorageHandler is used to encapsulate the InputFormat, OutputFormat and SerDe.
- * This StorageHandler assumes the other supplied storage artifacts are for a file-based storage system.
- * @param conf job's configuration will be used to configure the Configurable StorageHandler
- * @param storageHandler fully qualified class name of the desired StorageHandler instance
- * @param serDe fully qualified class name of the desired SerDe instance
- * @param inputFormat fully qualified class name of the desired InputFormat instance
- * @param outputFormat fully qualified class name of the desired outputFormat instance
- * @return storageHandler instance
- * @throws IOException
- */
- public static HCatStorageHandler getStorageHandler(Configuration conf,
- String storageHandler,
- String serDe,
- String inputFormat,
- String outputFormat)
- throws IOException {
-
- if ((storageHandler == null) || (storageHandler.equals(FosterStorageHandler.class.getName()))) {
- try {
- FosterStorageHandler fosterStorageHandler =
- new FosterStorageHandler(inputFormat, outputFormat, serDe);
- fosterStorageHandler.setConf(conf);
- return fosterStorageHandler;
- } catch (ClassNotFoundException e) {
- throw new IOException("Failed to load "
- + "foster storage handler", e);
- }
- }
-
- try {
- Class<? extends HCatStorageHandler> handlerClass =
- (Class<? extends HCatStorageHandler>) Class
- .forName(storageHandler, true, JavaUtils.getClassLoader());
- return (HCatStorageHandler) ReflectionUtils.newInstance(
- handlerClass, conf);
- } catch (ClassNotFoundException e) {
- throw new IOException("Error in loading storage handler."
- + e.getMessage(), e);
- }
- }
-
- public static Pair<String, String> getDbAndTableName(String tableName) throws IOException {
- String[] dbTableNametokens = tableName.split("\\.");
- if (dbTableNametokens.length == 1) {
- return new Pair<String, String>(MetaStoreUtils.DEFAULT_DATABASE_NAME, tableName);
- } else if (dbTableNametokens.length == 2) {
- return new Pair<String, String>(dbTableNametokens[0], dbTableNametokens[1]);
- } else {
- throw new IOException("tableName expected in the form "
- + "<databasename>.<table name> or <table name>. Got " + tableName);
- }
- }
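
getDbAndTableName() splits on dots and falls back to the default database for unqualified names; more than one dot is rejected:

    Pair<String, String> p = HCatUtil.getDbAndTableName("mydb.sales"); // ("mydb", "sales")
    Pair<String, String> q = HCatUtil.getDbAndTableName("sales");      // ("default", "sales")
    // HCatUtil.getDbAndTableName("a.b.c") throws IOException
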
-
- public static Map<String, String>
- getInputJobProperties(HCatStorageHandler storageHandler,
- InputJobInfo inputJobInfo) {
- Properties props = inputJobInfo.getTableInfo().getStorerInfo().getProperties();
- props.put(serdeConstants.SERIALIZATION_LIB,storageHandler.getSerDeClass().getName());
- TableDesc tableDesc = new TableDesc(storageHandler.getInputFormatClass(),
- storageHandler.getOutputFormatClass(),props);
- if (tableDesc.getJobProperties() == null) {
- tableDesc.setJobProperties(new HashMap<String, String>());
- }
-
- Map<String, String> jobProperties = new HashMap<String, String>();
- try {
- tableDesc.getJobProperties().put(
- HCatConstants.HCAT_KEY_JOB_INFO,
- HCatUtil.serialize(inputJobInfo));
-
- storageHandler.configureInputJobProperties(tableDesc,
- jobProperties);
-
- } catch (IOException e) {
- throw new IllegalStateException(
- "Failed to configure StorageHandler", e);
- }
-
- return jobProperties;
- }
-
- @InterfaceAudience.Private
- @InterfaceStability.Evolving
- public static void
- configureOutputStorageHandler(HCatStorageHandler storageHandler,
- Configuration conf,
- OutputJobInfo outputJobInfo) {
- //TODO replace IgnoreKeyTextOutputFormat with a
- //HiveOutputFormatWrapper in StorageHandler
- Properties props = outputJobInfo.getTableInfo().getStorerInfo().getProperties();
- props.put(serdeConstants.SERIALIZATION_LIB,storageHandler.getSerDeClass().getName());
- TableDesc tableDesc = new TableDesc(storageHandler.getInputFormatClass(),
- IgnoreKeyTextOutputFormat.class,props);
- if (tableDesc.getJobProperties() == null)
- tableDesc.setJobProperties(new HashMap<String, String>());
- for (Map.Entry<String, String> el : conf) {
- tableDesc.getJobProperties().put(el.getKey(), el.getValue());
- }
-
- Map<String, String> jobProperties = new HashMap<String, String>();
- try {
- tableDesc.getJobProperties().put(
- HCatConstants.HCAT_KEY_OUTPUT_INFO,
- HCatUtil.serialize(outputJobInfo));
-
- storageHandler.configureOutputJobProperties(tableDesc,
- jobProperties);
-
- for (Map.Entry<String, String> el : jobProperties.entrySet()) {
- conf.set(el.getKey(), el.getValue());
- }
- } catch (IOException e) {
- throw new IllegalStateException(
- "Failed to configure StorageHandler", e);
- }
- }
-
- /**
- * Replace the contents of dest with the contents of src
- * @param src
- * @param dest
- */
- public static void copyConf(Configuration src, Configuration dest) {
- dest.clear();
- for (Map.Entry<String, String> el : src) {
- dest.set(el.getKey(), el.getValue());
- }
- }
-
- /**
- * Get or create a hive client depending on whether it exists in the cache or not
- * @param hiveConf The hive configuration
- * @return the client
- * @throws MetaException When HiveMetaStoreClient couldn't be created
- * @throws IOException
- */
- public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf)
- throws MetaException, IOException {
-
- // Singleton behaviour: create the cache instance if required. The cache needs to be created lazily,
- // using the expiry time available in hiveConf.
-
- if (hiveClientCache == null) {
- synchronized (HiveMetaStoreClient.class) {
- if (hiveClientCache == null) {
- hiveClientCache = new HiveClientCache(hiveConf.getInt(HCatConstants.HCAT_HIVE_CLIENT_EXPIRY_TIME,
- DEFAULT_HIVE_CACHE_EXPIRY_TIME_SECONDS));
- }
- }
- }
- try {
- return hiveClientCache.get(hiveConf);
- } catch (LoginException e) {
- throw new IOException("Couldn't create hiveMetaStoreClient, Error getting UGI for user", e);
- }
- }
-
- public static void closeHiveClientQuietly(HiveMetaStoreClient client) {
- try {
- if (client != null)
- client.close();
- } catch (Exception e) {
- LOG.debug("Error closing metastore client. Ignored the error.", e);
- }
- }
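
A typical caller pairs getHiveClient() with closeHiveClientQuietly() so the cached client is always released, even on error paths:

    HiveConf hiveConf = HCatUtil.getHiveConf(conf);
    HiveMetaStoreClient client = null;
    try {
      client = HCatUtil.getHiveClient(hiveConf);
      // ... metastore calls ...
    } finally {
      HCatUtil.closeHiveClientQuietly(client);
    }
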
-
- public static HiveConf getHiveConf(Configuration conf)
- throws IOException {
-
- HiveConf hiveConf = new HiveConf(conf, HCatUtil.class);
-
- //copy the hive conf into the job conf and restore it
- //in the backend context
- if (conf.get(HCatConstants.HCAT_KEY_HIVE_CONF) == null) {
- conf.set(HCatConstants.HCAT_KEY_HIVE_CONF,
- HCatUtil.serialize(hiveConf.getAllProperties()));
- } else {
- //Copy configuration properties into the hive conf
- Properties properties = (Properties) HCatUtil.deserialize(
- conf.get(HCatConstants.HCAT_KEY_HIVE_CONF));
-
- for (Map.Entry