diff --git a/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig new file mode 100644 index 0000000..b8870f2 --- /dev/null +++ b/common/src/java/org/apache/hadoop/hive/conf/HiveConf.java.orig @@ -0,0 +1,4001 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.conf; + +import com.google.common.base.Joiner; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceAudience.LimitedPrivate; +import org.apache.hadoop.hive.conf.Validator.PatternSet; +import org.apache.hadoop.hive.conf.Validator.RangeValidator; +import org.apache.hadoop.hive.conf.Validator.RatioValidator; +import org.apache.hadoop.hive.conf.Validator.SizeValidator; +import org.apache.hadoop.hive.conf.Validator.StringSet; +import org.apache.hadoop.hive.conf.Validator.TimeValidator; +import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapreduce.lib.input.CombineFileInputFormat; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.Shell; +import org.apache.hive.common.HiveCompat; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.login.LoginException; + +import java.io.*; +import java.net.URL; +import java.net.URLDecoder; +import java.net.URLEncoder; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.concurrent.TimeUnit; +import java.util.regex.Matcher; +import java.util.regex.Pattern; + +/** + * Hive Configuration. 
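+ *
+ * A minimal usage sketch (illustrative only; it assumes the getVar/getBoolVar
+ * accessors defined later in this class):
+ * <pre>{@code
+ *   HiveConf conf = new HiveConf();
+ *   String warehouse = conf.getVar(HiveConf.ConfVars.METASTOREWAREHOUSE);
+ *   boolean parallel = conf.getBoolVar(HiveConf.ConfVars.EXECPARALLEL);
+ * }</pre>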
+ */
+public class HiveConf extends Configuration {
+  protected String hiveJar;
+  protected Properties origProp;
+  protected String auxJars;
+  private static final Logger l4j = LoggerFactory.getLogger(HiveConf.class);
+  private static boolean loadMetastoreConfig = false;
+  private static boolean loadHiveServer2Config = false;
+  private static URL hiveDefaultURL = null;
+  private static URL hiveSiteURL = null;
+  private static URL hivemetastoreSiteUrl = null;
+  private static URL hiveServer2SiteUrl = null;
+
+  private static byte[] confVarByteArray = null;
+
+
+  private static final Map<String, ConfVars> vars = new HashMap<String, ConfVars>();
+  private static final Map<String, ConfVars> metaConfs = new HashMap<String, ConfVars>();
+  private final List<String> restrictList = new ArrayList<String>();
+  private final Set<String> hiddenSet = new HashSet<String>();
+
+  private Pattern modWhiteListPattern = null;
+  private volatile boolean isSparkConfigUpdated = false;
+  private static final int LOG_PREFIX_LENGTH = 64;
+
+  public boolean getSparkConfigUpdated() {
+    return isSparkConfigUpdated;
+  }
+
+  public void setSparkConfigUpdated(boolean isSparkConfigUpdated) {
+    this.isSparkConfigUpdated = isSparkConfigUpdated;
+  }
+
+  public interface EncoderDecoder<K, V> {
+    V encode(K key);
+    K decode(V value);
+  }
+
+  public static class URLEncoderDecoder implements EncoderDecoder<String, String> {
+    private static final String UTF_8 = "UTF-8";
+    @Override
+    public String encode(String key) {
+      try {
+        return URLEncoder.encode(key, UTF_8);
+      } catch (UnsupportedEncodingException e) {
+        return key;
+      }
+    }
+
+    @Override
+    public String decode(String value) {
+      try {
+        return URLDecoder.decode(value, UTF_8);
+      } catch (UnsupportedEncodingException e) {
+        return value;
+      }
+    }
+  }
+  public static class EncoderDecoderFactory {
+    public static final URLEncoderDecoder URL_ENCODER_DECODER = new URLEncoderDecoder();
+  }
+
+  static {
+    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
+    if (classLoader == null) {
+      classLoader = HiveConf.class.getClassLoader();
+    }
+
+    hiveDefaultURL = classLoader.getResource("hive-default.xml");
+
+    // Look for hive-site.xml on the CLASSPATH and log its location if found.
+    hiveSiteURL = classLoader.getResource("hive-site.xml");
+    hivemetastoreSiteUrl = classLoader.getResource("hivemetastore-site.xml");
+    hiveServer2SiteUrl = classLoader.getResource("hiveserver2-site.xml");
+
+    for (ConfVars confVar : ConfVars.values()) {
+      vars.put(confVar.varname, confVar);
+    }
+
+    Set<String> llapDaemonConfVarsSetLocal = new LinkedHashSet<>();
+    populateLlapDaemonVarsSet(llapDaemonConfVarsSetLocal);
+    llapDaemonVarsSet = Collections.unmodifiableSet(llapDaemonConfVarsSetLocal);
+  }
+
+  @InterfaceAudience.Private
+  public static final String PREFIX_LLAP = "llap.";
+  @InterfaceAudience.Private
+  public static final String PREFIX_HIVE_LLAP = "hive.llap.";
+
+ /**
+ * Metastore related options that the db is initialized against. When a conf
+ * var in this list is changed, the metastore instance for the CLI will
+ * be recreated so that the change will take effect.
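+ *
+ * A sketch of how a caller might snapshot these values to detect such a
+ * change (illustrative only; any diffing scheme works, and this is not the
+ * CLI's actual recreation logic):
+ * <pre>{@code
+ *   Map<String, String> snapshot = new HashMap<String, String>();
+ *   for (HiveConf.ConfVars oneVar : HiveConf.metaVars) {
+ *     snapshot.put(oneVar.varname, conf.get(oneVar.varname, ""));
+ *   }
+ * }</pre>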
+ */ + public static final HiveConf.ConfVars[] metaVars = { + HiveConf.ConfVars.METASTOREWAREHOUSE, + HiveConf.ConfVars.METASTOREURIS, + HiveConf.ConfVars.METASTORE_SERVER_PORT, + HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES, + HiveConf.ConfVars.METASTORETHRIFTFAILURERETRIES, + HiveConf.ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, + HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, + HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_LIFETIME, + HiveConf.ConfVars.METASTOREPWD, + HiveConf.ConfVars.METASTORECONNECTURLHOOK, + HiveConf.ConfVars.METASTORECONNECTURLKEY, + HiveConf.ConfVars.METASTORESERVERMINTHREADS, + HiveConf.ConfVars.METASTORESERVERMAXTHREADS, + HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE, + HiveConf.ConfVars.METASTORE_INT_ORIGINAL, + HiveConf.ConfVars.METASTORE_INT_ARCHIVED, + HiveConf.ConfVars.METASTORE_INT_EXTRACTED, + HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE, + HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL, + HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL, + HiveConf.ConfVars.METASTORE_CACHE_PINOBJTYPES, + HiveConf.ConfVars.METASTORE_CONNECTION_POOLING_TYPE, + HiveConf.ConfVars.METASTORE_VALIDATE_TABLES, + HiveConf.ConfVars.METASTORE_VALIDATE_COLUMNS, + HiveConf.ConfVars.METASTORE_VALIDATE_CONSTRAINTS, + HiveConf.ConfVars.METASTORE_STORE_MANAGER_TYPE, + HiveConf.ConfVars.METASTORE_AUTO_CREATE_ALL, + HiveConf.ConfVars.METASTORE_AUTO_START_MECHANISM_MODE, + HiveConf.ConfVars.METASTORE_TRANSACTION_ISOLATION, + HiveConf.ConfVars.METASTORE_CACHE_LEVEL2, + HiveConf.ConfVars.METASTORE_CACHE_LEVEL2_TYPE, + HiveConf.ConfVars.METASTORE_IDENTIFIER_FACTORY, + HiveConf.ConfVars.METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK, + HiveConf.ConfVars.METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS, + HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_MAX, + HiveConf.ConfVars.METASTORE_EVENT_LISTENERS, + HiveConf.ConfVars.METASTORE_EVENT_CLEAN_FREQ, + HiveConf.ConfVars.METASTORE_EVENT_EXPIRY_DURATION, + HiveConf.ConfVars.METASTORE_FILTER_HOOK, + HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL, + HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS, + HiveConf.ConfVars.METASTORE_PART_INHERIT_TBL_PROPS, + HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX, + HiveConf.ConfVars.METASTORE_INIT_HOOKS, + HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS, + HiveConf.ConfVars.HMSHANDLERATTEMPTS, + HiveConf.ConfVars.HMSHANDLERINTERVAL, + HiveConf.ConfVars.HMSHANDLERFORCERELOADCONF, + HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN, + HiveConf.ConfVars.METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS, + HiveConf.ConfVars.METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES, + HiveConf.ConfVars.USERS_IN_ADMIN_ROLE, + HiveConf.ConfVars.HIVE_AUTHORIZATION_MANAGER, + HiveConf.ConfVars.HIVE_TXN_MANAGER, + HiveConf.ConfVars.HIVE_TXN_TIMEOUT, + HiveConf.ConfVars.HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE, + HiveConf.ConfVars.HIVE_TXN_MAX_OPEN_BATCH, + HiveConf.ConfVars.HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_ENABLED, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_SIZE, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_FPP, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_TTL, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT, + HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL, + 
HiveConf.ConfVars.METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL,
+      HiveConf.ConfVars.METASTORE_FASTPATH,
+      HiveConf.ConfVars.METASTORE_HBASE_CATALOG_CACHE_SIZE,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE,
+      HiveConf.ConfVars.METASTORE_HBASE_CACHE_TIME_TO_LIVE,
+      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_WRITER_WAIT,
+      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_READER_WAIT,
+      HiveConf.ConfVars.METASTORE_HBASE_CACHE_MAX_FULL,
+      HiveConf.ConfVars.METASTORE_HBASE_CACHE_CLEAN_UNTIL,
+      HiveConf.ConfVars.METASTORE_HBASE_CONNECTION_CLASS,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_MEMORY_TTL,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY,
+      HiveConf.ConfVars.METASTORE_HBASE_AGGR_STATS_HBASE_TTL,
+      HiveConf.ConfVars.METASTORE_HBASE_FILE_METADATA_THREADS
+  };
+
+ /**
+ * User configurable Metastore vars
+ */
+  public static final HiveConf.ConfVars[] metaConfVars = {
+      HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL,
+      HiveConf.ConfVars.METASTORE_TRY_DIRECT_SQL_DDL,
+      HiveConf.ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT,
+      HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN
+  };
+
+  static {
+    for (ConfVars confVar : metaConfVars) {
+      metaConfs.put(confVar.varname, confVar);
+    }
+  }
+
+  public static final String HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME = "hive.llap.daemon.service.principal";
+
+
+ /**
+ * dbVars are the parameters that can be set per database. If these
+ * parameters are set as a database property, when switching to that
+ * database, the HiveConf variable will be changed. The change of these
+ * parameters will effectively change the DFS and MapReduce clusters
+ * for different databases.
+ */
+  public static final HiveConf.ConfVars[] dbVars = {
+    HiveConf.ConfVars.HADOOPBIN,
+    HiveConf.ConfVars.METASTOREWAREHOUSE,
+    HiveConf.ConfVars.SCRATCHDIR
+  };
+
+ /**
+ * Variables used by LLAP daemons.
+ * TODO: Eventually auto-populate this based on prefixes. The conf variables
+ * will need to be renamed for this.
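+ *
+ * A sketch of the prefix-based auto-population that TODO describes, assuming
+ * the daemon variables eventually all converge on the "hive.llap." prefix
+ * (PREFIX_HIVE_LLAP):
+ * <pre>{@code
+ *   for (ConfVars oneVar : ConfVars.values()) {
+ *     if (oneVar.varname.startsWith(PREFIX_HIVE_LLAP)) {
+ *       llapDaemonVarsSetLocal.add(oneVar.varname);
+ *     }
+ *   }
+ * }</pre>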
+ */
+  private static final Set<String> llapDaemonVarsSet;
+
+  private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal) {
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_ENABLED.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MODE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MIN_ALLOC.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_MAX_ALLOC.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_ARENA_COUNT.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_MEMORY_MAX_SIZE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOCATOR_DIRECT.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_USE_LRFU.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_LRFU_LAMBDA.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_CACHE_ALLOW_SYNTHETIC_FILEID.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_USE_FILEID_PATH.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ORC_ENABLE_TIME_COUNTERS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_IO_THREADPOOL_SIZE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_PRINCIPAL.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_KERBEROS_KEYTAB_FILE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_KERBEROS_PRINCIPAL.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_KERBEROS_KEYTAB_FILE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ZKSM_ZK_CONNECTION_STRING.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_SECURITY_ACL.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_ACL.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DELEGATION_TOKEN_LIFETIME.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_MANAGEMENT_RPC_PORT.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_WEB_AUTO_AUTH.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_NUM_HANDLERS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WORK_DIRS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_SHUFFLE_PORT.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_YARN_CONTAINER_MB.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_EXECUTORS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_RPC_PORT.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_MEMORY_PER_INSTANCE_MB.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_VCPUS_PER_INSTANCE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_NUM_FILE_CLEANER_THREADS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_FILE_CLEANUP_DELAY_SECONDS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_HOSTS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_SERVICE_REFRESH_INTERVAL.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_ALLOW_PERMANENT_FNS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_PORT.varname);
+    llapDaemonVarsSetLocal.add(ConfVars.LLAP_DAEMON_WEB_SSL.varname);
+  }
+
+ /**
+ * Get a set containing configuration parameter names used by LLAP Server instances
+ * @return an unmodifiable set containing llap ConfVars
+ */
+  public static final Set<String> getLlapDaemonConfVars() {
+    return llapDaemonVarsSet;
+  }
+
+
+ /**
+ * ConfVars.
+ *
+ * These are the default configuration properties for Hive. Each HiveConf
+ * object is initialized as follows:
+ *
+ * 1) Hadoop configuration properties are applied.
+ * 2) ConfVar properties with non-null values are overlaid.
+ * 3) hive-site.xml properties are overlaid.
+ *
+ * WARNING: think twice before adding any Hadoop configuration properties
+ * with non-null values to this list as they will override any values defined
+ * in the underlying Hadoop configuration.
+ */
+  public static enum ConfVars {
+    // QL execution stuff
+    SCRIPTWRAPPER("hive.exec.script.wrapper", null, ""),
+    PLAN("hive.exec.plan", "", ""),
+    STAGINGDIR("hive.exec.stagingdir", ".hive-staging",
+        "Directory name that will be created inside table locations in order to support HDFS encryption. " +
+        "This replaces ${hive.exec.scratchdir} for query results with the exception of read-only tables. " +
+        "In all cases ${hive.exec.scratchdir} is still used for other temporary files, such as job plans."),
+    SCRATCHDIR("hive.exec.scratchdir", "/tmp/hive",
+        "HDFS root scratch dir for Hive jobs which gets created with write all (733) permission. " +
+        "For each connecting user, an HDFS scratch dir: ${hive.exec.scratchdir}/<username> is created, " +
+        "with ${hive.scratch.dir.permission}."),
+    LOCALSCRATCHDIR("hive.exec.local.scratchdir",
+        "${system:java.io.tmpdir}" + File.separator + "${system:user.name}",
+        "Local scratch space for Hive jobs"),
+    DOWNLOADED_RESOURCES_DIR("hive.downloaded.resources.dir",
+        "${system:java.io.tmpdir}" + File.separator + "${hive.session.id}_resources",
+        "Temporary local directory for added resources in the remote file system."),
+    SCRATCHDIRPERMISSION("hive.scratch.dir.permission", "700",
+        "The permission for the user specific scratch directories that get created."),
+    SUBMITVIACHILD("hive.exec.submitviachild", false, ""),
+    SUBMITLOCALTASKVIACHILD("hive.exec.submit.local.task.via.child", true,
+        "Determines whether local tasks (typically mapjoin hashtable generation phase) run in a \n" +
+        "separate JVM (true recommended) or not. \n" +
+        "Avoids the overhead of spawning new JVM, but can lead to out-of-memory issues."),
+    SCRIPTERRORLIMIT("hive.exec.script.maxerrsize", 100000,
+        "Maximum number of bytes a script is allowed to emit to standard error (per map-reduce task). \n" +
+        "This prevents runaway scripts from filling log partitions to capacity"),
+    ALLOWPARTIALCONSUMP("hive.exec.script.allow.partial.consumption", false,
+        "When enabled, this option allows a user script to exit successfully without consuming \n" +
+        "all the data from the standard input."),
+    STREAMREPORTERPERFIX("stream.stderr.reporter.prefix", "reporter:",
+        "Streaming jobs that log to standard error with this prefix can log counter or status information."),
+    STREAMREPORTERENABLED("stream.stderr.reporter.enabled", true,
+        "Enable consumption of status and counter messages for streaming jobs."),
+    COMPRESSRESULT("hive.exec.compress.output", false,
+        "This controls whether the final outputs of a query (to a local/HDFS file or a Hive table) is compressed. 
\n" +
+        "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
+    COMPRESSINTERMEDIATE("hive.exec.compress.intermediate", false,
+        "This controls whether intermediate files produced by Hive between multiple map-reduce jobs are compressed. \n" +
+        "The compression codec and other options are determined from Hadoop config variables mapred.output.compress*"),
+    COMPRESSINTERMEDIATECODEC("hive.intermediate.compression.codec", "", ""),
+    COMPRESSINTERMEDIATETYPE("hive.intermediate.compression.type", "", ""),
+    BYTESPERREDUCER("hive.exec.reducers.bytes.per.reducer", (long) (256 * 1000 * 1000),
+        "Size per reducer. The default is 256Mb, i.e. if the input size is 1G, it will use 4 reducers."),
+    MAXREDUCERS("hive.exec.reducers.max", 1009,
+        "Max number of reducers that will be used. If the one specified in the configuration parameter mapred.reduce.tasks is\n" +
+        "negative, Hive will use this one as the max number of reducers when automatically determining the number of reducers."),
+    PREEXECHOOKS("hive.exec.pre.hooks", "",
+        "Comma-separated list of pre-execution hooks to be invoked for each statement. \n" +
+        "A pre-execution hook is specified as the name of a Java class which implements the \n" +
+        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
+    POSTEXECHOOKS("hive.exec.post.hooks", "",
+        "Comma-separated list of post-execution hooks to be invoked for each statement. \n" +
+        "A post-execution hook is specified as the name of a Java class which implements the \n" +
+        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
+    ONFAILUREHOOKS("hive.exec.failure.hooks", "",
+        "Comma-separated list of on-failure hooks to be invoked for each statement. \n" +
+        "An on-failure hook is specified as the name of Java class which implements the \n" +
+        "org.apache.hadoop.hive.ql.hooks.ExecuteWithHookContext interface."),
+    QUERYREDACTORHOOKS("hive.exec.query.redactor.hooks", "",
+        "Comma-separated list of hooks to be invoked for each query which can \n" +
+        "transform the query before it's placed in the job.xml file. Must be a Java class which \n" +
+        "extends from the org.apache.hadoop.hive.ql.hooks.Redactor abstract class."),
+    CLIENTSTATSPUBLISHERS("hive.client.stats.publishers", "",
+        "Comma-separated list of statistics publishers to be invoked on counters on each job. \n" +
+        "A client stats publisher is specified as the name of a Java class which implements the \n" +
+        "org.apache.hadoop.hive.ql.stats.ClientStatsPublisher interface."),
+    EXECPARALLEL("hive.exec.parallel", false, "Whether to execute jobs in parallel"),
+    EXECPARALLETHREADNUMBER("hive.exec.parallel.thread.number", 8,
+        "How many jobs at most can be executed in parallel"),
+    HIVESPECULATIVEEXECREDUCERS("hive.mapred.reduce.tasks.speculative.execution", true,
+        "Whether speculative execution for reducers should be turned on. "),
+    HIVECOUNTERSPULLINTERVAL("hive.exec.counters.pull.interval", 1000L,
+        "The interval with which to poll the JobTracker for the counters of the running job. 
\n" +
+        "The smaller it is the more load there will be on the jobtracker, the higher it is the less granular the updates will be."),
+    DYNAMICPARTITIONING("hive.exec.dynamic.partition", true,
+        "Whether or not to allow dynamic partitions in DML/DDL."),
+    DYNAMICPARTITIONINGMODE("hive.exec.dynamic.partition.mode", "strict",
+        "In strict mode, the user must specify at least one static partition\n" +
+        "in case the user accidentally overwrites all partitions.\n" +
+        "In nonstrict mode all partitions are allowed to be dynamic."),
+    DYNAMICPARTITIONMAXPARTS("hive.exec.max.dynamic.partitions", 1000,
+        "Maximum number of dynamic partitions allowed to be created in total."),
+    DYNAMICPARTITIONMAXPARTSPERNODE("hive.exec.max.dynamic.partitions.pernode", 100,
+        "Maximum number of dynamic partitions allowed to be created in each mapper/reducer node."),
+    MAXCREATEDFILES("hive.exec.max.created.files", 100000L,
+        "Maximum number of HDFS files created by all mappers/reducers in a MapReduce job."),
+    DEFAULTPARTITIONNAME("hive.exec.default.partition.name", "__HIVE_DEFAULT_PARTITION__",
+        "The default partition name in case the dynamic partition column value is null/empty string or any other values that cannot be escaped. \n" +
+        "This value must not contain any special character used in HDFS URI (e.g., ':', '%', '/' etc). \n" +
+        "The user has to be aware that the dynamic partition value should not contain this value to avoid confusion."),
+    DEFAULT_ZOOKEEPER_PARTITION_NAME("hive.lockmgr.zookeeper.default.partition.name", "__HIVE_DEFAULT_ZOOKEEPER_PARTITION__", ""),
+
+    // Whether to show a link to the most failed task + debugging tips
+    SHOW_JOB_FAIL_DEBUG_INFO("hive.exec.show.job.failure.debug.info", true,
+        "If a job fails, whether to provide a link in the CLI to the task with the\n" +
+        "most failures, along with debugging hints if applicable."),
+    JOB_DEBUG_CAPTURE_STACKTRACES("hive.exec.job.debug.capture.stacktraces", true,
+        "Whether or not stack traces parsed from the task logs of a sampled failed task \n" +
+        "for each failed job should be stored in the SessionState"),
+    JOB_DEBUG_TIMEOUT("hive.exec.job.debug.timeout", 30000, ""),
+    TASKLOG_DEBUG_TIMEOUT("hive.exec.tasklog.debug.timeout", 20000, ""),
+    OUTPUT_FILE_EXTENSION("hive.output.file.extension", null,
+        "String used as a file extension for output files. \n" +
+        "If not set, defaults to the codec extension for text files (e.g. 
\".gz\"), or no extension otherwise."),
+
+    HIVE_IN_TEST("hive.in.test", false, "internal usage only, true in test mode", true),
+
+    HIVE_IN_TEZ_TEST("hive.in.tez.test", false, "internal use only, true when in testing tez",
+        true),
+
+    LOCALMODEAUTO("hive.exec.mode.local.auto", false,
+        "Let Hive determine whether to run in local mode automatically"),
+    LOCALMODEMAXBYTES("hive.exec.mode.local.auto.inputbytes.max", 134217728L,
+        "When hive.exec.mode.local.auto is true, input bytes should be less than this for local mode."),
+    LOCALMODEMAXINPUTFILES("hive.exec.mode.local.auto.input.files.max", 4,
+        "When hive.exec.mode.local.auto is true, the number of tasks should be less than this for local mode."),
+
+    DROPIGNORESNONEXISTENT("hive.exec.drop.ignorenonexistent", true,
+        "Do not report an error if DROP TABLE/VIEW/Index/Function specifies a non-existent table/view/index/function"),
+
+    HIVEIGNOREMAPJOINHINT("hive.ignore.mapjoin.hint", true, "Ignore the mapjoin hint"),
+
+    HIVE_FILE_MAX_FOOTER("hive.file.max.footer", 100,
+        "maximum number of footer lines a user can define for a table file"),
+
+    HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES("hive.resultset.use.unique.column.names", true,
+        "Make column names unique in the result set by qualifying column names with table alias if needed.\n" +
+        "Table alias will be added to column names for queries of type \"select *\" or \n" +
+        "if query explicitly uses table alias \"select r1.x..\"."),
+
+    // Hadoop Configuration Properties
+    // Properties with null values are ignored and exist only for the purpose of giving us
+    // a symbolic name to reference in the Hive source code. Properties with non-null
+    // values will override any values set in the underlying Hadoop configuration.
+    HADOOPBIN("hadoop.bin.path", findHadoopBinary(), "", true),
+    YARNBIN("yarn.bin.path", findYarnBinary(), "", true),
+    HIVE_FS_HAR_IMPL("fs.har.impl", "org.apache.hadoop.hive.shims.HiveHarFileSystem",
+        "The implementation for accessing Hadoop Archives. Note that this won't be applicable to Hadoop versions less than 0.20"),
+    MAPREDMAXSPLITSIZE(FileInputFormat.SPLIT_MAXSIZE, 256000000L, "", true),
+    MAPREDMINSPLITSIZE(FileInputFormat.SPLIT_MINSIZE, 1L, "", true),
+    MAPREDMINSPLITSIZEPERNODE(CombineFileInputFormat.SPLIT_MINSIZE_PERNODE, 1L, "", true),
+    MAPREDMINSPLITSIZEPERRACK(CombineFileInputFormat.SPLIT_MINSIZE_PERRACK, 1L, "", true),
+    // The number of reduce tasks per job. Hadoop sets this value to 1 by default.
+    // By setting this property to -1, Hive will automatically determine the correct
+    // number of reducers.
+    HADOOPNUMREDUCERS("mapreduce.job.reduces", -1, "", true),
+
+    // Metastore stuff. Be sure to update HiveConf.metaVars when you add something here!
+    METASTOREWAREHOUSE("hive.metastore.warehouse.dir", "/user/hive/warehouse",
+        "location of default database for the warehouse"),
+    METASTOREURIS("hive.metastore.uris", "",
+        "Thrift URI for the remote metastore. Used by metastore client to connect to remote metastore."),
+
+    METASTORE_FASTPATH("hive.metastore.fastpath", false,
+        "Used to avoid all of the proxies and object copies in the metastore. Note, if this is " +
+        "set, you MUST use a local metastore (hive.metastore.uris must be empty) otherwise " +
+        "undefined and most likely undesired behavior will result"),
+    METASTORE_HBASE_CATALOG_CACHE_SIZE("hive.metastore.hbase.catalog.cache.size", 50000, "Maximum number of " +
+        "objects we will place in the hbase metastore catalog cache. 
The objects will be divided up by " +
+        "types that we need to cache."),
+    METASTORE_HBASE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.hbase.aggregate.stats.cache.size", 10000,
+        "Maximum number of aggregate stats nodes that we will place in the hbase metastore aggregate stats cache."),
+    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.hbase.aggregate.stats.max.partitions", 10000,
+        "Maximum number of partitions that are aggregated per cache node."),
+    METASTORE_HBASE_AGGREGATE_STATS_CACHE_FALSE_POSITIVE_PROBABILITY("hive.metastore.hbase.aggregate.stats.false.positive.probability",
+        (float) 0.01, "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
+    METASTORE_HBASE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.hbase.aggregate.stats.max.variance", (float) 0.1,
+        "Maximum tolerable variance in number of partitions between a cached node and our request (default 10%)."),
+    METASTORE_HBASE_CACHE_TIME_TO_LIVE("hive.metastore.hbase.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds for a cached node to be active in the cache before it becomes stale."),
+    METASTORE_HBASE_CACHE_MAX_WRITER_WAIT("hive.metastore.hbase.cache.max.writer.wait", "5000ms", new TimeValidator(TimeUnit.MILLISECONDS),
+        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
+    METASTORE_HBASE_CACHE_MAX_READER_WAIT("hive.metastore.hbase.cache.max.reader.wait", "1000ms", new TimeValidator(TimeUnit.MILLISECONDS),
+        "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
+    METASTORE_HBASE_CACHE_MAX_FULL("hive.metastore.hbase.cache.max.full", (float) 0.9,
+        "Maximum cache full % after which the cache cleaner thread kicks in."),
+    METASTORE_HBASE_CACHE_CLEAN_UNTIL("hive.metastore.hbase.cache.clean.until", (float) 0.8,
+        "The cleaner thread cleans until cache reaches this % full size."),
+    METASTORE_HBASE_CONNECTION_CLASS("hive.metastore.hbase.connection.class",
+        "org.apache.hadoop.hive.metastore.hbase.VanillaHBaseConnection",
+        "Class used to connect to HBase"),
+    METASTORE_HBASE_AGGR_STATS_CACHE_ENTRIES("hive.metastore.hbase.aggr.stats.cache.entries",
+        10000, "How many stats objects to cache in memory"),
+    METASTORE_HBASE_AGGR_STATS_MEMORY_TTL("hive.metastore.hbase.aggr.stats.memory.ttl", "60s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds stats objects live in memory after they are read from HBase."),
+    METASTORE_HBASE_AGGR_STATS_INVALIDATOR_FREQUENCY(
+        "hive.metastore.hbase.aggr.stats.invalidator.frequency", "5s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "How often the stats cache scans its HBase entries and looks for expired entries"),
+    METASTORE_HBASE_AGGR_STATS_HBASE_TTL("hive.metastore.hbase.aggr.stats.hbase.ttl", "604800s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds stats entries live in HBase cache after they are created. They may be" +
+        " invalidated by updates or partition drops before this. 
Default is one week."),
+    METASTORE_HBASE_FILE_METADATA_THREADS("hive.metastore.hbase.file.metadata.threads", 1,
+        "Number of threads to use to read file metadata in background to cache it."),
+
+    METASTORETHRIFTCONNECTIONRETRIES("hive.metastore.connect.retries", 3,
+        "Number of retries while opening a connection to metastore"),
+    METASTORETHRIFTFAILURERETRIES("hive.metastore.failure.retries", 1,
+        "Number of retries upon failure of Thrift metastore calls"),
+    METASTORE_SERVER_PORT("hive.metastore.port", 9083, "Hive metastore listener port"),
+    METASTORE_CLIENT_CONNECT_RETRY_DELAY("hive.metastore.client.connect.retry.delay", "1s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds for the client to wait between consecutive connection attempts"),
+    METASTORE_CLIENT_SOCKET_TIMEOUT("hive.metastore.client.socket.timeout", "600s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MetaStore Client socket timeout in seconds"),
+    METASTORE_CLIENT_SOCKET_LIFETIME("hive.metastore.client.socket.lifetime", "0s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "MetaStore Client socket lifetime in seconds. After this time is exceeded, client\n" +
+        "reconnects on the next MetaStore operation. A value of 0s means the connection\n" +
+        "has an infinite lifetime."),
+    METASTOREPWD("javax.jdo.option.ConnectionPassword", "mine",
+        "password to use against metastore database"),
+    METASTORECONNECTURLHOOK("hive.metastore.ds.connection.url.hook", "",
+        "Name of the hook to use for retrieving the JDO connection URL. If empty, the value in javax.jdo.option.ConnectionURL is used"),
+    METASTOREMULTITHREADED("javax.jdo.option.Multithreaded", true,
+        "Set this to true if multiple threads access metastore through JDO concurrently."),
+    METASTORECONNECTURLKEY("javax.jdo.option.ConnectionURL",
+        "jdbc:derby:;databaseName=metastore_db;create=true",
+        "JDBC connect string for a JDBC metastore.\n" +
+        "To use SSL to encrypt/authenticate the connection, provide database-specific SSL flag in the connection URL.\n" +
+        "For example, jdbc:postgresql://myhost/db?ssl=true for postgres database."),
+    METASTORE_DBACCESS_SSL_PROPS("hive.metastore.dbaccess.ssl.properties", "",
+        "Comma-separated SSL properties for metastore to access database when JDO connection URL\n" +
+        "enables SSL access. e.g. javax.net.ssl.trustStore=/tmp/truststore,javax.net.ssl.trustStorePassword=pwd."),
+    HMSHANDLERATTEMPTS("hive.hmshandler.retry.attempts", 10,
+        "The number of times to retry an HMSHandler call if there was a connection error."),
+    HMSHANDLERINTERVAL("hive.hmshandler.retry.interval", "2000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS), "The time between HMSHandler retry attempts on failure."),
+    HMSHANDLERFORCERELOADCONF("hive.hmshandler.force.reload.conf", false,
+        "Whether to force reloading of the HMSHandler configuration (including\n" +
+        "the connection URL), before the next metastore query that accesses the\n" +
+        "datastore. Once reloaded, this value is reset to false. 
Used for\n" +
+        "testing only."),
+    METASTORESERVERMAXMESSAGESIZE("hive.metastore.server.max.message.size", 100*1024*1024,
+        "Maximum message size in bytes an HMS will accept."),
+    METASTORESERVERMINTHREADS("hive.metastore.server.min.threads", 200,
+        "Minimum number of worker threads in the Thrift server's pool."),
+    METASTORESERVERMAXTHREADS("hive.metastore.server.max.threads", 1000,
+        "Maximum number of worker threads in the Thrift server's pool."),
+    METASTORE_TCP_KEEP_ALIVE("hive.metastore.server.tcp.keepalive", true,
+        "Whether to enable TCP keepalive for the metastore server. Keepalive will prevent accumulation of half-open connections."),
+
+    METASTORE_INT_ORIGINAL("hive.metastore.archive.intermediate.original",
+        "_INTERMEDIATE_ORIGINAL",
+        "Intermediate dir suffixes used for archiving. Not important what they\n" +
+        "are, as long as collisions are avoided"),
+    METASTORE_INT_ARCHIVED("hive.metastore.archive.intermediate.archived",
+        "_INTERMEDIATE_ARCHIVED", ""),
+    METASTORE_INT_EXTRACTED("hive.metastore.archive.intermediate.extracted",
+        "_INTERMEDIATE_EXTRACTED", ""),
+    METASTORE_KERBEROS_KEYTAB_FILE("hive.metastore.kerberos.keytab.file", "",
+        "The path to the Kerberos Keytab file containing the metastore Thrift server's service principal."),
+    METASTORE_KERBEROS_PRINCIPAL("hive.metastore.kerberos.principal",
+        "hive-metastore/_HOST@EXAMPLE.COM",
+        "The service principal for the metastore Thrift server. \n" +
+        "The special string _HOST will be replaced automatically with the correct host name."),
+    METASTORE_USE_THRIFT_SASL("hive.metastore.sasl.enabled", false,
+        "If true, the metastore Thrift interface will be secured with SASL. Clients must authenticate with Kerberos."),
+    METASTORE_USE_THRIFT_FRAMED_TRANSPORT("hive.metastore.thrift.framed.transport.enabled", false,
+        "If true, the metastore Thrift interface will use TFramedTransport. When false (default) a standard TTransport is used."),
+    METASTORE_USE_THRIFT_COMPACT_PROTOCOL("hive.metastore.thrift.compact.protocol.enabled", false,
+        "If true, the metastore Thrift interface will use TCompactProtocol. When false (default) TBinaryProtocol will be used.\n" +
+        "Setting it to true will break compatibility with older clients running TBinaryProtocol."),
+    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_CLS("hive.cluster.delegation.token.store.class",
+        "org.apache.hadoop.hive.thrift.MemoryTokenStore",
+        "The delegation token store implementation. Set to org.apache.hadoop.hive.thrift.ZooKeeperTokenStore for load-balanced cluster."),
+    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_CONNECTSTR(
+        "hive.cluster.delegation.token.store.zookeeper.connectString", "",
+        "The ZooKeeper token store connect string. You can re-use the configuration value\n" +
+        "set in hive.zookeeper.quorum, by leaving this parameter unset."),
+    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ZNODE(
+        "hive.cluster.delegation.token.store.zookeeper.znode", "/hivedelegation",
+        "The root path for token store data. Note that this is used by both HiveServer2 and\n" +
+        "MetaStore to store delegation Token. One directory gets created for each of them.\n" +
+        "The final directory names would have the servername appended to it (HIVESERVER2,\n" +
+        "METASTORE)."),
+    METASTORE_CLUSTER_DELEGATION_TOKEN_STORE_ZK_ACL(
+        "hive.cluster.delegation.token.store.zookeeper.acl", "",
+        "ACL for token store entries. Comma separated list of ACL entries. 
For example:\n" +
+        "sasl:hive/host1@MY.DOMAIN:cdrwa,sasl:hive/host2@MY.DOMAIN:cdrwa\n" +
+        "Defaults to all permissions for the hiveserver2/metastore process user."),
+    METASTORE_CACHE_PINOBJTYPES("hive.metastore.cache.pinobjtypes", "Table,StorageDescriptor,SerDeInfo,Partition,Database,Type,FieldSchema,Order",
+        "List of comma separated metastore object types that should be pinned in the cache"),
+    METASTORE_CONNECTION_POOLING_TYPE("datanucleus.connectionPoolingType", "BONECP",
+        "Specify connection pool library for datanucleus"),
+    METASTORE_VALIDATE_TABLES("datanucleus.schema.validateTables", false,
+        "validates existing schema against code. turn this on if you want to verify existing schema"),
+    METASTORE_VALIDATE_COLUMNS("datanucleus.schema.validateColumns", false,
+        "validates existing schema against code. turn this on if you want to verify existing schema"),
+    METASTORE_VALIDATE_CONSTRAINTS("datanucleus.schema.validateConstraints", false,
+        "validates existing schema against code. turn this on if you want to verify existing schema"),
+    METASTORE_STORE_MANAGER_TYPE("datanucleus.storeManagerType", "rdbms", "metadata store type"),
+    METASTORE_AUTO_CREATE_ALL("datanucleus.schema.autoCreateAll", false,
+        "creates necessary schema on a startup if one doesn't exist. set this to false, after creating it once"),
+    METASTORE_SCHEMA_VERIFICATION("hive.metastore.schema.verification", false,
+        "Enforce metastore schema version consistency.\n" +
+        "True: Verify that version information stored in metastore matches with one from Hive jars. Also disable automatic\n" +
+        "      schema migration attempt. Users are required to manually migrate schema after Hive upgrade which ensures\n" +
+        "      proper metastore schema migration. (Default)\n" +
+        "False: Warn if the version information stored in metastore doesn't match with one from Hive jars."),
+    METASTORE_SCHEMA_VERIFICATION_RECORD_VERSION("hive.metastore.schema.verification.record.version", true,
+        "When true the current MS version is recorded in the VERSION table. If this is disabled and verification is\n" +
+        "      enabled the MS will be unusable."),
+    METASTORE_AUTO_START_MECHANISM_MODE("datanucleus.autoStartMechanismMode", "checked",
+        "throw exception if metadata tables are incorrect"),
+    METASTORE_TRANSACTION_ISOLATION("datanucleus.transactionIsolation", "read-committed",
+        "Default transaction isolation level for identity generation."),
+    METASTORE_CACHE_LEVEL2("datanucleus.cache.level2", false,
+        "Use a level 2 cache. Turn this off if metadata is changed independently of Hive metastore server"),
+    METASTORE_CACHE_LEVEL2_TYPE("datanucleus.cache.level2.type", "none", ""),
+    METASTORE_IDENTIFIER_FACTORY("datanucleus.identifierFactory", "datanucleus1",
+        "Name of the identifier factory to use when generating table/column names etc. \n" +
+        "'datanucleus1' is used for backward compatibility with DataNucleus v1"),
+    METASTORE_USE_LEGACY_VALUE_STRATEGY("datanucleus.rdbms.useLegacyNativeValueStrategy", true, ""),
+    METASTORE_PLUGIN_REGISTRY_BUNDLE_CHECK("datanucleus.plugin.pluginRegistryBundleCheck", "LOG",
+        "Defines what happens when plugin bundles are found and are duplicated [EXCEPTION|LOG|NONE]"),
+    METASTORE_BATCH_RETRIEVE_MAX("hive.metastore.batch.retrieve.max", 300,
+        "Maximum number of objects (tables/partitions) that can be retrieved from metastore in one batch. 
\n" +
+        "The higher the number, the fewer round trips are needed to the Hive metastore server, \n" +
+        "but it may also cause a higher memory requirement at the client side."),
+    METASTORE_BATCH_RETRIEVE_OBJECTS_MAX(
+        "hive.metastore.batch.retrieve.table.partition.max", 1000,
+        "Maximum number of objects that metastore internally retrieves in one batch."),
+
+    METASTORE_INIT_HOOKS("hive.metastore.init.hooks", "",
+        "A comma separated list of hooks to be invoked at the beginning of HMSHandler initialization. \n" +
+        "An init hook is specified as the name of Java class which extends org.apache.hadoop.hive.metastore.MetaStoreInitListener."),
+    METASTORE_PRE_EVENT_LISTENERS("hive.metastore.pre.event.listeners", "",
+        "List of comma separated listeners for metastore events."),
+    METASTORE_EVENT_LISTENERS("hive.metastore.event.listeners", "", ""),
+    METASTORE_EVENT_DB_LISTENER_TTL("hive.metastore.event.db.listener.timetolive", "86400s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "time after which events will be removed from the database listener queue"),
+    METASTORE_AUTHORIZATION_STORAGE_AUTH_CHECKS("hive.metastore.authorization.storage.checks", false,
+        "Should the metastore do authorization checks against the underlying storage (usually hdfs) \n" +
+        "for operations like drop-partition (disallow the drop-partition if the user in\n" +
+        "question doesn't have permissions to delete the corresponding directory\n" +
+        "on the storage)."),
+    METASTORE_EVENT_CLEAN_FREQ("hive.metastore.event.clean.freq", "0s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Frequency at which timer task runs to purge expired events in metastore."),
+    METASTORE_EVENT_EXPIRY_DURATION("hive.metastore.event.expiry.duration", "0s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "Duration after which events expire from events table"),
+    METASTORE_EXECUTE_SET_UGI("hive.metastore.execute.setugi", true,
+        "In unsecure mode, setting this property to true will cause the metastore to execute DFS operations using \n" +
+        "the client's reported user and group permissions. Note that this property must be set on \n" +
+        "both the client and server sides. Further note that it's best effort. \n" +
+        "If client sets it to true and server sets it to false, client setting will be ignored."),
+    METASTORE_PARTITION_NAME_WHITELIST_PATTERN("hive.metastore.partition.name.whitelist.pattern", "",
+        "Partition names will be checked against this regex pattern and rejected if not matched."),
+
+    METASTORE_INTEGER_JDO_PUSHDOWN("hive.metastore.integral.jdo.pushdown", false,
+        "Allow JDO query pushdown for integral partition columns in metastore. Off by default. This\n" +
+        "improves metastore perf for integral columns, especially if there's a large number of partitions.\n" +
+        "However, it doesn't work correctly with integral values that are not normalized (e.g. have\n" +
+        "leading zeroes, like 0012). If metastore direct SQL is enabled and works, this optimization\n" +
+        "is also irrelevant."),
+    METASTORE_TRY_DIRECT_SQL("hive.metastore.try.direct.sql", true,
+        "Whether the Hive metastore should try to use direct SQL queries instead of the\n" +
+        "DataNucleus for certain read paths. This can improve metastore performance when\n" +
+        "fetching many partitions or column statistics by orders of magnitude; however, it\n" +
+        "is not guaranteed to work on all RDBMS-es and all versions. In case of SQL failures,\n" +
+        "the metastore will fall back to the DataNucleus, so it's safe even if SQL doesn't\n" +
+        "work for all queries on your datastore. 
If all SQL queries fail (for example, your\n" +
+        "metastore is backed by MongoDB), you might want to disable this to save the\n" +
+        "try-and-fall-back cost."),
+    METASTORE_DIRECT_SQL_PARTITION_BATCH_SIZE("hive.metastore.direct.sql.batch.size", 0,
+        "Batch size for partition and other object retrieval from the underlying DB in direct\n" +
+        "SQL. For some DBs like Oracle and MSSQL, there are hardcoded or perf-based limitations\n" +
+        "that necessitate this. For DBs that can handle the queries, this isn't necessary and\n" +
+        "may impede performance. -1 means no batching, 0 means automatic batching."),
+    METASTORE_TRY_DIRECT_SQL_DDL("hive.metastore.try.direct.sql.ddl", true,
+        "Same as hive.metastore.try.direct.sql, for read statements within a transaction that\n" +
+        "modifies metastore data. Due to non-standard behavior in Postgres, if a direct SQL\n" +
+        "select query has incorrect syntax or something similar inside a transaction, the\n" +
+        "entire transaction will fail and fall-back to DataNucleus will not be possible. You\n" +
+        "should disable the usage of direct SQL inside transactions if that happens in your case."),
+    METASTORE_DIRECT_SQL_MAX_QUERY_LENGTH("hive.direct.sql.max.query.length", 100, "The maximum\n" +
+        " size of a query string (in KB)."),
+    METASTORE_DIRECT_SQL_MAX_ELEMENTS_IN_CLAUSE("hive.direct.sql.max.elements.in.clause", 1000,
+        "The maximum number of values in an IN clause. Once exceeded, it will be broken into\n" +
+        " multiple OR separated IN clauses."),
+    METASTORE_DIRECT_SQL_MAX_ELEMENTS_VALUES_CLAUSE("hive.direct.sql.max.elements.values.clause",
+        1000, "The maximum number of values in a VALUES clause for INSERT statement."),
+    METASTORE_ORM_RETRIEVE_MAPNULLS_AS_EMPTY_STRINGS("hive.metastore.orm.retrieveMapNullsAsEmptyStrings", false,
+        "Thrift does not support nulls in maps, so any nulls present in maps retrieved from ORM must " +
+        "either be pruned or converted to empty strings. Some backing dbs such as Oracle persist empty strings " +
+        "as nulls, so we should set this parameter if we wish to reverse that behaviour. For others, " +
+        "pruning is the correct behaviour"),
+    METASTORE_DISALLOW_INCOMPATIBLE_COL_TYPE_CHANGES(
+        "hive.metastore.disallow.incompatible.col.type.changes", true,
+        "If true (the default), ALTER TABLE operations which change the type of a\n" +
+        "column (say STRING) to an incompatible type (say MAP<STRING, STRING>) are disallowed.\n" +
+        "RCFile default SerDe (ColumnarSerDe) serializes the values in such a way that the\n" +
+        "datatypes can be converted from string to any type. The map is also serialized as\n" +
+        "a string, which can be read as a string as well. However, with any binary\n" +
+        "serialization, this is not true. Blocking the ALTER TABLE prevents ClassCastExceptions\n" +
+        "when subsequently trying to access old partitions.\n" +
+        "\n" +
+        "Primitive types like INT, STRING, BIGINT, etc., are compatible with each other and are\n" +
+        "not blocked.\n" +
+        "\n" +
+        "See HIVE-4409 for more details."),
+
+    NEWTABLEDEFAULTPARA("hive.table.parameters.default", "",
+        "Default property values for newly created tables"),
+    DDL_CTL_PARAMETERS_WHITELIST("hive.ddl.createtablelike.properties.whitelist", "",
+        "Table Properties to copy over when executing a Create Table Like."),
+    METASTORE_RAW_STORE_IMPL("hive.metastore.rawstore.impl", "org.apache.hadoop.hive.metastore.ObjectStore",
+        "Name of the class that implements org.apache.hadoop.hive.metastore.rawstore interface. 
\n" +
+        "This class is used to store and retrieve raw metadata objects such as table, database"),
+    METASTORE_TXN_STORE_IMPL("hive.metastore.txn.store.impl",
+        "org.apache.hadoop.hive.metastore.txn.CompactionTxnHandler",
+        "Name of class that implements org.apache.hadoop.hive.metastore.txn.TxnStore. This " +
+        "class is used to store and retrieve transactions and locks"),
+    METASTORE_CONNECTION_DRIVER("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver",
+        "Driver class name for a JDBC metastore"),
+    METASTORE_MANAGER_FACTORY_CLASS("javax.jdo.PersistenceManagerFactoryClass",
+        "org.datanucleus.api.jdo.JDOPersistenceManagerFactory",
+        "class implementing the jdo persistence"),
+    METASTORE_EXPRESSION_PROXY_CLASS("hive.metastore.expression.proxy",
+        "org.apache.hadoop.hive.ql.optimizer.ppr.PartitionExpressionForMetastore", ""),
+    METASTORE_DETACH_ALL_ON_COMMIT("javax.jdo.option.DetachAllOnCommit", true,
+        "Detaches all objects from session so that they can be used after transaction is committed"),
+    METASTORE_NON_TRANSACTIONAL_READ("javax.jdo.option.NonTransactionalRead", true,
+        "Reads outside of transactions"),
+    METASTORE_CONNECTION_USER_NAME("javax.jdo.option.ConnectionUserName", "APP",
+        "Username to use against metastore database"),
+    METASTORE_END_FUNCTION_LISTENERS("hive.metastore.end.function.listeners", "",
+        "List of comma separated listeners for the end of metastore functions."),
+    METASTORE_PART_INHERIT_TBL_PROPS("hive.metastore.partition.inherit.table.properties", "",
+        "List of comma separated keys occurring in table properties which will get inherited to newly created partitions. \n" +
+        "* implies all the keys will get inherited."),
+    METASTORE_FILTER_HOOK("hive.metastore.filter.hook", "org.apache.hadoop.hive.metastore.DefaultMetaStoreFilterHookImpl",
+        "Metastore hook class for filtering the metadata read results. 
If hive.security.authorization.manager " +
+        "is set to instance of HiveAuthorizerFactory, then this value is ignored."),
+    FIRE_EVENTS_FOR_DML("hive.metastore.dml.events", false, "If true, the metastore will be asked" +
+        " to fire events for DML operations"),
+    METASTORE_CLIENT_DROP_PARTITIONS_WITH_EXPRESSIONS("hive.metastore.client.drop.partitions.using.expressions", true,
+        "Choose whether dropping partitions with HCatClient pushes the partition-predicate to the metastore, " +
+        "or drops partitions iteratively"),
+
+    METASTORE_AGGREGATE_STATS_CACHE_ENABLED("hive.metastore.aggregate.stats.cache.enabled", true,
+        "Whether aggregate stats caching is enabled or not."),
+    METASTORE_AGGREGATE_STATS_CACHE_SIZE("hive.metastore.aggregate.stats.cache.size", 10000,
+        "Maximum number of aggregate stats nodes that we will place in the metastore aggregate stats cache."),
+    METASTORE_AGGREGATE_STATS_CACHE_MAX_PARTITIONS("hive.metastore.aggregate.stats.cache.max.partitions", 10000,
+        "Maximum number of partitions that are aggregated per cache node."),
+    METASTORE_AGGREGATE_STATS_CACHE_FPP("hive.metastore.aggregate.stats.cache.fpp", (float) 0.01,
+        "Maximum false positive probability for the Bloom Filter used in each aggregate stats cache node (default 1%)."),
+    METASTORE_AGGREGATE_STATS_CACHE_MAX_VARIANCE("hive.metastore.aggregate.stats.cache.max.variance", (float) 0.01,
+        "Maximum tolerable variance in number of partitions between a cached node and our request (default 1%)."),
+    METASTORE_AGGREGATE_STATS_CACHE_TTL("hive.metastore.aggregate.stats.cache.ttl", "600s", new TimeValidator(TimeUnit.SECONDS),
+        "Number of seconds for a cached node to be active in the cache before it becomes stale."),
+    METASTORE_AGGREGATE_STATS_CACHE_MAX_WRITER_WAIT("hive.metastore.aggregate.stats.cache.max.writer.wait", "5000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS),
+        "Number of milliseconds a writer will wait to acquire the writelock before giving up."),
+    METASTORE_AGGREGATE_STATS_CACHE_MAX_READER_WAIT("hive.metastore.aggregate.stats.cache.max.reader.wait", "1000ms",
+        new TimeValidator(TimeUnit.MILLISECONDS),
+        "Number of milliseconds a reader will wait to acquire the readlock before giving up."),
+    METASTORE_AGGREGATE_STATS_CACHE_MAX_FULL("hive.metastore.aggregate.stats.cache.max.full", (float) 0.9,
+        "Maximum cache full % after which the cache cleaner thread kicks in."),
+    METASTORE_AGGREGATE_STATS_CACHE_CLEAN_UNTIL("hive.metastore.aggregate.stats.cache.clean.until", (float) 0.8,
+        "The cleaner thread cleans until cache reaches this % full size."),
+    METASTORE_METRICS("hive.metastore.metrics.enabled", false, "Enable metrics on the metastore."),
+    METASTORE_INIT_METADATA_COUNT_ENABLED("hive.metastore.initial.metadata.count.enabled", true,
+        "Enable a metadata count at metastore startup for metrics."),
+
+    // Parameters for exporting metadata on table drop (requires the use of the
+    // org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre-event listener)
+    METADATA_EXPORT_LOCATION("hive.metadata.export.location", "",
+        "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
+        "it is the location to which the metadata will be exported. 
The default is an empty string, which results in the \n" +
+        "metadata being exported to the current user's home directory on HDFS."),
+    MOVE_EXPORTED_METADATA_TO_TRASH("hive.metadata.move.exported.metadata.to.trash", true,
+        "When used in conjunction with the org.apache.hadoop.hive.ql.parse.MetaDataExportListener pre event listener, \n" +
+        "this setting determines if the metadata that is exported will subsequently be moved to the user's trash directory \n" +
+        "alongside the dropped table data. This ensures that the metadata will be cleaned up along with the dropped table data."),
+
+    // CLI
+    CLIIGNOREERRORS("hive.cli.errors.ignore", false, ""),
+    CLIPRINTCURRENTDB("hive.cli.print.current.db", false,
+        "Whether to include the current database in the Hive prompt."),
+    CLIPROMPT("hive.cli.prompt", "hive",
+        "Command line prompt configuration value. Other hiveconf can be used in this configuration value. \n" +
+        "Variable substitution will only be invoked at the Hive CLI startup."),
+    CLIPRETTYOUTPUTNUMCOLS("hive.cli.pretty.output.num.cols", -1,
+        "The number of columns to use when formatting output generated by the DESCRIBE PRETTY table_name command.\n" +
+        "If the value of this property is -1, then Hive will use the auto-detected terminal width."),
+
+    HIVE_METASTORE_FS_HANDLER_CLS("hive.metastore.fs.handler.class", "org.apache.hadoop.hive.metastore.HiveMetaStoreFsImpl", ""),
+
+    // Things we log in the jobconf
+
+    // session identifier
+    HIVESESSIONID("hive.session.id", "", ""),
+    // whether session is running in silent mode or not
+    HIVESESSIONSILENT("hive.session.silent", false, ""),
+
+    HIVE_SESSION_HISTORY_ENABLED("hive.session.history.enabled", false,
+        "Whether to log Hive query, query plan, runtime statistics etc."),
+
+    HIVEQUERYSTRING("hive.query.string", "",
+        "Query being executed (might be multiple per session)"),
+
+    HIVEQUERYID("hive.query.id", "",
+        "ID for query being executed (might be multiple per session)"),
+
+    HIVEJOBNAMELENGTH("hive.jobname.length", 50, "max jobname length"),
+
+    // hive jar
+    HIVEJAR("hive.jar.path", "",
+        "The location of hive_cli.jar that is used when submitting jobs in a separate jvm."),
+    HIVEAUXJARS("hive.aux.jars.path", "",
+        "The location of the plugin jars that contain implementations of user defined functions and serdes."),
+
+    // reloadable jars
+    HIVERELOADABLEJARS("hive.reloadable.aux.jars.path", "",
+        "The jars can be renewed by executing the reload command. These jars can then be " +
+        "used for auxiliary classes such as UDFs or SerDes."),
+
+    // hive added files and jars
+    HIVEADDEDFILES("hive.added.files.path", "", "This is an internal parameter."),
+    HIVEADDEDJARS("hive.added.jars.path", "", "This is an internal parameter."),
+    HIVEADDEDARCHIVES("hive.added.archives.path", "", "This is an internal parameter."),
+
+    HIVE_CURRENT_DATABASE("hive.current.database", "", "Database name used by current session. Internal usage only.", true),
+
+    // for hive script operator
+    HIVES_AUTO_PROGRESS_TIMEOUT("hive.auto.progress.timeout", "0s",
+        new TimeValidator(TimeUnit.SECONDS),
+        "How long to run autoprogressor for the script/UDTF operators.\n" +
+        "Set to 0 for forever."),
+    HIVESCRIPTAUTOPROGRESS("hive.script.auto.progress", false,
+        "Whether Hive Transform/Map/Reduce Clause should automatically send progress information to TaskTracker \n" +
+        "to avoid the task getting killed because of inactivity. Hive sends progress information when the script is \n" +
+        "outputting to stderr. 
This option removes the need to periodically produce stderr messages, \n" +
+        "but users should be cautious because this may prevent infinite loops in the scripts from being killed by TaskTracker."),
+    HIVESCRIPTIDENVVAR("hive.script.operator.id.env.var", "HIVE_SCRIPT_OPERATOR_ID",
+        "Name of the environment variable that holds the unique script operator ID in the user's \n" +
+        "transform function (the custom mapper/reducer that the user has specified in the query)"),
+    HIVESCRIPTTRUNCATEENV("hive.script.operator.truncate.env", false,
+        "Truncate each environment variable for external script in scripts operator to 20KB (to fit system limits)"),
+    HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
+        "hive.txn.valid.txns,hive.script.operator.env.blacklist",
+        "Comma separated list of keys from the configuration file not to convert to environment " +
+        "variables when invoking the script operator"),
+    HIVE_STRICT_CHECKS_LARGE_QUERY("hive.strict.checks.large.query", false,
+        "Enabling strict large query checks disallows the following:\n" +
+        " Orderby without limit.\n" +
+        " No partition being picked up for a query against partitioned table.\n" +
+        "Note that these checks currently do not consider data size, only the query pattern."),
+    HIVE_STRICT_CHECKS_TYPE_SAFETY("hive.strict.checks.type.safety", true,
+        "Enabling strict type safety checks disallows the following:\n" +
+        " Comparing bigints and strings.\n" +
+        " Comparing bigints and doubles."),
+    HIVE_STRICT_CHECKS_CARTESIAN("hive.strict.checks.cartesian.product", true,
+        "Enabling strict Cartesian product checks disallows the following:\n" +
+        " Cartesian product (cross join)."),
+    @Deprecated
+    HIVEMAPREDMODE("hive.mapred.mode", "nonstrict",
+        "Deprecated; use hive.strict.checks.* settings instead."),
+    HIVEALIAS("hive.alias", "", ""),
+    HIVEMAPSIDEAGGREGATE("hive.map.aggr", true, "Whether to use map-side aggregation in Hive Group By queries"),
+    HIVEGROUPBYSKEW("hive.groupby.skewindata", false, "Whether there is skew in data to optimize group by queries"),
+    HIVEJOINEMITINTERVAL("hive.join.emit.interval", 1000,
+        "How many rows in the right-most join operand Hive should buffer before emitting the join result."),
+    HIVEJOINCACHESIZE("hive.join.cache.size", 25000,
+        "How many rows in the joining tables (except the streaming table) should be cached in memory."),
+
+    // CBO related
+    HIVE_CBO_ENABLED("hive.cbo.enable", true, "Flag to control enabling Cost Based Optimizations using Calcite framework."),
+    HIVE_CBO_RETPATH_HIVEOP("hive.cbo.returnpath.hiveop", false, "Flag to control calcite plan to hive operator conversion"),
+    HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on " +
+        "CPU, IO and cardinality. 
+ HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"), + HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over network;" + + " expressed as multiple of CPU cost"), + HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;" + + " expressed as multiple of NETWORK cost"), + HIVE_CBO_COST_MODEL_LFS_READ("hive.cbo.costmodel.local.fs.read", "4.0", "Default cost of reading a byte from local FS;" + + " expressed as multiple of NETWORK cost"), + HIVE_CBO_COST_MODEL_HDFS_WRITE("hive.cbo.costmodel.hdfs.write", "10.0", "Default cost of writing a byte to HDFS;" + + " expressed as multiple of Local FS write cost"), + HIVE_CBO_COST_MODEL_HDFS_READ("hive.cbo.costmodel.hdfs.read", "1.5", "Default cost of reading a byte from HDFS;" + + " expressed as multiple of Local FS read cost"), + AGGR_JOIN_TRANSPOSE("hive.transpose.aggr.join", false, "push aggregates through join"), + + // hive.mapjoin.bucket.cache.size has been replaced by hive.smbjoin.cache.rows, + // needs to be removed by Hive 0.13. Also, do not change default (see SMB operator) + HIVEMAPJOINBUCKETCACHESIZE("hive.mapjoin.bucket.cache.size", 100, ""), + + HIVEMAPJOINUSEOPTIMIZEDTABLE("hive.mapjoin.optimized.hashtable", true, + "Whether Hive should use a memory-optimized hash table for MapJoin.\n" + + "Only works on Tez and Spark, because the memory-optimized hashtable cannot be serialized."), + HIVEMAPJOINOPTIMIZEDTABLEPROBEPERCENT("hive.mapjoin.optimized.hashtable.probe.percent", + (float) 0.5, "Probing space percentage of the optimized hashtable"), + HIVEUSEHYBRIDGRACEHASHJOIN("hive.mapjoin.hybridgrace.hashtable", true, "Whether to use hybrid " + + "grace hash join as the join method for mapjoin. Tez only."), + HIVEHYBRIDGRACEHASHJOINMEMCHECKFREQ("hive.mapjoin.hybridgrace.memcheckfrequency", 1024, "For " + + "hybrid grace hash join, how often (how many rows apart) we check if memory is full. " + + "This number should be a power of 2."), + HIVEHYBRIDGRACEHASHJOINMINWBSIZE("hive.mapjoin.hybridgrace.minwbsize", 524288, "For hybrid grace " + + "hash join, the minimum write buffer size used by the optimized hashtable. Default is 512 KB."), + HIVEHYBRIDGRACEHASHJOINMINNUMPARTITIONS("hive.mapjoin.hybridgrace.minnumpartitions", 16, "For " + + "hybrid grace hash join, the minimum number of partitions to create."), + HIVEHASHTABLEWBSIZE("hive.mapjoin.optimized.hashtable.wbsize", 8 * 1024 * 1024, + "Optimized hashtable (see hive.mapjoin.optimized.hashtable) uses a chain of buffers to\n" + + "store data. This is one buffer size. 
HT may be slightly faster if this is larger, but for small\n" + + "joins unnecessary memory will be allocated and then trimmed."), + + HIVESMBJOINCACHEROWS("hive.smbjoin.cache.rows", 10000, + "How many rows with the same key value should be cached in memory per smb joined table."), + HIVEGROUPBYMAPINTERVAL("hive.groupby.mapaggr.checkinterval", 100000, + "Number of rows after which the size of the grouping keys/aggregation classes is checked"), + HIVEMAPAGGRHASHMEMORY("hive.map.aggr.hash.percentmemory", (float) 0.5, + "Portion of total memory to be used by map-side group aggregation hash table"), + HIVEMAPJOINFOLLOWEDBYMAPAGGRHASHMEMORY("hive.mapjoin.followby.map.aggr.hash.percentmemory", (float) 0.3, + "Portion of total memory to be used by map-side group aggregation hash table, when this group by is followed by map join"), + HIVEMAPAGGRMEMORYTHRESHOLD("hive.map.aggr.hash.force.flush.memory.threshold", (float) 0.9, + "The max memory to be used by map-side group aggregation hash table.\n" + + "If the memory usage is higher than this number, the data will be flushed"), + HIVEMAPAGGRHASHMINREDUCTION("hive.map.aggr.hash.min.reduction", (float) 0.5, + "Hash aggregation will be turned off if the ratio between hash table size and input rows is bigger than this number. \n" + + "Set to 1 to make sure hash aggregation is never turned off."), + HIVEMULTIGROUPBYSINGLEREDUCER("hive.multigroupby.singlereducer", true, + "Whether to optimize multi group by query to generate a single M/R job plan. If the multi group by query has \n" + + "common group by keys, it will be optimized to generate a single M/R job."), + HIVE_MAP_GROUPBY_SORT("hive.map.groupby.sorted", true, + "If the bucketing/sorting properties of the table exactly match the grouping key, whether to perform \n" + + "the group by in the mapper by using BucketizedHiveInputFormat. The only downside to this\n" + + "is that it limits the number of mappers to the number of files."), + HIVE_GROUPBY_ORDERBY_POSITION_ALIAS("hive.groupby.orderby.position.alias", false, + "Whether to enable using Column Position Alias in Group By or Order By"), + HIVE_NEW_JOB_GROUPING_SET_CARDINALITY("hive.new.job.grouping.set.cardinality", 30, + "Whether a new map-reduce job should be launched for grouping sets/rollups/cubes (see the sketch below).\n" + + "For a query like: select a, b, c, count(1) from T group by a, b, c with rollup;\n" + + "4 rows are created per row: (a, b, c), (a, b, null), (a, null, null), (null, null, null).\n" + + "This can lead to explosion across the map-reduce boundary if the cardinality of T is very high,\n" + + "and map-side aggregation does not do a very good job. \n" + + "\n" + + "This parameter decides if Hive should add an additional map-reduce job. If the grouping set\n" + + "cardinality (4 in the example above) is more than this value, a new MR job is added under the\n" + + "assumption that the original group by will reduce the data size."), + + // Max filesize used to do a single copy (after that, distcp is used) + HIVE_EXEC_COPYFILE_MAXSIZE("hive.exec.copyfile.maxsize", 32L * 1024 * 1024 /*32M*/, + "Maximum file size (in bytes) that Hive uses to do single HDFS copies between directories. " + + "Distributed copies (distcp) will be used instead for bigger files so that copies can be done faster."), + + // for hive udtf operator + HIVEUDTFAUTOPROGRESS("hive.udtf.auto.progress", false, + "Whether Hive should automatically send progress information to TaskTracker \n" + + "when using UDTFs to prevent the task getting killed because of inactivity. Users should be cautious \n" + + "because this may prevent TaskTracker from killing tasks with infinite loops."),
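+ // A sketch of the grouping-set check above (groupingSetCount is a hypothetical value;
+ // for the rollup example in that entry it would be 4):
+ //   int limit = conf.getIntVar(ConfVars.HIVE_NEW_JOB_GROUPING_SET_CARDINALITY);
+ //   boolean addExtraMRJob = groupingSetCount > limit;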
+ HIVEDEFAULTFILEFORMAT("hive.default.fileformat", "TextFile", new StringSet("TextFile", "SequenceFile", "RCfile", "ORC"), + "Default file format for CREATE TABLE statement. Users can explicitly override it by CREATE TABLE ... STORED AS [FORMAT]"), + HIVEDEFAULTMANAGEDFILEFORMAT("hive.default.fileformat.managed", "none", + new StringSet("none", "TextFile", "SequenceFile", "RCfile", "ORC"), + "Default file format for CREATE TABLE statement applied to managed tables only. External tables will be \n" + + "created with the format specified by hive.default.fileformat. Leaving this at 'none' will result in using \n" + + "hive.default.fileformat for all tables."), + HIVEQUERYRESULTFILEFORMAT("hive.query.result.fileformat", "SequenceFile", new StringSet("TextFile", "SequenceFile", "RCfile"), + "Default file format for storing result of the query."), + HIVECHECKFILEFORMAT("hive.fileformat.check", true, "Whether to check file format or not when loading data files"), + + // default serde for rcfile + HIVEDEFAULTRCFILESERDE("hive.default.rcfile.serde", + "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe", + "The default SerDe Hive will use for the RCFile format"), + + HIVEDEFAULTSERDE("hive.default.serde", + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + "The default SerDe Hive will use for storage formats that do not specify a SerDe."), + + SERDESUSINGMETASTOREFORSCHEMA("hive.serdes.using.metastore.for.schema", + "org.apache.hadoop.hive.ql.io.orc.OrcSerde," + + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe," + + "org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe," + + "org.apache.hadoop.hive.serde2.dynamic_type.DynamicSerDe," + + "org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe," + + "org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe," + + "org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe," + + "org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe", + "SerDes retrieving schema from metastore. This is an internal parameter."), + + HIVEHISTORYFILELOC("hive.querylog.location", + "${system:java.io.tmpdir}" + File.separator + "${system:user.name}", + "Location of Hive run time structured log file"), + + HIVE_LOG_INCREMENTAL_PLAN_PROGRESS("hive.querylog.enable.plan.progress", true, + "Whether to log the plan's progress every time a job's progress is checked.\n" + + "These logs are written to the location specified by hive.querylog.location"), + + HIVE_LOG_INCREMENTAL_PLAN_PROGRESS_INTERVAL("hive.querylog.plan.progress.interval", "60000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "The interval to wait between logging the plan's progress.\n" + + "If there is a whole number percentage change in the progress of the mappers or the reducers,\n" + + "the progress is logged regardless of this value.\n" + + "The actual interval will be the ceiling of (this value divided by the value of\n" + + "hive.exec.counters.pull.interval) multiplied by the value of hive.exec.counters.pull.interval,\n" + + "i.e. if this value does not divide evenly by the value of hive.exec.counters.pull.interval, progress will be\n" + + "logged less frequently than specified (see the sketch below).\n" + + "This only has an effect if hive.querylog.enable.plan.progress is set to true."), + + HIVESCRIPTSERDE("hive.script.serde", "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe", + "The default SerDe for transmitting input data to and reading output data from the user scripts."),
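+ // Sketch of the effective-interval arithmetic above (progressMs is this interval and
+ // pullMs is hive.exec.counters.pull.interval, which is not defined in this section):
+ //   long actualMs = (long) Math.ceil((double) progressMs / pullMs) * pullMs;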
"), + HIVESCRIPTRECORDREADER("hive.script.recordreader", + "org.apache.hadoop.hive.ql.exec.TextRecordReader", + "The default record reader for reading data from the user scripts. "), + HIVESCRIPTRECORDWRITER("hive.script.recordwriter", + "org.apache.hadoop.hive.ql.exec.TextRecordWriter", + "The default record writer for writing data to the user scripts. "), + HIVESCRIPTESCAPE("hive.transform.escape.input", false, + "This adds an option to escape special chars (newlines, carriage returns and\n" + + "tabs) when they are passed to the user script. This is useful if the Hive tables\n" + + "can contain data that contains special characters."), + HIVEBINARYRECORDMAX("hive.binary.record.max.length", 1000, + "Read from a binary stream and treat each hive.binary.record.max.length bytes as a record. \n" + + "The last record before the end of stream can have less than hive.binary.record.max.length bytes"), + + // HWI + HIVEHWILISTENHOST("hive.hwi.listen.host", "0.0.0.0", "This is the host address the Hive Web Interface will listen on"), + HIVEHWILISTENPORT("hive.hwi.listen.port", "9999", "This is the port the Hive Web Interface will listen on"), + HIVEHWIWARFILE("hive.hwi.war.file", "${env:HWI_WAR_FILE}", + "This sets the path to the HWI war file, relative to ${HIVE_HOME}. "), + + HIVEHADOOPMAXMEM("hive.mapred.local.mem", 0, "mapper/reducer memory in local mode"), + + //small table file size + HIVESMALLTABLESFILESIZE("hive.mapjoin.smalltable.filesize", 25000000L, + "The threshold for the input file size of the small tables; if the file size is smaller \n" + + "than this threshold, it will try to convert the common join into map join"), + + + HIVE_SCHEMA_EVOLUTION("hive.exec.schema.evolution", false, + "Use schema evolution to convert self-describing file format's data to the schema desired by the reader."), + + HIVE_TRANSACTIONAL_TABLE_SCAN("hive.transactional.table.scan", false, + "internal usage only -- do transaction (ACID) table scan.", true), + + HIVESAMPLERANDOMNUM("hive.sample.seednumber", 0, + "A number used to percentage sampling. By changing this number, user will change the subsets of data sampled."), + + // test mode in hive mode + HIVETESTMODE("hive.test.mode", false, + "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.", + false), + HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_", + "In test mode, specfies prefixes for the output table", false), + HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32, + "In test mode, specfies sampling frequency for table, which is not bucketed,\n" + + "For example, the following query:\n" + + " INSERT OVERWRITE TABLE dest SELECT col1 from src\n" + + "would be converted to\n" + + " INSERT OVERWRITE TABLE test_dest\n" + + " SELECT col1 from src TABLESAMPLE (BUCKET 1 out of 32 on rand(1))", false), + HIVETESTMODENOSAMPLE("hive.test.mode.nosamplelist", "", + "In test mode, specifies comma separated table names which would not apply sampling", false), + HIVETESTMODEDUMMYSTATAGGR("hive.test.dummystats.aggregator", "", "internal variable for test", false), + HIVETESTMODEDUMMYSTATPUB("hive.test.dummystats.publisher", "", "internal variable for test", false), + HIVETESTCURRENTTIMESTAMP("hive.test.currenttimestamp", null, "current timestamp for test", false), + HIVETESTMODEROLLBACKTXN("hive.test.rollbacktxn", false, "For testing only. Will mark every ACID transaction aborted", false), + HIVETESTMODEFAILCOMPACTION("hive.test.fail.compaction", false, "For testing only. 
Will cause CompactorMR to fail.", false), + + HIVEMERGEMAPFILES("hive.merge.mapfiles", true, + "Merge small files at the end of a map-only job"), + HIVEMERGEMAPREDFILES("hive.merge.mapredfiles", false, + "Merge small files at the end of a map-reduce job"), + HIVEMERGETEZFILES("hive.merge.tezfiles", false, "Merge small files at the end of a Tez DAG"), + HIVEMERGESPARKFILES("hive.merge.sparkfiles", false, "Merge small files at the end of a Spark DAG Transformation"), + HIVEMERGEMAPFILESSIZE("hive.merge.size.per.task", (long) (256 * 1000 * 1000), + "Size of merged files at the end of the job"), + HIVEMERGEMAPFILESAVGSIZE("hive.merge.smallfiles.avgsize", (long) (16 * 1000 * 1000), + "When the average output file size of a job is less than this number, Hive will start an additional \n" + + "map-reduce job to merge the output files into bigger files (see the sketch below). This is only done for map-only jobs \n" + + "if hive.merge.mapfiles is true, and for map-reduce jobs if hive.merge.mapredfiles is true."), + HIVEMERGERCFILEBLOCKLEVEL("hive.merge.rcfile.block.level", true, ""), + HIVEMERGEORCFILESTRIPELEVEL("hive.merge.orcfile.stripe.level", true, + "When hive.merge.mapfiles, hive.merge.mapredfiles or hive.merge.tezfiles is enabled\n" + + "while writing a table with ORC file format, enabling this config will do stripe-level\n" + + "fast merge for small ORC files. Note that enabling this config will not honor the\n" + + "padding tolerance config (hive.exec.orc.block.padding.tolerance)."), + + HIVEUSEEXPLICITRCFILEHEADER("hive.exec.rcfile.use.explicit.header", true, + "If this is set the header for RCFiles will simply be RCF. If this is not\n" + + "set the header will be that borrowed from sequence files, e.g. SEQ- followed\n" + + "by the input and output RCFile formats."), + HIVEUSERCFILESYNCCACHE("hive.exec.rcfile.use.sync.cache", true, ""), + + HIVE_RCFILE_RECORD_INTERVAL("hive.io.rcfile.record.interval", Integer.MAX_VALUE, ""), + HIVE_RCFILE_COLUMN_NUMBER_CONF("hive.io.rcfile.column.number.conf", 0, ""), + HIVE_RCFILE_TOLERATE_CORRUPTIONS("hive.io.rcfile.tolerate.corruptions", false, ""), + HIVE_RCFILE_RECORD_BUFFER_SIZE("hive.io.rcfile.record.buffer.size", 4194304, ""), // 4M + + PARQUET_MEMORY_POOL_RATIO("parquet.memory.pool.ratio", 0.5f, + "Maximum fraction of heap that can be used by Parquet file writers in one task.\n" + + "It is for avoiding OutOfMemory errors in tasks. Works with Parquet 1.6.0 and above.\n" + + "This config parameter is defined in Parquet, so that it does not start with 'hive.'."), + HIVE_PARQUET_TIMESTAMP_SKIP_CONVERSION("hive.parquet.timestamp.skip.conversion", true, + "The current Hive implementation of Parquet stores timestamps in UTC; this flag allows skipping the conversion " + + "when reading Parquet files written by other tools"), + HIVE_INT_TIMESTAMP_CONVERSION_IN_SECONDS("hive.int.timestamp.conversion.in.seconds", false, + "Boolean/tinyint/smallint/int/bigint value is interpreted as milliseconds during the timestamp conversion.\n" + + "Set this flag to true to interpret the value as seconds to be consistent with float/double." ), + HIVE_ORC_FILE_MEMORY_POOL("hive.exec.orc.memory.pool", 0.5f, + "Maximum fraction of heap that can be used by ORC file writers"), + HIVE_ORC_WRITE_FORMAT("hive.exec.orc.write.format", null, + "Define the version of the file to write. Possible values are 0.11 and 0.12.\n" + + "If this parameter is not defined, ORC will use the run length encoding (RLE)\n" + + "introduced in Hive 0.12. Any value other than 0.11 results in the 0.12 encoding."),
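+ // Sketch of the small-file merge decision above (avgOutputSize is a hypothetical
+ // stats-derived value for the job's average output file size):
+ //   boolean launchMergeJob = conf.getBoolVar(ConfVars.HIVEMERGEMAPFILES)
+ //       && avgOutputSize < conf.getLongVar(ConfVars.HIVEMERGEMAPFILESAVGSIZE);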
+ HIVE_ORC_DEFAULT_STRIPE_SIZE("hive.exec.orc.default.stripe.size", + 64L * 1024 * 1024, + "Define the default ORC stripe size, in bytes."), + HIVE_ORC_DEFAULT_BLOCK_SIZE("hive.exec.orc.default.block.size", 256L * 1024 * 1024, + "Define the default file system block size for ORC files."), + + HIVE_ORC_DICTIONARY_KEY_SIZE_THRESHOLD("hive.exec.orc.dictionary.key.size.threshold", 0.8f, + "If the number of keys in a dictionary is greater than this fraction of the total number of\n" + + "non-null rows, turn off dictionary encoding. Use 1 to always use dictionary encoding."), + HIVE_ORC_DEFAULT_ROW_INDEX_STRIDE("hive.exec.orc.default.row.index.stride", 10000, + "Define the default ORC index stride in number of rows. (Stride is the number of rows\n" + + "an index entry represents.)"), + HIVE_ORC_ROW_INDEX_STRIDE_DICTIONARY_CHECK("hive.orc.row.index.stride.dictionary.check", true, + "If enabled, the dictionary check will happen after the first row index stride (default 10000 rows);\n" + + "otherwise the dictionary check will happen before writing the first stripe. In both cases, the decision\n" + + "to use dictionary or not will be retained thereafter."), + HIVE_ORC_DEFAULT_BUFFER_SIZE("hive.exec.orc.default.buffer.size", 256 * 1024, + "Define the default ORC buffer size, in bytes."), + HIVE_ORC_DEFAULT_BLOCK_PADDING("hive.exec.orc.default.block.padding", true, + "Define the default block padding, which pads stripes to the HDFS block boundaries."), + HIVE_ORC_BLOCK_PADDING_TOLERANCE("hive.exec.orc.block.padding.tolerance", 0.05f, + "Define the tolerance for block padding as a decimal fraction of stripe size (for\n" + + "example, the default value 0.05 is 5% of the stripe size). For the defaults of 64MB\n" + + "ORC stripe and 256MB HDFS blocks, the default block padding tolerance of 5% will\n" + + "reserve a maximum of 3.2MB for padding within the 256MB block (see the sketch below). In that case, if the\n" + + "available size within the block is more than 3.2MB, a new smaller stripe will be\n" + + "inserted to fit within that space. This will make sure that no stripe written will\n" + + "cross block boundaries and cause remote reads within a node local task."), + HIVE_ORC_DEFAULT_COMPRESS("hive.exec.orc.default.compress", "ZLIB", "Define the default compression codec for ORC file"), + + HIVE_ORC_ENCODING_STRATEGY("hive.exec.orc.encoding.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"), + "Define the encoding strategy to use while writing data. Changing this will\n" + + "only affect the light weight encoding for integers. This flag will not\n" + + "change the compression level of higher level compression codec (like ZLIB)."), + + HIVE_ORC_COMPRESSION_STRATEGY("hive.exec.orc.compression.strategy", "SPEED", new StringSet("SPEED", "COMPRESSION"), + "Define the compression strategy to use while writing data. \n" + + "This changes the compression level of higher level compression codec (like ZLIB)."), + + HIVE_ORC_SPLIT_STRATEGY("hive.exec.orc.split.strategy", "HYBRID", new StringSet("HYBRID", "BI", "ETL"), + "This is not a user level config. BI strategy is used when the requirement is to spend less time in split generation" + + " as opposed to query execution (split generation does not read or cache file footers)." + + " ETL strategy is used when spending a little more time in split generation is acceptable" + + " (split generation reads and caches file footers). HYBRID chooses between the above strategies" + + " based on heuristics."),
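+ // Worked form of the padding-tolerance arithmetic above (values are the defaults
+ // quoted in that entry):
+ //   long stripeSize = conf.getLongVar(ConfVars.HIVE_ORC_DEFAULT_STRIPE_SIZE);      // 64MB
+ //   float tolerance = conf.getFloatVar(ConfVars.HIVE_ORC_BLOCK_PADDING_TOLERANCE); // 0.05
+ //   long maxPadding = (long) (tolerance * stripeSize); // 0.05 * 64MB = ~3.2MB per block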
+ HIVE_ORC_MS_FOOTER_CACHE_ENABLED("hive.orc.splits.ms.footer.cache.enabled", false, + "Whether to enable using file metadata cache in metastore for ORC file footers."), + HIVE_ORC_MS_FOOTER_CACHE_PPD("hive.orc.splits.ms.footer.cache.ppd.enabled", true, + "Whether to enable file footer cache PPD (hive.orc.splits.ms.footer.cache.enabled\n" + + "must also be set to true for this to work)."), + + HIVE_ORC_INCLUDE_FILE_FOOTER_IN_SPLITS("hive.orc.splits.include.file.footer", false, + "If turned on, splits generated by ORC will include metadata about the stripes in the file. This\n" + + "data is read remotely (from the client or HS2 machine) and sent to all the tasks."), + HIVE_ORC_SPLIT_DIRECTORY_BATCH_MS("hive.orc.splits.directory.batch.ms", 0, + "How long, in ms, to wait to batch input directories for processing during ORC split\n" + + "generation. 0 means process directories individually. This can increase the number of\n" + + "metastore calls if metastore metadata cache is used."), + HIVE_ORC_INCLUDE_FILE_ID_IN_SPLITS("hive.orc.splits.include.fileid", true, + "Include file ID in splits on file systems that support it."), + HIVE_ORC_ALLOW_SYNTHETIC_FILE_ID_IN_SPLITS("hive.orc.splits.allow.synthetic.fileid", true, + "Allow synthetic file ID in splits on file systems that don't have a native one."), + HIVE_ORC_CACHE_STRIPE_DETAILS_SIZE("hive.orc.cache.stripe.details.size", 10000, + "Max cache size for keeping meta info about orc splits cached in the client."), + HIVE_ORC_COMPUTE_SPLITS_NUM_THREADS("hive.orc.compute.splits.num.threads", 10, + "How many threads orc should use to create splits in parallel."), + HIVE_ORC_SKIP_CORRUPT_DATA("hive.exec.orc.skip.corrupt.data", false, + "If the ORC reader encounters corrupt data, this value will be used to determine\n" + + "whether to skip the corrupt data or throw an exception. The default behavior is to throw an exception."), + + HIVE_ORC_ZEROCOPY("hive.exec.orc.zerocopy", false, + "Use zerocopy reads with ORC. (This requires Hadoop 2.3 or later.)"), + + HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false, + "LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" + + "'1', and '0' as extended, legal boolean literals, in addition to 'TRUE' and 'FALSE'.\n" + + "The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" + + "boolean literals."), + + HIVESKEWJOIN("hive.optimize.skewjoin", false, + "Whether to enable skew join optimization. \n" + + "The algorithm is as follows: At runtime, detect the keys with a large skew. Instead of\n" + + "processing those keys, store them temporarily in an HDFS directory. In a follow-up map-reduce\n" + + "job, process those skewed keys. The same key need not be skewed for all the tables, and so,\n" + + "the follow-up map-reduce job (for the skewed keys) would be much faster, since it would be a\n" + + "map-join."), + HIVEDYNAMICPARTITIONHASHJOIN("hive.optimize.dynamic.partition.hashjoin", false, + "Whether to enable dynamically partitioned hash join optimization. 
\n" + + "This setting is also dependent on enabling hive.auto.convert.join"), + HIVECONVERTJOIN("hive.auto.convert.join", true, + "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size"), + HIVECONVERTJOINNOCONDITIONALTASK("hive.auto.convert.join.noconditionaltask", true, + "Whether Hive enables the optimization about converting common join into mapjoin based on the input file size. \n" + + "If this parameter is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than the\n" + + "specified size, the join is directly converted to a mapjoin (there is no conditional task)."), + + HIVECONVERTJOINNOCONDITIONALTASKTHRESHOLD("hive.auto.convert.join.noconditionaltask.size", + 10000000L, + "If hive.auto.convert.join.noconditionaltask is off, this parameter does not take affect. \n" + + "However, if it is on, and the sum of size for n-1 of the tables/partitions for a n-way join is smaller than this size, \n" + + "the join is directly converted to a mapjoin(there is no conditional task). The default is 10MB"), + HIVECONVERTJOINUSENONSTAGED("hive.auto.convert.join.use.nonstaged", false, + "For conditional joins, if input stream from a small alias can be directly applied to join operator without \n" + + "filtering or projection, the alias need not to be pre-staged in distributed cache via mapred local task.\n" + + "Currently, this is not working with vectorization or tez execution engine."), + HIVESKEWJOINKEY("hive.skewjoin.key", 100000, + "Determine if we get a skew key in join. If we see more than the specified number of rows with the same key in join operator,\n" + + "we think the key as a skew join key. "), + HIVESKEWJOINMAPJOINNUMMAPTASK("hive.skewjoin.mapjoin.map.tasks", 10000, + "Determine the number of map task used in the follow up map join job for a skew join.\n" + + "It should be used together with hive.skewjoin.mapjoin.min.split to perform a fine grained control."), + HIVESKEWJOINMAPJOINMINSPLIT("hive.skewjoin.mapjoin.min.split", 33554432L, + "Determine the number of map task at most used in the follow up map join job for a skew join by specifying \n" + + "the minimum split size. It should be used together with hive.skewjoin.mapjoin.map.tasks to perform a fine grained control."), + + HIVESENDHEARTBEAT("hive.heartbeat.interval", 1000, + "Send a heartbeat after this interval - used by mapjoin and filter operators"), + HIVELIMITMAXROWSIZE("hive.limit.row.max.size", 100000L, + "When trying a smaller subset of data for simple LIMIT, how much size we need to guarantee each row to have at least."), + HIVELIMITOPTLIMITFILE("hive.limit.optimize.limit.file", 10, + "When trying a smaller subset of data for simple LIMIT, maximum number of files we can sample."), + HIVELIMITOPTENABLE("hive.limit.optimize.enable", false, + "Whether to enable to optimization to trying a smaller subset of data for simple LIMIT first."), + HIVELIMITOPTMAXFETCH("hive.limit.optimize.fetch.max", 50000, + "Maximum number of rows allowed for a smaller subset of data for simple LIMIT, if it is a fetch query. 
\n" + + "Insert queries are not restricted by this limit."), + HIVELIMITPUSHDOWNMEMORYUSAGE("hive.limit.pushdown.memory.usage", 0.1f, new RatioValidator(), + "The fraction of available memory to be used for buffering rows in Reducesink operator for limit pushdown optimization."), + HIVELIMITTABLESCANPARTITION("hive.limit.query.max.table.partition", -1, + "This controls how many partitions can be scanned for each partitioned table.\n" + + "The default value \"-1\" means no limit."), + + HIVEHASHTABLEKEYCOUNTADJUSTMENT("hive.hashtable.key.count.adjustment", 1.0f, + "Adjustment to mapjoin hashtable size derived from table and column statistics; the estimate" + + " of the number of keys is divided by this value. If the value is 0, statistics are not used" + + "and hive.hashtable.initialCapacity is used instead."), + HIVEHASHTABLETHRESHOLD("hive.hashtable.initialCapacity", 100000, "Initial capacity of " + + "mapjoin hashtable if statistics are absent, or if hive.hashtable.key.count.adjustment is set to 0"), + HIVEHASHTABLELOADFACTOR("hive.hashtable.loadfactor", (float) 0.75, ""), + HIVEHASHTABLEFOLLOWBYGBYMAXMEMORYUSAGE("hive.mapjoin.followby.gby.localtask.max.memory.usage", (float) 0.55, + "This number means how much memory the local task can take to hold the key/value into an in-memory hash table \n" + + "when this map join is followed by a group by. If the local task's memory usage is more than this number, \n" + + "the local task will abort by itself. It means the data of the small table is too large to be held in memory."), + HIVEHASHTABLEMAXMEMORYUSAGE("hive.mapjoin.localtask.max.memory.usage", (float) 0.90, + "This number means how much memory the local task can take to hold the key/value into an in-memory hash table. \n" + + "If the local task's memory usage is more than this number, the local task will abort by itself. \n" + + "It means the data of the small table is too large to be held in memory."), + HIVEHASHTABLESCALE("hive.mapjoin.check.memory.rows", (long)100000, + "The number means after how many rows processed it needs to check the memory usage"), + + HIVEDEBUGLOCALTASK("hive.debug.localtask",false, ""), + + HIVEINPUTFORMAT("hive.input.format", "org.apache.hadoop.hive.ql.io.CombineHiveInputFormat", + "The default input format. Set this to HiveInputFormat if you encounter problems with CombineHiveInputFormat."), + HIVETEZINPUTFORMAT("hive.tez.input.format", "org.apache.hadoop.hive.ql.io.HiveInputFormat", + "The default input format for tez. Tez groups splits in the AM."), + + HIVETEZCONTAINERSIZE("hive.tez.container.size", -1, + "By default Tez will spawn containers of the size of a mapper. This can be used to overwrite."), + HIVETEZCPUVCORES("hive.tez.cpu.vcores", -1, + "By default Tez will ask for however many cpus map-reduce is configured to use per container.\n" + + "This can be used to overwrite."), + HIVETEZJAVAOPTS("hive.tez.java.opts", null, + "By default Tez will use the Java options from map tasks. This can be used to overwrite."), + HIVETEZLOGLEVEL("hive.tez.log.level", "INFO", + "The log level to use for tasks executing as part of the DAG.\n" + + "Used only if hive.tez.java.opts is used to configure Java options."), + HIVEQUERYNAME ("hive.query.name", null, + "This named is used by Tez to set the dag name. 
+ HIVEOPTIMIZEBUCKETINGSORTING("hive.optimize.bucketingsorting", true, + "Don't create a reducer for enforcing \n" + + "bucketing/sorting for queries of the form: \n" + + "insert overwrite table T2 select * from T1;\n" + + "where T1 and T2 are bucketed/sorted by the same keys into the same number of buckets."), + HIVEPARTITIONER("hive.mapred.partitioner", "org.apache.hadoop.hive.ql.io.DefaultHivePartitioner", ""), + HIVEENFORCESORTMERGEBUCKETMAPJOIN("hive.enforce.sortmergebucketmapjoin", false, + "If the user asked for sort-merge bucketed map-side join, and it cannot be performed, should the query fail or not?"), + HIVEENFORCEBUCKETMAPJOIN("hive.enforce.bucketmapjoin", false, + "If the user asked for bucketed map-side join, and it cannot be performed, \n" + + "should the query fail or not? For example, if the buckets in the tables being joined are\n" + + "not a multiple of each other, bucketed map-side join cannot be performed, and the\n" + + "query will fail if hive.enforce.bucketmapjoin is set to true."), + + HIVE_AUTO_SORTMERGE_JOIN("hive.auto.convert.sortmerge.join", false, + "Will the join be automatically converted to a sort-merge join, if the joined tables pass the criteria for sort-merge join."), + HIVE_AUTO_SORTMERGE_JOIN_BIGTABLE_SELECTOR( + "hive.auto.convert.sortmerge.join.bigtable.selection.policy", + "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ", + "The policy to choose the big table for automatic conversion to sort-merge join. \n" + + "By default, the table with the largest partitions is chosen as the big table. All policies are:\n" + + ". based on position of the table - the leftmost table is selected\n" + + "org.apache.hadoop.hive.ql.optimizer.LeftmostBigTableSMJ.\n" + + ". based on total size (all the partitions selected in the query) of the table \n" + + "org.apache.hadoop.hive.ql.optimizer.TableSizeBasedBigTableSelectorForAutoSMJ.\n" + + ". based on average size (all the partitions selected in the query) of the table \n" + + "org.apache.hadoop.hive.ql.optimizer.AvgPartitionSizeBasedBigTableSelectorForAutoSMJ.\n" + + "New policies can be added in future."), + HIVE_AUTO_SORTMERGE_JOIN_TOMAPJOIN( + "hive.auto.convert.sortmerge.join.to.mapjoin", false, + "If hive.auto.convert.sortmerge.join is set to true, and a join was converted to a sort-merge join, \n" + + "this parameter decides whether each table should be tried as a big table, and effectively a map-join should be\n" + + "tried. That would create a conditional task with n+1 children for an n-way join (1 child for each table as the\n" + + "big table), and the backup task will be the sort-merge join. In some cases, a map-join would be faster than a\n" + + "sort-merge join, if there is no advantage of having the output bucketed and sorted. 
For example, if a very big sorted\n" + + "and bucketed table with few files (say 10 files) is being joined with a very small sorted and bucketed table\n" + + "with few files (10 files), the sort-merge join will only use 10 mappers, and a simple map-only join might be faster\n" + + "if the complete small table can fit in memory, and a map-join can be performed."), + + HIVESCRIPTOPERATORTRUST("hive.exec.script.trust", false, ""), + HIVEROWOFFSET("hive.exec.rowoffset", false, + "Whether to provide the row offset virtual column"), + + // Optimizer + HIVEOPTINDEXFILTER("hive.optimize.index.filter", false, + "Whether to enable automatic use of indexes"), + HIVEINDEXAUTOUPDATE("hive.optimize.index.autoupdate", false, + "Whether to update stale indexes automatically"), + HIVEOPTPPD("hive.optimize.ppd", true, + "Whether to enable predicate pushdown"), + HIVEOPTPPD_WINDOWING("hive.optimize.ppd.windowing", true, + "Whether to enable predicate pushdown through windowing"), + HIVEPPDRECOGNIZETRANSITIVITY("hive.ppd.recognizetransivity", true, + "Whether to transitively replicate predicate filters over equijoin conditions."), + HIVEPPDREMOVEDUPLICATEFILTERS("hive.ppd.remove.duplicatefilters", true, + "During query optimization, filters may be pushed down in the operator tree. \n" + + "If this config is true, only pushed down filters remain in the operator tree, \n" + + "and the original filter is removed. If this config is false, the original filter \n" + + "is also left in the operator tree at the original place."), + HIVEPOINTLOOKUPOPTIMIZER("hive.optimize.point.lookup", true, + "Whether to transform OR clauses in Filter operators into IN clauses"), + HIVEPOINTLOOKUPOPTIMIZERMIN("hive.optimize.point.lookup.min", 31, + "Minimum number of OR clauses needed to transform into IN clauses"), + HIVEPARTITIONCOLUMNSEPARATOR("hive.optimize.partition.columns.separate", true, + "Extract partition columns from IN clauses"), + // Constant propagation optimizer + HIVEOPTCONSTANTPROPAGATION("hive.optimize.constant.propagation", true, "Whether to enable constant propagation optimizer"), + HIVEIDENTITYPROJECTREMOVER("hive.optimize.remove.identity.project", true, "Removes identity project from operator tree"), + HIVEMETADATAONLYQUERIES("hive.optimize.metadataonly", true, ""), + HIVENULLSCANOPTIMIZE("hive.optimize.null.scan", true, "Don't scan relations which are guaranteed to not generate any rows"), + HIVEOPTPPD_STORAGE("hive.optimize.ppd.storage", true, + "Whether to push predicates down to storage handlers"), + HIVEOPTGROUPBY("hive.optimize.groupby", true, + "Whether to enable the bucketed group by from bucketed partitions/tables."), + HIVEOPTBUCKETMAPJOIN("hive.optimize.bucketmapjoin", false, + "Whether to try bucket mapjoin"), + HIVEOPTSORTMERGEBUCKETMAPJOIN("hive.optimize.bucketmapjoin.sortedmerge", false, + "Whether to try sorted bucket merge map join"), + HIVEOPTREDUCEDEDUPLICATION("hive.optimize.reducededuplication", true, + "Remove extra map-reduce jobs if the data is already clustered by the same key which needs to be used again. \n" + + "This should always be set to true. Since it is a new feature, it has been made configurable."), + HIVEOPTREDUCEDEDUPLICATIONMINREDUCER("hive.optimize.reducededuplication.min.reducer", 4, + "Reduce deduplication merges two RSs by moving key/parts/reducer-num of the child RS to parent RS. 
\n" + + "That means if reducer-num of the child RS is fixed (order by or forced bucketing) and small, it can make very slow, single MR.\n" + + "The optimization will be automatically disabled if number of reducers would be less than specified value."), + + HIVEOPTSORTDYNAMICPARTITION("hive.optimize.sort.dynamic.partition", false, + "When enabled dynamic partitioning column will be globally sorted.\n" + + "This way we can keep only one record writer open for each partition value\n" + + "in the reducer thereby reducing the memory pressure on reducers."), + + HIVESAMPLINGFORORDERBY("hive.optimize.sampling.orderby", false, "Uses sampling on order-by clause for parallel execution."), + HIVESAMPLINGNUMBERFORORDERBY("hive.optimize.sampling.orderby.number", 1000, "Total number of samples to be obtained."), + HIVESAMPLINGPERCENTFORORDERBY("hive.optimize.sampling.orderby.percent", 0.1f, new RatioValidator(), + "Probability with which a row will be chosen."), + HIVEOPTIMIZEDISTINCTREWRITE("hive.optimize.distinct.rewrite", true, "When applicable this " + + "optimization rewrites distinct aggregates from a single stage to multi-stage " + + "aggregation. This may not be optimal in all cases. Ideally, whether to trigger it or " + + "not should be cost based decision. Until Hive formalizes cost model for this, this is config driven."), + // whether to optimize union followed by select followed by filesink + // It creates sub-directories in the final output, so should not be turned on in systems + // where MAPREDUCE-1501 is not present + HIVE_OPTIMIZE_UNION_REMOVE("hive.optimize.union.remove", false, + "Whether to remove the union and push the operators between union and the filesink above union. \n" + + "This avoids an extra scan of the output by union. This is independently useful for union\n" + + "queries, and specially useful when hive.optimize.skewjoin.compiletime is set to true, since an\n" + + "extra union is inserted.\n" + + "\n" + + "The merge is triggered if either of hive.merge.mapfiles or hive.merge.mapredfiles is set to true.\n" + + "If the user has set hive.merge.mapfiles to true and hive.merge.mapredfiles to false, the idea was the\n" + + "number of reducers are few, so the number of files anyway are small. However, with this optimization,\n" + + "we are increasing the number of files possibly by a big margin. So, we merge aggressively."), + HIVEOPTCORRELATION("hive.optimize.correlation", false, "exploit intra-query correlations."), + + HIVE_OPTIMIZE_LIMIT_TRANSPOSE("hive.optimize.limittranspose", false, + "Whether to push a limit through left/right outer join or union. 
If the value is true and the size of the outer\n" + + "input is reduced enough (as specified in hive.optimize.limittranspose.reduction), the limit is pushed\n" + + "to the outer input or union; to remain semantically correct, the limit is kept on top of the join or the union too."), + HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_PERCENTAGE("hive.optimize.limittranspose.reductionpercentage", 1.0f, + "When hive.optimize.limittranspose is true, this variable specifies the minimal reduction of the\n" + + "size of the outer input of the join or input of the union that we should get in order to apply the rule."), + HIVE_OPTIMIZE_LIMIT_TRANSPOSE_REDUCTION_TUPLES("hive.optimize.limittranspose.reductiontuples", (long) 0, + "When hive.optimize.limittranspose is true, this variable specifies the minimal reduction in the\n" + + "number of tuples of the outer input of the join or the input of the union that you should get in order to apply the rule."), + + HIVE_OPTIMIZE_SKEWJOIN_COMPILETIME("hive.optimize.skewjoin.compiletime", false, + "Whether to create a separate plan for skewed keys for the tables in the join.\n" + + "This is based on the skewed keys stored in the metadata. At compile time, the plan is broken\n" + + "into different joins: one for the skewed keys, and the other for the remaining keys. And then,\n" + + "a union is performed for the 2 joins generated above. So unless the same skewed key is present\n" + + "in both the joined tables, the join for the skewed key will be performed as a map-side join.\n" + + "\n" + + "The main difference between this parameter and hive.optimize.skewjoin is that this parameter\n" + + "uses the skew information stored in the metastore to optimize the plan at compile time itself.\n" + + "If there is no skew information in the metadata, this parameter will not have any effect.\n" + + "Both hive.optimize.skewjoin.compiletime and hive.optimize.skewjoin should be set to true.\n" + + "Ideally, hive.optimize.skewjoin should be renamed as hive.optimize.skewjoin.runtime, but this is not being done\n" + + "for backward compatibility.\n" + + "\n" + + "If the skew information is correctly stored in the metadata, hive.optimize.skewjoin.compiletime\n" + + "would change the query plan to take care of it, and hive.optimize.skewjoin will be a no-op."), + + // CTE + HIVE_CTE_MATERIALIZE_THRESHOLD("hive.optimize.cte.materialize.threshold", -1, + "If the number of references to a CTE clause exceeds this threshold, Hive will materialize it\n" + + "before executing the main query block. -1 will disable this feature."), + + // Indexes + HIVEOPTINDEXFILTER_COMPACT_MINSIZE("hive.optimize.index.filter.compact.minsize", (long) 5 * 1024 * 1024 * 1024, + "Minimum size (in bytes) of the inputs on which a compact index is automatically used."), // 5G + HIVEOPTINDEXFILTER_COMPACT_MAXSIZE("hive.optimize.index.filter.compact.maxsize", (long) -1, + "Maximum size (in bytes) of the inputs on which a compact index is automatically used. A negative number is equivalent to infinity."), // infinity + HIVE_INDEX_COMPACT_QUERY_MAX_ENTRIES("hive.index.compact.query.max.entries", (long) 10000000, + "The maximum number of index entries to read during a query that uses the compact index. Negative value is equivalent to infinity."), // 10M + HIVE_INDEX_COMPACT_QUERY_MAX_SIZE("hive.index.compact.query.max.size", (long) 10 * 1024 * 1024 * 1024, + "The maximum number of bytes that a query using the compact index can read. 
Negative value is equivalent to infinity."), // 10G + HIVE_INDEX_COMPACT_BINARY_SEARCH("hive.index.compact.binary.search", true, + "Whether or not to use a binary search to find the entries in an index table that match the filter, where possible"), + + // Statistics + HIVESTATSAUTOGATHER("hive.stats.autogather", true, + "A flag to gather statistics automatically during the INSERT OVERWRITE command."), + HIVESTATSDBCLASS("hive.stats.dbclass", "fs", new PatternSet("custom", "fs"), + "The storage that stores temporary Hive statistics. In filesystem based statistics collection ('fs'), \n" + + "each task writes statistics it has collected in a file on the filesystem, which will be aggregated \n" + + "after the job has finished. Supported values are fs (filesystem) and custom as defined in StatsSetupConst.java."), // StatsSetupConst.StatDB + HIVE_STATS_DEFAULT_PUBLISHER("hive.stats.default.publisher", "", + "The Java class (implementing the StatsPublisher interface) that is used by default if hive.stats.dbclass is custom type."), + HIVE_STATS_DEFAULT_AGGREGATOR("hive.stats.default.aggregator", "", + "The Java class (implementing the StatsAggregator interface) that is used by default if hive.stats.dbclass is custom type."), + HIVE_STATS_ATOMIC("hive.stats.atomic", false, + "Whether to update metastore stats only if all stats are available"), + HIVE_STATS_COLLECT_RAWDATASIZE("hive.stats.collect.rawdatasize", true, + "Should the raw data size be collected when analyzing tables"), + CLIENT_STATS_COUNTERS("hive.client.stats.counters", "", + "Subset of counters that should be of interest for hive.client.stats.publishers (when one wants to limit their publishing). \n" + + "Non-display names should be used"), + HIVE_STATS_RELIABLE("hive.stats.reliable", false, + "Whether queries will fail because stats cannot be collected completely accurately. \n" + + "If this is set to true, reading/writing from/into a partition may fail because the stats\n" + + "could not be computed accurately."), + HIVE_STATS_COLLECT_PART_LEVEL_STATS("hive.analyze.stmt.collect.partlevel.stats", true, + "analyze table T compute statistics for columns. Queries like these should compute partition-" + + "level stats for partitioned tables even when no partition spec is specified."), + HIVE_STATS_GATHER_NUM_THREADS("hive.stats.gather.num.threads", 10, + "Number of threads used by partialscan/noscan analyze command for partitioned tables.\n" + + "This is applicable only for file formats that implement StatsProvidingRecordReader (like ORC)."), + // Collect table access keys information for operators that can benefit from bucketing + HIVE_STATS_COLLECT_TABLEKEYS("hive.stats.collect.tablekeys", false, + "Whether join and group by keys on tables are derived and maintained in the QueryPlan.\n" + + "This is useful to identify how tables are accessed and to determine if they should be bucketed."), + // Collect column access information + HIVE_STATS_COLLECT_SCANCOLS("hive.stats.collect.scancols", false, + "Whether column accesses are tracked in the QueryPlan.\n" + + "This is useful to identify how tables are accessed and to determine if there are wasted columns that can be trimmed."), + // standard error allowed for ndv estimates. A lower value indicates higher accuracy and a + // higher compute cost. 
+ HIVE_STATS_NDV_ERROR("hive.stats.ndv.error", (float)20.0, + "Standard error expressed in percentage. Provides a tradeoff between accuracy and compute cost. \n" + + "A lower value for error indicates higher accuracy and a higher compute cost."), + HIVE_METASTORE_STATS_NDV_DENSITY_FUNCTION("hive.metastore.stats.ndv.densityfunction", false, + "Whether to use density function to estimate the NDV for the whole table based on the NDV of partitions"), + HIVE_STATS_KEY_PREFIX("hive.stats.key.prefix", "", "", true), // internal usage only + // if length of variable length data type cannot be determined this length will be used. + HIVE_STATS_MAX_VARIABLE_LENGTH("hive.stats.max.variable.length", 100, + "To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),\n" + + "average row size is multiplied with the total number of rows coming out of each operator.\n" + + "Average row size is computed from the average column size of all columns in the row. In the absence\n" + + "of column statistics, for variable length columns (like string, bytes etc.), this value will be\n" + + "used. For fixed length columns their corresponding Java equivalent sizes are used\n" + + "(float - 4 bytes, double - 8 bytes etc.) - see the sketch below."), + // if number of elements in list cannot be determined, this value will be used + HIVE_STATS_LIST_NUM_ENTRIES("hive.stats.list.num.entries", 10, + "To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),\n" + + "average row size is multiplied with the total number of rows coming out of each operator.\n" + + "Average row size is computed from the average column size of all columns in the row. In the absence\n" + + "of column statistics and for variable length complex columns like list, the average number of\n" + + "entries/values can be specified using this config."), + // if number of elements in map cannot be determined, this value will be used + HIVE_STATS_MAP_NUM_ENTRIES("hive.stats.map.num.entries", 10, + "To estimate the size of data flowing through operators in Hive/Tez (for reducer estimation etc.),\n" + + "average row size is multiplied with the total number of rows coming out of each operator.\n" + + "Average row size is computed from the average column size of all columns in the row. In the absence\n" + + "of column statistics and for variable length complex columns like map, the average number of\n" + + "entries/values can be specified using this config."), + // statistics annotation fetches stats for each partition, which can be expensive. turning + // this off will result in basic sizes being fetched from namenode instead + HIVE_STATS_FETCH_PARTITION_STATS("hive.stats.fetch.partition.stats", true, + "Annotation of the operator tree with statistics information requires partition level basic\n" + + "statistics like number of rows, data size and file size. Partition statistics are fetched from\n" + + "the metastore. Fetching partition statistics for each needed partition can be expensive when the\n" + + "number of partitions is high. This flag can be used to disable fetching of partition statistics\n" + + "from the metastore. When this flag is disabled, Hive will make calls to the filesystem to get file sizes\n" + + "and will estimate the number of rows from the row schema."),
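+ // Sketch of the row-size estimate described above (a simplified assumption for a row
+ // with a double, a float, and a string column that lacks column statistics):
+ //   int defVarLen = conf.getIntVar(ConfVars.HIVE_STATS_MAX_VARIABLE_LENGTH);
+ //   long avgRowSize = 8 /* double */ + 4 /* float */ + defVarLen /* string, no stats */;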
+ // statistics annotation fetches column statistics for all required columns which can + // be very expensive sometimes + HIVE_STATS_FETCH_COLUMN_STATS("hive.stats.fetch.column.stats", false, + "Annotation of the operator tree with statistics information requires column statistics.\n" + + "Column statistics are fetched from the metastore. Fetching column statistics for each needed column\n" + + "can be expensive when the number of columns is high. This flag can be used to disable fetching\n" + + "of column statistics from the metastore."), + // in the absence of column statistics, the estimated number of rows/data size that will + // be emitted from join operator will depend on this factor + HIVE_STATS_JOIN_FACTOR("hive.stats.join.factor", (float) 1.1, + "Hive/Tez optimizer estimates the data size flowing through each of the operators. The JOIN operator\n" + + "uses column statistics to estimate the number of rows flowing out of it and hence the data size.\n" + + "In the absence of column statistics, this factor determines the amount of rows that flow out\n" + + "of the JOIN operator."), + // in the absence of uncompressed/raw data size, total file size will be used for statistics + // annotation. But the file may be compressed, encoded and serialized which may be lesser in size + // than the actual uncompressed/raw data size. This factor will be multiplied to file size to estimate + // the raw data size. + HIVE_STATS_DESERIALIZATION_FACTOR("hive.stats.deserialization.factor", (float) 1.0, + "Hive/Tez optimizer estimates the data size flowing through each of the operators. In the absence\n" + + "of basic statistics like number of rows and data size, file size is used to estimate the number\n" + + "of rows and data size. Since files in tables/partitions are serialized (and optionally\n" + + "compressed) the estimates of number of rows and data size cannot be reliably determined.\n" + + "This factor is multiplied with the file size to account for serialization and compression."), + + // Concurrency + HIVE_SUPPORT_CONCURRENCY("hive.support.concurrency", false, + "Whether Hive supports concurrency control or not. \n" + + "A ZooKeeper instance must be up and running when using the ZooKeeper Hive lock manager."), + HIVE_LOCK_MANAGER("hive.lock.manager", "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager", ""), + HIVE_LOCK_NUMRETRIES("hive.lock.numretries", 100, + "The number of times you want to try to get all the locks (see the sketch below)"), + HIVE_UNLOCK_NUMRETRIES("hive.unlock.numretries", 10, + "The number of times you want to retry a single unlock"), + HIVE_LOCK_SLEEP_BETWEEN_RETRIES("hive.lock.sleep.between.retries", "60s", + new TimeValidator(TimeUnit.SECONDS, 0L, false, Long.MAX_VALUE, false), + "The maximum sleep time between various retries"), + HIVE_LOCK_MAPRED_ONLY("hive.lock.mapred.only.operation", false, + "This param controls whether to acquire locks only for queries\n" + + "that need to execute at least one mapred job."), + + // Zookeeper related configs + HIVE_ZOOKEEPER_QUORUM("hive.zookeeper.quorum", "", + "List of ZooKeeper servers to talk to. This is needed for: \n" + + "1. Read/write locks - when hive.lock.manager is set to \n" + + "org.apache.hadoop.hive.ql.lockmgr.zookeeper.ZooKeeperHiveLockManager, \n" + + "2. When HiveServer2 supports service discovery via Zookeeper.\n" + + "3. For delegation token storage if zookeeper store is used, if\n" + + "hive.cluster.delegation.token.store.zookeeper.connectString is not set\n" + + "4. LLAP daemon registry service"),
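+ // Sketch of the retry budget implied by the lock settings above (illustrative only;
+ // the real retry logic lives in the configured lock manager):
+ //   int retries = conf.getIntVar(ConfVars.HIVE_LOCK_NUMRETRIES);
+ //   long sleepMs = conf.getTimeVar(ConfVars.HIVE_LOCK_SLEEP_BETWEEN_RETRIES, TimeUnit.MILLISECONDS);
+ //   long worstCaseWaitMs = retries * sleepMs; // upper bound before lock acquisition gives up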
+ HIVE_ZOOKEEPER_CLIENT_PORT("hive.zookeeper.client.port", "2181", + "The port of ZooKeeper servers to talk to.\n" + + "If the list of Zookeeper servers specified in hive.zookeeper.quorum\n" + + "does not contain port numbers, this value is used."), + HIVE_ZOOKEEPER_SESSION_TIMEOUT("hive.zookeeper.session.timeout", "1200000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "ZooKeeper client's session timeout (in milliseconds). The client is disconnected, and as a result, all locks released, \n" + + "if a heartbeat is not sent within the timeout."), + HIVE_ZOOKEEPER_NAMESPACE("hive.zookeeper.namespace", "hive_zookeeper_namespace", + "The parent node under which all ZooKeeper nodes are created."), + HIVE_ZOOKEEPER_CLEAN_EXTRA_NODES("hive.zookeeper.clean.extra.nodes", false, + "Clean extra nodes at the end of the session."), + HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES("hive.zookeeper.connection.max.retries", 3, + "Max number of times to retry when connecting to the ZooKeeper server."), + HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME("hive.zookeeper.connection.basesleeptime", "1000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Initial amount of time (in milliseconds) to wait between retries\n" + + "when connecting to the ZooKeeper server when using ExponentialBackoffRetry policy."), + + // Transactions + HIVE_TXN_MANAGER("hive.txn.manager", + "org.apache.hadoop.hive.ql.lockmgr.DummyTxnManager", + "Set to org.apache.hadoop.hive.ql.lockmgr.DbTxnManager as part of turning on Hive\n" + + "transactions, which also requires appropriate settings for hive.compactor.initiator.on,\n" + + "hive.compactor.worker.threads, hive.support.concurrency (true), hive.enforce.bucketing\n" + + "(true), and hive.exec.dynamic.partition.mode (nonstrict) - see the sketch below.\n" + + "The default DummyTxnManager replicates pre-Hive-0.13 behavior and provides\n" + + "no transactions."), + HIVE_TXN_TIMEOUT("hive.txn.timeout", "300s", new TimeValidator(TimeUnit.SECONDS), + "Time after which transactions are declared aborted if the client has not sent a heartbeat."), + HIVE_TXN_HEARTBEAT_THREADPOOL_SIZE("hive.txn.heartbeat.threadpool.size", 5, "The number of " + + "threads to use for heartbeating. For the Hive CLI, 1 is enough. For HiveServer2, we need a few."), + TXN_MGR_DUMP_LOCK_STATE_ON_ACQUIRE_TIMEOUT("hive.txn.manager.dump.lock.state.on.acquire.timeout", false, + "Set this to true so that when an attempt to acquire a lock on a resource times out, the current state" + + " of the lock manager is dumped to the log file. This is for debugging. See also " + + "hive.lock.numretries and hive.lock.sleep.between.retries."), + + HIVE_TXN_MAX_OPEN_BATCH("hive.txn.max.open.batch", 1000, + "Maximum number of transactions that can be fetched in one call to open_txns().\n" + + "This controls how many transactions streaming agents such as Flume or Storm open\n" + + "simultaneously. The streaming agent then writes that number of entries into a single\n" + + "file (per Flume agent or Storm bolt). Thus increasing this value decreases the number\n" + + "of delta files created by streaming agents. But it also increases the number of open\n" + + "transactions that Hive has to track at any given time, which may negatively affect\n" + + "read performance."),
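+ // Illustrative sketch of the transaction-enabling recipe from hive.txn.manager above
+ // (values are examples; see that entry for the full list of required settings):
+ //   conf.setVar(ConfVars.HIVE_TXN_MANAGER, "org.apache.hadoop.hive.ql.lockmgr.DbTxnManager");
+ //   conf.setBoolVar(ConfVars.HIVE_SUPPORT_CONCURRENCY, true);
+ //   conf.setBoolVar(ConfVars.HIVE_COMPACTOR_INITIATOR_ON, true); // on one metastore instance only
+ //   conf.setIntVar(ConfVars.HIVE_COMPACTOR_WORKER_THREADS, 4);   // example worker count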
+ HIVE_COMPACTOR_INITIATOR_ON("hive.compactor.initiator.on", false, + "Whether to run the initiator and cleaner threads on this metastore instance or not.\n" + + "Set this to true on one instance of the Thrift metastore service as part of turning\n" + + "on Hive transactions. For a complete list of parameters required for turning on\n" + + "transactions, see hive.txn.manager."), + + HIVE_COMPACTOR_WORKER_THREADS("hive.compactor.worker.threads", 0, + "How many compactor worker threads to run on this metastore instance. Set this to a\n" + + "positive number on one or more instances of the Thrift metastore service as part of\n" + + "turning on Hive transactions. For a complete list of parameters required for turning\n" + + "on transactions, see hive.txn.manager.\n" + + "Worker threads spawn MapReduce jobs to do compactions. They do not do the compactions\n" + + "themselves. Increasing the number of worker threads will decrease the time it takes\n" + + "tables or partitions to be compacted once they are determined to need compaction.\n" + + "It will also increase the background load on the Hadoop cluster as more MapReduce jobs\n" + + "will be running in the background."), + + HIVE_COMPACTOR_WORKER_TIMEOUT("hive.compactor.worker.timeout", "86400s", + new TimeValidator(TimeUnit.SECONDS), + "Time in seconds after which a compaction job will be declared failed and the\n" + + "compaction re-queued."), + + HIVE_COMPACTOR_CHECK_INTERVAL("hive.compactor.check.interval", "300s", + new TimeValidator(TimeUnit.SECONDS), + "Time in seconds between checks to see if any tables or partitions need to be\n" + + "compacted. This should be kept high because each check for compaction requires\n" + + "many calls against the NameNode.\n" + + "Decreasing this value will reduce the time it takes for compaction to be started\n" + + "for a table or partition that requires compaction. However, checking if compaction\n" + + "is needed requires several calls to the NameNode for each table or partition that\n" + + "has had a transaction done on it since the last major compaction. So decreasing this\n" + + "value will increase the load on the NameNode."), + + HIVE_COMPACTOR_DELTA_NUM_THRESHOLD("hive.compactor.delta.num.threshold", 10, + "Number of delta directories in a table or partition that will trigger a minor\n" + + "compaction."), + + HIVE_COMPACTOR_DELTA_PCT_THRESHOLD("hive.compactor.delta.pct.threshold", 0.1f, + "Percentage (fractional) size of the delta files relative to the base that will trigger\n" + + "a major compaction. (1.0 = 100%, so the default 0.1 = 10%.)"), + COMPACTOR_MAX_NUM_DELTA("hive.compactor.max.num.delta", 500, "Maximum number of delta files that " + + "the compactor will attempt to handle in a single job."), + + HIVE_COMPACTOR_ABORTEDTXN_THRESHOLD("hive.compactor.abortedtxn.threshold", 1000, + "Number of aborted transactions involving a given table or partition that will trigger\n" + + "a major compaction."), + + COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2, + new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " + + "after which automatic compactions will not be scheduled any more. 
COMPACTOR_INITIATOR_FAILED_THRESHOLD("hive.compactor.initiator.failed.compacts.threshold", 2, + new RangeValidator(1, 20), "Number of consecutive compaction failures (per table/partition) " + + "after which automatic compactions will not be scheduled any more. Note that this must be less " + + "than hive.compactor.history.retention.failed."), + + HIVE_COMPACTOR_CLEANER_RUN_INTERVAL("hive.compactor.cleaner.run.interval", "5000ms", + new TimeValidator(TimeUnit.MILLISECONDS), "Time between runs of the cleaner thread"), + COMPACTOR_JOB_QUEUE("hive.compactor.job.queue", "", "Used to specify name of Hadoop queue to which\n" + + "Compaction jobs will be submitted. Set to empty string to let Hadoop choose the queue."), + + COMPACTOR_HISTORY_RETENTION_SUCCEEDED("hive.compactor.history.retention.succeeded", 3, + new RangeValidator(0, 100), "Determines how many successful compaction records will be " + + "retained in compaction history for a given table/partition."), + + COMPACTOR_HISTORY_RETENTION_FAILED("hive.compactor.history.retention.failed", 3, + new RangeValidator(0, 100), "Determines how many failed compaction records will be " + + "retained in compaction history for a given table/partition."), + + COMPACTOR_HISTORY_RETENTION_ATTEMPTED("hive.compactor.history.retention.attempted", 2, + new RangeValidator(0, 100), "Determines how many attempted compaction records will be " + + "retained in compaction history for a given table/partition."), + + COMPACTOR_HISTORY_REAPER_INTERVAL("hive.compactor.history.reaper.interval", "2m", + new TimeValidator(TimeUnit.MILLISECONDS), "Determines how often the compaction history reaper runs"), + + HIVE_TIMEDOUT_TXN_REAPER_START("hive.timedout.txn.reaper.start", "100s", + new TimeValidator(TimeUnit.MILLISECONDS), "Time delay of the first reaper run after metastore start"), + HIVE_TIMEDOUT_TXN_REAPER_INTERVAL("hive.timedout.txn.reaper.interval", "180s", + new TimeValidator(TimeUnit.MILLISECONDS), "Time interval describing how often the reaper runs"), + + // For HBase storage handler + HIVE_HBASE_WAL_ENABLED("hive.hbase.wal.enabled", true, + "Whether writes to HBase should be forced to the write-ahead log. \n" + + "Disabling this improves HBase write performance at the risk of lost writes in case of a crash."), + HIVE_HBASE_GENERATE_HFILES("hive.hbase.generatehfiles", false, + "True when HBaseStorageHandler should generate hfiles instead of operating against the online table."), + HIVE_HBASE_SNAPSHOT_NAME("hive.hbase.snapshot.name", null, "The HBase table snapshot name to use."), + HIVE_HBASE_SNAPSHOT_RESTORE_DIR("hive.hbase.snapshot.restoredir", "/tmp", "The directory in which to " + + "restore the HBase table snapshot."), + + // For har files + HIVEARCHIVEENABLED("hive.archive.enabled", false, "Whether archiving operations are permitted"), + + HIVEOPTGBYUSINGINDEX("hive.optimize.index.groupby", false, + "Whether to enable optimization of group-by queries using Aggregate indexes."), + + HIVEOUTERJOINSUPPORTSFILTERS("hive.outerjoin.supports.filters", true, ""), + + HIVEFETCHTASKCONVERSION("hive.fetch.task.conversion", "more", new StringSet("none", "minimal", "more"), + "Some select queries can be converted to single FETCH task minimizing latency.\n" + + "Currently the query should be single sourced not having any subquery and should not have\n" + + "any aggregations or distincts (which incurs RS), lateral views and joins.\n" + + "0. none : disable hive.fetch.task.conversion\n" + + "1. minimal : SELECT STAR, FILTER on partition columns, LIMIT only\n" + + "2. more : SELECT, FILTER, LIMIT only (support TABLESAMPLE and virtual columns)" + ), +
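+    // Illustrative sketch (not part of this patch): with the "more" mode above, a simple
+    // filtered SELECT can run as a FETCH task without submitting a job ("src" is a
+    // hypothetical table):
+    //   SET hive.fetch.task.conversion=more;
+    //   SELECT key, value FROM src WHERE key > 10 LIMIT 5;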
HIVEFETCHTASKCONVERSIONTHRESHOLD("hive.fetch.task.conversion.threshold", 1073741824L, + "Input threshold for applying hive.fetch.task.conversion. If target table is native, input length\n" + + "is calculated by summation of file lengths. If it's not native, storage handler for the table\n" + + "can optionally implement org.apache.hadoop.hive.ql.metadata.InputEstimator interface."), + + HIVEFETCHTASKAGGR("hive.fetch.task.aggr", false, + "Aggregation queries with no group-by clause (for example, select count(*) from src) execute\n" + + "final aggregations in a single reduce task. If this is set true, Hive delegates the final aggregation\n" + + "stage to a fetch task, possibly decreasing the query time."), + + HIVEOPTIMIZEMETADATAQUERIES("hive.compute.query.using.stats", false, + "When set to true, Hive will answer a few queries like count(1) purely using stats\n" + + "stored in the metastore. For basic stats collection, turn on the config hive.stats.autogather to true.\n" + + "For more advanced stats collection, you need to run 'analyze table' queries."), + + // Serde for FetchTask + HIVEFETCHOUTPUTSERDE("hive.fetch.output.serde", "org.apache.hadoop.hive.serde2.DelimitedJSONSerDe", + "The SerDe used by FetchTask to serialize the fetch output."), + + HIVEEXPREVALUATIONCACHE("hive.cache.expr.evaluation", true, + "If true, the evaluation result of a deterministic expression referenced twice or more\n" + + "will be cached.\n" + + "For example, in a filter condition like '.. where key + 10 = 100 or key + 10 = 0'\n" + + "the expression 'key + 10' will be evaluated/cached once and reused for the following\n" + + "expression ('key + 10 = 0'). Currently, this is applied only to expressions in select\n" + + "or filter operators."), + + // Hive Variables + HIVEVARIABLESUBSTITUTE("hive.variable.substitute", true, + "This enables substitution using syntax like ${var}, ${system:var} and ${env:var}."), + HIVEVARIABLESUBSTITUTEDEPTH("hive.variable.substitute.depth", 40, + "The maximum replacements the substitution engine will do."), + + HIVECONFVALIDATION("hive.conf.validation", true, + "Enables type checking for registered Hive configurations"), + + SEMANTIC_ANALYZER_HOOK("hive.semantic.analyzer.hook", "", ""), + HIVE_TEST_AUTHORIZATION_SQLSTD_HS2_MODE( + "hive.test.authz.sstd.hs2.mode", false, "test hs2 mode from .q tests", true), + HIVE_AUTHORIZATION_ENABLED("hive.security.authorization.enabled", false, + "enable or disable the Hive client authorization"), + HIVE_AUTHORIZATION_MANAGER("hive.security.authorization.manager", + "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory", + "The Hive client authorization manager class name. The user defined authorization class should implement \n" + + "interface org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider."), + HIVE_AUTHENTICATOR_MANAGER("hive.security.authenticator.manager", + "org.apache.hadoop.hive.ql.security.HadoopDefaultAuthenticator", + "Hive client authenticator manager class name. The user defined authenticator should implement \n" + + "interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."), +
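+    // Illustrative sketch (not part of this patch): SQL standard authorization in
+    // HiveServer2 combines the two settings above:
+    //   conf.setBoolVar(ConfVars.HIVE_AUTHORIZATION_ENABLED, true);
+    //   conf.setVar(ConfVars.HIVE_AUTHORIZATION_MANAGER,
+    //       "org.apache.hadoop.hive.ql.security.authorization.plugin.sqlstd.SQLStdHiveAuthorizerFactory");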
HIVE_METASTORE_AUTHORIZATION_MANAGER("hive.security.metastore.authorization.manager", + "org.apache.hadoop.hive.ql.security.authorization.DefaultHiveMetastoreAuthorizationProvider", + "Names of authorization manager classes (comma separated) to be used in the metastore\n" + + "for authorization. The user defined authorization class should implement interface\n" + + "org.apache.hadoop.hive.ql.security.authorization.HiveMetastoreAuthorizationProvider.\n" + + "All authorization manager classes have to successfully authorize the metastore API\n" + + "call for the command execution to be allowed."), + HIVE_METASTORE_AUTHORIZATION_AUTH_READS("hive.security.metastore.authorization.auth.reads", true, + "If this is true, the metastore authorizer authorizes read actions on databases and tables"), + HIVE_METASTORE_AUTHENTICATOR_MANAGER("hive.security.metastore.authenticator.manager", + "org.apache.hadoop.hive.ql.security.HadoopDefaultMetastoreAuthenticator", + "authenticator manager class name to be used in the metastore for authentication. \n" + + "The user defined authenticator should implement interface org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider."), + HIVE_AUTHORIZATION_TABLE_USER_GRANTS("hive.security.authorization.createtable.user.grants", "", + "the privileges automatically granted to some users whenever a table gets created.\n" + + "An example like \"userX,userY:select;userZ:create\" will grant select privilege to userX and userY,\n" + + "and grant create privilege to userZ whenever a new table is created."), + HIVE_AUTHORIZATION_TABLE_GROUP_GRANTS("hive.security.authorization.createtable.group.grants", + "", + "the privileges automatically granted to some groups whenever a table gets created.\n" + + "An example like \"groupX,groupY:select;groupZ:create\" will grant select privilege to groupX and groupY,\n" + + "and grant create privilege to groupZ whenever a new table is created."), + HIVE_AUTHORIZATION_TABLE_ROLE_GRANTS("hive.security.authorization.createtable.role.grants", "", + "the privileges automatically granted to some roles whenever a table gets created.\n" + + "An example like \"roleX,roleY:select;roleZ:create\" will grant select privilege to roleX and roleY,\n" + + "and grant create privilege to roleZ whenever a new table is created."), + HIVE_AUTHORIZATION_TABLE_OWNER_GRANTS("hive.security.authorization.createtable.owner.grants", + "", + "The privileges automatically granted to the owner whenever a table gets created.\n" + + "An example like \"select,drop\" will grant select and drop privilege to the owner\n" + + "of the table. Note that the default gives the creator of a table no access to the\n" + + "table (but see HIVE-8067)."), + HIVE_AUTHORIZATION_TASK_FACTORY("hive.security.authorization.task.factory", + "org.apache.hadoop.hive.ql.parse.authorization.HiveAuthorizationTaskFactoryImpl", + "Authorization DDL task factory implementation"), + + // if this is not set default value is set during config initialization + // Default value can't be set in this constructor as it would refer names in other ConfVars + // whose constructor would not have been called + HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST( + "hive.security.authorization.sqlstd.confwhitelist", "", + "List of comma separated Java regexes. Configuration parameters that match these\n" + + "regexes can be modified by the user when SQL standard authorization is enabled.\n" + + "To get the default value, use the 'set <param>' command.\n" + + "Note that the hive.conf.restricted.list checks are still enforced after the white list\n" + + "check"), + + HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST_APPEND( + "hive.security.authorization.sqlstd.confwhitelist.append", "", + "List of comma separated Java regexes, to be appended to list set in\n" + + "hive.security.authorization.sqlstd.confwhitelist. 
Using this list instead\n" + + "of updating the original list means that you can append to the defaults\n" + + "set by SQL standard authorization instead of replacing it entirely."), + + HIVE_CLI_PRINT_HEADER("hive.cli.print.header", false, "Whether to print the names of the columns in query output."), + + HIVE_CLI_TEZ_SESSION_ASYNC("hive.cli.tez.session.async", true, "Whether to start Tez\n" + + "session in background when running CLI with Tez, allowing CLI to be available earlier."), + + HIVE_ERROR_ON_EMPTY_PARTITION("hive.error.on.empty.partition", false, + "Whether to throw an exception if dynamic partition insert generates empty results."), + + HIVE_INDEX_COMPACT_FILE("hive.index.compact.file", "", "internal variable"), + HIVE_INDEX_BLOCKFILTER_FILE("hive.index.blockfilter.file", "", "internal variable"), + HIVE_INDEX_IGNORE_HDFS_LOC("hive.index.compact.file.ignore.hdfs", false, + "When true the HDFS location stored in the index file will be ignored at runtime.\n" + + "If the data got moved or the name of the cluster got changed, the index data should still be usable."), + + HIVE_EXIM_URI_SCHEME_WL("hive.exim.uri.scheme.whitelist", "hdfs,pfile", + "A comma separated list of acceptable URI schemes for import and export."), + // temporary variable for testing. This is added just to turn off this feature in case of a bug in + // deployment. It has not been documented in hive-default.xml intentionally, this should be removed + // once the feature is stable + HIVE_EXIM_RESTRICT_IMPORTS_INTO_REPLICATED_TABLES("hive.exim.strict.repl.tables",true, + "Parameter that determines if 'regular' (non-replication) export dumps can be\n" + + "imported on to tables that are the target of replication. If this parameter is\n" + + "set, regular imports will check if the destination table (if it exists) has a " + + "'repl.last.id' set on it. If so, it will fail."), + HIVE_REPL_TASK_FACTORY("hive.repl.task.factory", + "org.apache.hive.hcatalog.api.repl.exim.EximReplicationTaskFactory", + "Parameter that can be used to override which ReplicationTaskFactory will be\n" + + "used to instantiate ReplicationTask events. Override for third party repl plugins"), + HIVE_MAPPER_CANNOT_SPAN_MULTIPLE_PARTITIONS("hive.mapper.cannot.span.multiple.partitions", false, ""), + HIVE_REWORK_MAPREDWORK("hive.rework.mapredwork", false, + "Whether to rework the mapred work or not.\n" + + "This was first introduced by SymlinkTextInputFormat to replace symlink files with real paths at compile time."), + HIVE_CONCATENATE_CHECK_INDEX ("hive.exec.concatenate.check.index", true, + "If this is set to true, Hive will throw an error when doing\n" + + "'alter table tbl_name [partSpec] concatenate' on a table/partition\n" + + "that has indexes on it. The reason the user wants to set this to true\n" + + "is that it can help the user avoid handling all the index drop, recreation,\n" + + "and rebuild work. This is very helpful for tables with thousands of partitions."), + HIVE_IO_EXCEPTION_HANDLERS("hive.io.exception.handlers", "", + "A list of io exception handler class names. This is used\n" + + "to construct a list of exception handlers to handle exceptions thrown\n" + + "by record readers"), + + // logging configuration + HIVE_LOG4J_FILE("hive.log4j.file", "", + "Hive log4j configuration file.\n" + + "If the property is not set, then logging will be initialized using hive-log4j2.properties found on the classpath.\n" + + "If the property is set, the value must be a valid URI (java.net.URI, e.g. 
\"file:///tmp/my-logging.xml\"), \n" + + "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."), + HIVE_EXEC_LOG4J_FILE("hive.exec.log4j.file", "", + "Hive log4j configuration file for execution mode(sub command).\n" + + "If the property is not set, then logging will be initialized using hive-exec-log4j2.properties found on the classpath.\n" + + "If the property is set, the value must be a valid URI (java.net.URI, e.g. \"file:///tmp/my-logging.xml\"), \n" + + "which you can then extract a URL from and pass to PropertyConfigurator.configure(URL)."), + + HIVE_LOG_EXPLAIN_OUTPUT("hive.log.explain.output", false, + "Whether to log explain output for every query.\n" + + "When enabled, will log EXPLAIN EXTENDED output for the query at INFO log4j log level."), + HIVE_EXPLAIN_USER("hive.explain.user", true, + "Whether to show explain result at user level.\n" + + "When enabled, will log EXPLAIN output for the query at user level."), + + // prefix used to auto generated column aliases (this should be started with '_') + HIVE_AUTOGEN_COLUMNALIAS_PREFIX_LABEL("hive.autogen.columnalias.prefix.label", "_c", + "String used as a prefix when auto generating column alias.\n" + + "By default the prefix label will be appended with a column position number to form the column alias. \n" + + "Auto generation would happen if an aggregate function is used in a select clause without an explicit alias."), + HIVE_AUTOGEN_COLUMNALIAS_PREFIX_INCLUDEFUNCNAME( + "hive.autogen.columnalias.prefix.includefuncname", false, + "Whether to include function name in the column alias auto generated by Hive."), + HIVE_METRICS_CLASS("hive.service.metrics.class", + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics", + new StringSet( + "org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics", + "org.apache.hadoop.hive.common.metrics.LegacyMetrics"), + "Hive metrics subsystem implementation class."), + HIVE_METRICS_REPORTER("hive.service.metrics.reporter", "JSON_FILE, JMX", + "Reporter type for metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics, comma separated list of JMX, CONSOLE, JSON_FILE"), + HIVE_METRICS_JSON_FILE_LOCATION("hive.service.metrics.file.location", "/tmp/report.json", + "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, the location of local JSON metrics file. " + + "This file will get overwritten at every interval."), + HIVE_METRICS_JSON_FILE_INTERVAL("hive.service.metrics.file.frequency", "5s", + new TimeValidator(TimeUnit.MILLISECONDS), + "For metric class org.apache.hadoop.hive.common.metrics.metrics2.CodahaleMetrics JSON_FILE reporter, " + + "the frequency of updating JSON metrics file."), + HIVE_PERF_LOGGER("hive.exec.perf.logger", "org.apache.hadoop.hive.ql.log.PerfLogger", + "The class responsible for logging client side performance metrics. 
\n" + + "Must be a subclass of org.apache.hadoop.hive.ql.log.PerfLogger"), + HIVE_START_CLEANUP_SCRATCHDIR("hive.start.cleanup.scratchdir", false, + "To cleanup the Hive scratchdir when starting the Hive Server"), + HIVE_INSERT_INTO_MULTILEVEL_DIRS("hive.insert.into.multilevel.dirs", false, + "Where to insert into multilevel directories like\n" + + "\"insert directory '/HIVEFT25686/chinna/' from table\""), + HIVE_WAREHOUSE_SUBDIR_INHERIT_PERMS("hive.warehouse.subdir.inherit.perms", true, + "Set this to false if the table directories should be created\n" + + "with the permissions derived from dfs umask instead of\n" + + "inheriting the permission of the warehouse or database directory."), + HIVE_INSERT_INTO_EXTERNAL_TABLES("hive.insert.into.external.tables", true, + "whether insert into external tables is allowed"), + HIVE_TEMPORARY_TABLE_STORAGE( + "hive.exec.temporary.table.storage", "default", new StringSet("memory", + "ssd", "default"), "Define the storage policy for temporary tables." + + "Choices between memory, ssd and default"), + + HIVE_DRIVER_RUN_HOOKS("hive.exec.driver.run.hooks", "", + "A comma separated list of hooks which implement HiveDriverRunHook. Will be run at the beginning " + + "and end of Driver.run, these will be run in the order specified."), + HIVE_DDL_OUTPUT_FORMAT("hive.ddl.output.format", null, + "The data format to use for DDL output. One of \"text\" (for human\n" + + "readable text) or \"json\" (for a json object)."), + HIVE_ENTITY_SEPARATOR("hive.entity.separator", "@", + "Separator used to construct names of tables and partitions. For example, dbname@tablename@partitionname"), + HIVE_CAPTURE_TRANSFORM_ENTITY("hive.entity.capture.transform", false, + "Compiler to capture transform URI referred in the query"), + HIVE_DISPLAY_PARTITION_COLUMNS_SEPARATELY("hive.display.partition.cols.separately", true, + "In older Hive version (0.10 and earlier) no distinction was made between\n" + + "partition columns or non-partition columns while displaying columns in describe\n" + + "table. From 0.12 onwards, they are displayed separately. This flag will let you\n" + + "get old behavior, if desired. See, test-case in patch for HIVE-6689."), + + HIVE_SSL_PROTOCOL_BLACKLIST("hive.ssl.protocol.blacklist", "SSLv2,SSLv3", + "SSL Versions to disable for all Hive Servers"), + + // HiveServer2 specific configs + HIVE_SERVER2_MAX_START_ATTEMPTS("hive.server2.max.start.attempts", 30L, new RangeValidator(0L, null), + "Number of times HiveServer2 will attempt to start before exiting, sleeping 60 seconds " + + "between retries. \n The default of 30 will keep trying for 30 minutes."), + HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY("hive.server2.support.dynamic.service.discovery", false, + "Whether HiveServer2 supports dynamic service discovery for its clients. " + + "To support this, each instance of HiveServer2 currently uses ZooKeeper to register itself, " + + "when it is brought up. JDBC/ODBC clients should use the ZooKeeper ensemble: " + + "hive.zookeeper.quorum in their connection string."), + HIVE_SERVER2_ZOOKEEPER_NAMESPACE("hive.server2.zookeeper.namespace", "hiveserver2", + "The parent node in ZooKeeper used by HiveServer2 when supporting dynamic service discovery."), + + // HiveServer2 global init file location + HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}", + "Either the location of a HS2 global init file or a directory containing a .hiverc file. 
// HiveServer2 global init file location + HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION("hive.server2.global.init.file.location", "${env:HIVE_CONF_DIR}", + "Either the location of a HS2 global init file or a directory containing a .hiverc file. If the \n" + + "property is set, the value must be a valid path to an init file or directory where the init file is located."), + HIVE_SERVER2_TRANSPORT_MODE("hive.server2.transport.mode", "binary", new StringSet("binary", "http"), + "Transport mode of HiveServer2."), + HIVE_SERVER2_THRIFT_BIND_HOST("hive.server2.thrift.bind.host", "", + "Bind host on which to run the HiveServer2 Thrift service."), + HIVE_SERVER2_PARALLEL_COMPILATION("hive.driver.parallel.compilation", false, "Whether to\n" + + "enable parallel compilation between sessions on HiveServer2. The default is false."), + HIVE_SERVER2_COMPILE_LOCK_TIMEOUT("hive.server2.compile.lock.timeout", "0s", + new TimeValidator(TimeUnit.SECONDS), + "Number of seconds a request will wait to acquire the compile lock before giving up. " + + "Setting it to 0s disables the timeout."), + // HiveServer2 WebUI + HIVE_SERVER2_WEBUI_BIND_HOST("hive.server2.webui.host", "0.0.0.0", "The host address the HiveServer2 WebUI will listen on"), + HIVE_SERVER2_WEBUI_PORT("hive.server2.webui.port", 10002, "The port the HiveServer2 WebUI will listen on. This can be " + + "set to 0 or a negative integer to disable the web UI"), + HIVE_SERVER2_WEBUI_MAX_THREADS("hive.server2.webui.max.threads", 50, "The max HiveServer2 WebUI threads"), + HIVE_SERVER2_WEBUI_USE_SSL("hive.server2.webui.use.ssl", false, + "Set this to true for using SSL encryption for HiveServer2 WebUI."), + HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PATH("hive.server2.webui.keystore.path", "", + "SSL certificate keystore location for HiveServer2 WebUI."), + HIVE_SERVER2_WEBUI_SSL_KEYSTORE_PASSWORD("hive.server2.webui.keystore.password", "", + "SSL certificate keystore password for HiveServer2 WebUI."), + HIVE_SERVER2_WEBUI_USE_SPNEGO("hive.server2.webui.use.spnego", false, + "If true, the HiveServer2 WebUI will be secured with SPNEGO. Clients must authenticate with Kerberos."), + HIVE_SERVER2_WEBUI_SPNEGO_KEYTAB("hive.server2.webui.spnego.keytab", "", + "The path to the Kerberos Keytab file containing the HiveServer2 WebUI SPNEGO service principal."), + HIVE_SERVER2_WEBUI_SPNEGO_PRINCIPAL("hive.server2.webui.spnego.principal", + "HTTP/_HOST@EXAMPLE.COM", "The HiveServer2 WebUI SPNEGO service principal.\n" + + "The special string _HOST will be replaced automatically with \n" + + "the value of hive.server2.webui.host or the correct host name."), + HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES("hive.server2.webui.max.historic.queries", 25, + "The maximum number of past queries to show in HiveServer2 WebUI."), + + // Tez session settings + HIVE_SERVER2_TEZ_DEFAULT_QUEUES("hive.server2.tez.default.queues", "", + "A list of comma separated values corresponding to YARN queues of the same name.\n" + + "When HiveServer2 is launched in Tez mode, this configuration needs to be set\n" + + "for multiple Tez sessions to run in parallel on the cluster."), + HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE("hive.server2.tez.sessions.per.default.queue", 1, + "A positive integer that determines the number of Tez sessions that should be\n" + + "launched on each of the queues specified by \"hive.server2.tez.default.queues\".\n" + + "Determines the parallelism on each queue."), +
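+    // Illustrative sketch (not part of this patch): two queues with two Tez sessions
+    // each yields four sessions that can run queries in parallel ("etl" and "adhoc"
+    // are placeholder YARN queue names):
+    //   conf.setVar(ConfVars.HIVE_SERVER2_TEZ_DEFAULT_QUEUES, "etl,adhoc");
+    //   conf.setIntVar(ConfVars.HIVE_SERVER2_TEZ_SESSIONS_PER_DEFAULT_QUEUE, 2);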
HIVE_SERVER2_TEZ_INITIALIZE_DEFAULT_SESSIONS("hive.server2.tez.initialize.default.sessions", + false, + "This flag is used in HiveServer2 to enable a user to use HiveServer2 without\n" + + "turning on Tez for HiveServer2. The user could potentially want to run queries\n" + + "over Tez without the pool of sessions."), + HIVE_SERVER2_TEZ_SESSION_LIFETIME("hive.server2.tez.session.lifetime", "162h", + new TimeValidator(TimeUnit.HOURS), + "The lifetime of the Tez sessions launched by HS2 when default sessions are enabled.\n" + + "Set to 0 to disable session expiration."), + HIVE_SERVER2_TEZ_SESSION_LIFETIME_JITTER("hive.server2.tez.session.lifetime.jitter", "3h", + new TimeValidator(TimeUnit.HOURS), + "The jitter for Tez session lifetime; prevents all the sessions from restarting at once."), + HIVE_SERVER2_TEZ_SESSION_MAX_INIT_THREADS("hive.server2.tez.sessions.init.threads", 16, + "If hive.server2.tez.initialize.default.sessions is enabled, the maximum number of\n" + + "threads to use to initialize the default sessions."), + + + // Operation log configuration + HIVE_SERVER2_LOGGING_OPERATION_ENABLED("hive.server2.logging.operation.enabled", true, + "When true, HS2 will save operation logs and make them available for clients"), + HIVE_SERVER2_LOGGING_OPERATION_LOG_LOCATION("hive.server2.logging.operation.log.location", + "${system:java.io.tmpdir}" + File.separator + "${system:user.name}" + File.separator + + "operation_logs", + "Top level directory where operation logs are stored if logging functionality is enabled"), + HIVE_SERVER2_LOGGING_OPERATION_LEVEL("hive.server2.logging.operation.level", "EXECUTION", + new StringSet("NONE", "EXECUTION", "PERFORMANCE", "VERBOSE"), + "HS2 operation logging mode available to clients to be set at session level.\n" + + "For this to work, hive.server2.logging.operation.enabled should be set to true.\n" + + " NONE: Ignore any logging\n" + + " EXECUTION: Log completion of tasks\n" + + " PERFORMANCE: Execution + Performance logs \n" + + " VERBOSE: All logs" ), + + // Enable metric collection for HiveServer2 + HIVE_SERVER2_METRICS_ENABLED("hive.server2.metrics.enabled", false, "Enable metrics on the HiveServer2."), + + // http (over thrift) transport settings + HIVE_SERVER2_THRIFT_HTTP_PORT("hive.server2.thrift.http.port", 10001, + "Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'http'."), + HIVE_SERVER2_THRIFT_HTTP_PATH("hive.server2.thrift.http.path", "cliservice", + "Path component of URL endpoint when in HTTP mode."), + HIVE_SERVER2_THRIFT_MAX_MESSAGE_SIZE("hive.server2.thrift.max.message.size", 100*1024*1024, + "Maximum message size in bytes a HS2 server will accept."), + HIVE_SERVER2_THRIFT_HTTP_MAX_IDLE_TIME("hive.server2.thrift.http.max.idle.time", "1800s", + new TimeValidator(TimeUnit.MILLISECONDS), + "Maximum idle time for a connection on the server when in HTTP mode."), + HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME("hive.server2.thrift.http.worker.keepalive.time", "60s", + new TimeValidator(TimeUnit.SECONDS), + "Keepalive time for an idle http worker thread. When the number of workers exceeds min workers, " + + "excess threads are killed after this time interval."), + HIVE_SERVER2_THRIFT_HTTP_REQUEST_HEADER_SIZE("hive.server2.thrift.http.request.header.size", 6*1024, + "Request header size in bytes, when using HTTP transport mode. Jetty defaults used."), +
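+    // Illustrative sketch (not part of this patch): when hive.server2.transport.mode
+    // is "http", clients connect over the port and path configured above (hs2host is
+    // a placeholder host name):
+    //   jdbc:hive2://hs2host:10001/;transportMode=http;httpPath=cliservice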
HIVE_SERVER2_THRIFT_HTTP_RESPONSE_HEADER_SIZE("hive.server2.thrift.http.response.header.size", 6*1024, + "Response header size in bytes, when using HTTP transport mode. Jetty defaults used."), + + // Cookie based authentication when using HTTP Transport + HIVE_SERVER2_THRIFT_HTTP_COOKIE_AUTH_ENABLED("hive.server2.thrift.http.cookie.auth.enabled", true, + "When true, HiveServer2 in HTTP transport mode will use a cookie-based authentication mechanism."), + HIVE_SERVER2_THRIFT_HTTP_COOKIE_MAX_AGE("hive.server2.thrift.http.cookie.max.age", "86400s", + new TimeValidator(TimeUnit.SECONDS), + "Maximum age in seconds for server side cookie used by HS2 in HTTP mode."), + HIVE_SERVER2_THRIFT_HTTP_COOKIE_DOMAIN("hive.server2.thrift.http.cookie.domain", null, + "Domain for the HS2 generated cookies"), + HIVE_SERVER2_THRIFT_HTTP_COOKIE_PATH("hive.server2.thrift.http.cookie.path", null, + "Path for the HS2 generated cookies"), + HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_SECURE("hive.server2.thrift.http.cookie.is.secure", true, + "Secure attribute of the HS2 generated cookie."), + HIVE_SERVER2_THRIFT_HTTP_COOKIE_IS_HTTPONLY("hive.server2.thrift.http.cookie.is.httponly", true, + "HttpOnly attribute of the HS2 generated cookie."), + + // binary transport settings + HIVE_SERVER2_THRIFT_PORT("hive.server2.thrift.port", 10000, + "Port number of HiveServer2 Thrift interface when hive.server2.transport.mode is 'binary'."), + HIVE_SERVER2_THRIFT_SASL_QOP("hive.server2.thrift.sasl.qop", "auth", + new StringSet("auth", "auth-int", "auth-conf"), + "Sasl QOP value; set it to one of the following values to enable higher levels of\n" + + "protection for HiveServer2 communication with clients.\n" + + "Setting hadoop.rpc.protection to a higher level than HiveServer2 does not\n" + + "make sense in most situations. HiveServer2 ignores hadoop.rpc.protection in favor\n" + + "of hive.server2.thrift.sasl.qop.\n" + + " \"auth\" - authentication only (default)\n" + + " \"auth-int\" - authentication plus integrity protection\n" + + " \"auth-conf\" - authentication plus integrity and confidentiality protection\n" + + "This is applicable only if HiveServer2 is configured to use Kerberos authentication."), +
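+    // Illustrative sketch (not part of this patch): a Kerberized deployment wanting
+    // wire encryption per the hive.server2.thrift.sasl.qop description above:
+    //   conf.setVar(ConfVars.HIVE_SERVER2_THRIFT_SASL_QOP, "auth-conf");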
HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS("hive.server2.thrift.min.worker.threads", 5, + "Minimum number of Thrift worker threads"), + HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS("hive.server2.thrift.max.worker.threads", 500, + "Maximum number of Thrift worker threads"), + HIVE_SERVER2_THRIFT_LOGIN_BEBACKOFF_SLOT_LENGTH( + "hive.server2.thrift.exponential.backoff.slot.length", "100ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Binary exponential backoff slot time for Thrift clients during login to HiveServer2,\n" + + "for retries until hitting Thrift client timeout"), + HIVE_SERVER2_THRIFT_LOGIN_TIMEOUT("hive.server2.thrift.login.timeout", "20s", + new TimeValidator(TimeUnit.SECONDS), "Timeout for Thrift clients during login to HiveServer2"), + HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME("hive.server2.thrift.worker.keepalive.time", "60s", + new TimeValidator(TimeUnit.SECONDS), + "Keepalive time (in seconds) for an idle worker thread. When the number of workers exceeds min workers, " + + "excess threads are killed after this time interval."), + // Configuration for async thread pool in SessionManager + HIVE_SERVER2_ASYNC_EXEC_THREADS("hive.server2.async.exec.threads", 100, + "Number of threads in the async thread pool for HiveServer2"), + HIVE_SERVER2_ASYNC_EXEC_SHUTDOWN_TIMEOUT("hive.server2.async.exec.shutdown.timeout", "10s", + new TimeValidator(TimeUnit.SECONDS), + "How long HiveServer2 shutdown will wait for async threads to terminate."), + HIVE_SERVER2_ASYNC_EXEC_WAIT_QUEUE_SIZE("hive.server2.async.exec.wait.queue.size", 100, + "Size of the wait queue for async thread pool in HiveServer2.\n" + + "After hitting this limit, the async thread pool will reject new requests."), + HIVE_SERVER2_ASYNC_EXEC_KEEPALIVE_TIME("hive.server2.async.exec.keepalive.time", "10s", + new TimeValidator(TimeUnit.SECONDS), + "Time that an idle HiveServer2 async thread (from the thread pool) will wait for a new task\n" + + "to arrive before terminating"), + HIVE_SERVER2_LONG_POLLING_TIMEOUT("hive.server2.long.polling.timeout", "5000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Time that HiveServer2 will wait before responding to asynchronous calls that use long polling"), + + HIVE_SESSION_IMPL_CLASSNAME("hive.session.impl.classname", null, "Classname for custom implementation of hive session"), + HIVE_SESSION_IMPL_WITH_UGI_CLASSNAME("hive.session.impl.withugi.classname", null, "Classname for custom implementation of hive session with UGI"), + + // HiveServer2 auth configuration + HIVE_SERVER2_AUTHENTICATION("hive.server2.authentication", "NONE", + new StringSet("NOSASL", "NONE", "LDAP", "KERBEROS", "PAM", "CUSTOM"), + "Client authentication types.\n" + + " NONE: no authentication check\n" + + " LDAP: LDAP/AD based authentication\n" + + " KERBEROS: Kerberos/GSSAPI authentication\n" + + " CUSTOM: Custom authentication provider\n" + + " (Use with property hive.server2.custom.authentication.class)\n" + + " PAM: Pluggable authentication module\n" + + " NOSASL: Raw transport"), + HIVE_SERVER2_ALLOW_USER_SUBSTITUTION("hive.server2.allow.user.substitution", true, + "Allow alternate user to be specified as part of HiveServer2 open connection request."), + HIVE_SERVER2_KERBEROS_KEYTAB("hive.server2.authentication.kerberos.keytab", "", + "Kerberos keytab file for server principal"), + HIVE_SERVER2_KERBEROS_PRINCIPAL("hive.server2.authentication.kerberos.principal", "", + "Kerberos server principal"), + HIVE_SERVER2_SPNEGO_KEYTAB("hive.server2.authentication.spnego.keytab", "", + "keytab file for SPNego principal, optional,\n" + + "typical value would look like /etc/security/keytabs/spnego.service.keytab,\n" + + "This keytab would be used by HiveServer2 when Kerberos security is enabled and \n" + + "HTTP transport mode is used.\n" + + "This needs to be set only if SPNEGO is to be used in authentication.\n" + + "SPNego authentication would be honored only if valid\n" + + " hive.server2.authentication.spnego.principal\n" + + "and\n" + + " hive.server2.authentication.spnego.keytab\n" + + "are specified."), + HIVE_SERVER2_SPNEGO_PRINCIPAL("hive.server2.authentication.spnego.principal", "", + "SPNego service principal, optional,\n" + + "typical value would look like HTTP/_HOST@EXAMPLE.COM\n" + + "SPNego service principal would be used by HiveServer2 when Kerberos security is enabled\n" + + "and HTTP transport mode is used.\n" + + "This needs to be set only if SPNEGO is to be used in authentication."), +
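+    // Illustrative sketch (not part of this patch): Kerberos authentication per the
+    // entries above (principal and keytab values are placeholders):
+    //   conf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, "KERBEROS");
+    //   conf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL, "hive/_HOST@EXAMPLE.COM");
+    //   conf.setVar(ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB,
+    //       "/etc/security/keytabs/hive.service.keytab");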
HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null, + "LDAP connection URL(s),\n" + + "this value could contain URLs to multiple LDAP server instances for HA,\n" + + "each LDAP URL is separated by a SPACE character. URLs are used in the \n" + + " order specified until a connection is successful."), + HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null, "LDAP base DN"), + HIVE_SERVER2_PLAIN_LDAP_DOMAIN("hive.server2.authentication.ldap.Domain", null, ""), + HIVE_SERVER2_PLAIN_LDAP_GROUPDNPATTERN("hive.server2.authentication.ldap.groupDNPattern", null, + "COLON-separated list of patterns to use to find DNs for group entities in this directory.\n" + + "Use %s where the actual group name is to be substituted for.\n" + + "For example: CN=%s,CN=Groups,DC=subdomain,DC=domain,DC=com."), + HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER("hive.server2.authentication.ldap.groupFilter", null, + "COMMA-separated list of LDAP Group names (short name not full DNs).\n" + + "For example: HiveAdmins,HadoopAdmins,Administrators"), + HIVE_SERVER2_PLAIN_LDAP_USERDNPATTERN("hive.server2.authentication.ldap.userDNPattern", null, + "COLON-separated list of patterns to use to find DNs for users in this directory.\n" + + "Use %s where the actual user name is to be substituted for.\n" + + "For example: CN=%s,CN=Users,DC=subdomain,DC=domain,DC=com."), + HIVE_SERVER2_PLAIN_LDAP_USERFILTER("hive.server2.authentication.ldap.userFilter", null, + "COMMA-separated list of LDAP usernames (just short names, not full DNs).\n" + + "For example: hiveuser,impalauser,hiveadmin,hadoopadmin"), + HIVE_SERVER2_PLAIN_LDAP_GUIDKEY("hive.server2.authentication.ldap.guidKey", "uid", + "LDAP attribute name whose values are unique in this LDAP server.\n" + + "For example: uid or CN."), + HIVE_SERVER2_PLAIN_LDAP_GROUPMEMBERSHIP_KEY("hive.server2.authentication.ldap.groupMembershipKey", "member", + "LDAP attribute name on the user entry that references a group the user belongs to.\n" + + "For example: member, uniqueMember or memberUid"), + HIVE_SERVER2_PLAIN_LDAP_GROUPCLASS_KEY("hive.server2.authentication.ldap.groupClassKey", "groupOfNames", + "LDAP attribute name on the group entry that is to be used in LDAP group searches.\n" + + "For example: group, groupOfNames or groupOfUniqueNames."), + HIVE_SERVER2_PLAIN_LDAP_CUSTOMLDAPQUERY("hive.server2.authentication.ldap.customLDAPQuery", null, + "A full LDAP query that the LDAP authentication provider executes against the LDAP server.\n" + + "If this query returns a null resultset, the LDAP provider fails the authentication\n" + + "request, and succeeds if the user is part of the resultset. " + + "For example: (&(objectClass=group)(objectClass=top)(instanceType=4)(cn=Domain*)) \n" + + "(&(objectClass=person)(|(sAMAccountName=admin)(|(memberOf=CN=Domain Admins,CN=Users,DC=domain,DC=com)" + + "(memberOf=CN=Administrators,CN=Builtin,DC=domain,DC=com))))"), + HIVE_SERVER2_CUSTOM_AUTHENTICATION_CLASS("hive.server2.custom.authentication.class", null, + "Custom authentication class. Used when property\n" + + "'hive.server2.authentication' is set to 'CUSTOM'. Provided class\n" + + "must be a proper implementation of the interface\n" + + "org.apache.hive.service.auth.PasswdAuthenticationProvider. HiveServer2\n" + + "will call its Authenticate(user, password) method to authenticate requests.\n" + + "The implementation may optionally implement Hadoop's\n" + + "org.apache.hadoop.conf.Configurable class to grab Hive's Configuration object."), +
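+    // Illustrative sketch (not part of this patch) of the CUSTOM contract described
+    // above; the class name and credential check are placeholders:
+    //   public class MyPasswdAuth implements
+    //       org.apache.hive.service.auth.PasswdAuthenticationProvider {
+    //     @Override
+    //     public void Authenticate(String user, String password)
+    //         throws javax.security.sasl.AuthenticationException {
+    //       if (!credentialStoreAccepts(user, password)) { // hypothetical helper
+    //         throw new javax.security.sasl.AuthenticationException("Access denied");
+    //       }
+    //     }
+    //   }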
HIVE_SERVER2_PAM_SERVICES("hive.server2.authentication.pam.services", null, + "List of the underlying pam services that should be used when auth type is PAM.\n" + + "A file with the same name must exist in /etc/pam.d"), + + HIVE_SERVER2_ENABLE_DOAS("hive.server2.enable.doAs", true, + "Setting this property to true will have HiveServer2 execute\n" + + "Hive operations as the user making the calls to it."), + HIVE_SERVER2_TABLE_TYPE_MAPPING("hive.server2.table.type.mapping", "CLASSIC", new StringSet("CLASSIC", "HIVE"), + "This setting reflects how HiveServer2 will report the table types for JDBC and other\n" + + "client implementations that retrieve the available tables and supported table types\n" + + " HIVE : Exposes Hive's native table types like MANAGED_TABLE, EXTERNAL_TABLE, VIRTUAL_VIEW\n" + + " CLASSIC : More generic types like TABLE and VIEW"), + HIVE_SERVER2_SESSION_HOOK("hive.server2.session.hook", "", ""), + + // SSL settings + HIVE_SERVER2_USE_SSL("hive.server2.use.SSL", false, + "Set this to true for using SSL encryption in HiveServer2."), + HIVE_SERVER2_SSL_KEYSTORE_PATH("hive.server2.keystore.path", "", + "SSL certificate keystore location."), + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD("hive.server2.keystore.password", "", + "SSL certificate keystore password."), +
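+    // Illustrative sketch (not part of this patch): TLS for the Thrift endpoint per
+    // the three entries above (path and password are placeholders):
+    //   conf.setBoolVar(ConfVars.HIVE_SERVER2_USE_SSL, true);
+    //   conf.setVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PATH, "/etc/hive/hs2.jks");
+    //   conf.setVar(ConfVars.HIVE_SERVER2_SSL_KEYSTORE_PASSWORD, "changeit");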
+ + " The udf black list takes precedence over udf white list"), + + HIVE_SERVER2_SESSION_CHECK_INTERVAL("hive.server2.session.check.interval", "6h", + new TimeValidator(TimeUnit.MILLISECONDS, 3000l, true, null, false), + "The check interval for session/operation timeout, which can be disabled by setting to zero or negative value."), + HIVE_SERVER2_IDLE_SESSION_TIMEOUT("hive.server2.idle.session.timeout", "7d", + new TimeValidator(TimeUnit.MILLISECONDS), + "Session will be closed when it's not accessed for this duration, which can be disabled by setting to zero or negative value."), + HIVE_SERVER2_IDLE_OPERATION_TIMEOUT("hive.server2.idle.operation.timeout", "5d", + new TimeValidator(TimeUnit.MILLISECONDS), + "Operation will be closed when it's not accessed for this duration of time, which can be disabled by setting to zero value.\n" + + " With positive value, it's checked for operations in terminal state only (FINISHED, CANCELED, CLOSED, ERROR).\n" + + " With negative value, it's checked for all of the operations regardless of state."), + HIVE_SERVER2_IDLE_SESSION_CHECK_OPERATION("hive.server2.idle.session.check.operation", true, + "Session will be considered to be idle only if there is no activity, and there is no pending operation.\n" + + " This setting takes effect only if session idle timeout (hive.server2.idle.session.timeout) and checking\n" + + "(hive.server2.session.check.interval) are enabled."), + HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT("hive.server2.thrift.client.retry.limit", 1,"Number of retries upon " + + "failure of Thrift HiveServer2 calls"), + HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT("hive.server2.thrift.client.connect.retry.limit", 1,"Number of " + + "retries while opening a connection to HiveServe2"), + HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS("hive.server2.thrift.client.retry.delay.seconds", "1s", + new TimeValidator(TimeUnit.SECONDS), "Number of seconds for the HiveServer2 thrift client to wait between " + + "consecutive connection attempts. Also specifies the time to wait between retrying thrift calls upon failures"), + HIVE_SERVER2_THRIFT_CLIENT_USER("hive.server2.thrift.client.user", "anonymous","Username to use against thrift" + + " client"), + HIVE_SERVER2_THRIFT_CLIENT_PASSWORD("hive.server2.thrift.client.password", "anonymous","Password to use against " + + "thrift client"), + + HIVE_SECURITY_COMMAND_WHITELIST("hive.security.command.whitelist", "set,reset,dfs,add,list,delete,reload,compile", + "Comma separated list of non-SQL Hive commands users are authorized to execute"), + + // If this is set all move tasks at the end of a multi-insert query will only begin once all + // outputs are ready + HIVE_MULTI_INSERT_MOVE_TASKS_SHARE_DEPENDENCIES( + "hive.multi.insert.move.tasks.share.dependencies", false, + "If this is set all move tasks for tables/partitions (not directories) at the end of a\n" + + "multi-insert query will only begin once the dependencies for all these move tasks have been\n" + + "met.\n" + + "Advantages: If concurrency is enabled, the locks will only be released once the query has\n" + + " finished, so with this config enabled, the time when the table/partition is\n" + + " generated will be much closer to when the lock on it is released.\n" + + "Disadvantages: If concurrency is not enabled, with this disabled, the tables/partitions which\n" + + " are produced by this query and finish earlier will be available for querying\n" + + " much earlier. 
Since the locks are only released once the query finishes, this\n" + + " does not apply if concurrency is enabled."), + + HIVE_INFER_BUCKET_SORT("hive.exec.infer.bucket.sort", false, + "If this is set, when writing partitions, the metadata will include the bucketing/sorting\n" + + "properties with which the data was written if any (this will not overwrite the metadata\n" + + "inherited from the table if the table is bucketed/sorted)"), + + HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO( + "hive.exec.infer.bucket.sort.num.buckets.power.two", false, + "If this is set, when setting the number of reducers for the map reduce task which writes the\n" + + "final output files, it will choose a number which is a power of two, unless the user specifies\n" + + "the number of reducers to use using mapred.reduce.tasks. The number of reducers\n" + + "may be set to a power of two, only to be followed by a merge task, preventing\n" + + "anything from being inferred.\n" + + "With hive.exec.infer.bucket.sort set to true:\n" + + "Advantages: If this is not set, the number of buckets for partitions will seem arbitrary,\n" + + " which means that the number of mappers used for optimized joins, for example, will\n" + + " be very low. With this set, since the number of buckets used for any partition is\n" + + " a power of two, the number of mappers used for optimized joins will be the least\n" + + " number of buckets used by any partition being joined.\n" + + "Disadvantages: This may mean a much larger or much smaller number of reducers being used in the\n" + + " final map reduce job, e.g. if a job was originally going to take 257 reducers,\n" + + " it will now take 512 reducers, similarly if the max number of reducers is 511,\n" + + " and a job was going to use this many, it will now use 256 reducers."), + + HIVEOPTLISTBUCKETING("hive.optimize.listbucketing", false, + "Enable the list bucketing optimizer. The default value is false, so it is disabled by default."), + + // Allow TCP keepalive socket option for HiveServer or a maximum timeout for the socket. + SERVER_READ_SOCKET_TIMEOUT("hive.server.read.socket.timeout", "10s", + new TimeValidator(TimeUnit.SECONDS), + "Timeout for the HiveServer to close the connection if no response from the client. By default, 10 seconds."), + SERVER_TCP_KEEP_ALIVE("hive.server.tcp.keepalive", true, + "Whether to enable TCP keepalive for the Hive Server. Keepalive will prevent accumulation of half-open connections."), + + HIVE_DECODE_PARTITION_NAME("hive.decode.partition.name", false, + "Whether to show the unquoted partition names in query results."), + + HIVE_EXECUTION_ENGINE("hive.execution.engine", "mr", new StringSet("mr", "tez", "spark"), + "Chooses execution engine. Options are: mr (Map reduce, default), tez, spark. While MR\n" + + "remains the default engine for historical reasons, it is itself a historical engine\n" + + "and is deprecated in the Hive 2 line. 
It may be removed without further warning."), + + HIVE_EXECUTION_MODE("hive.execution.mode", "container", new StringSet("container", "llap"), + "Chooses whether query fragments will run in container or in llap"), + + HIVE_JAR_DIRECTORY("hive.jar.directory", null, + "This is the location Hive in Tez mode will look in to find a site wide \n" + + "installed Hive instance."), + HIVE_USER_INSTALL_DIR("hive.user.install.directory", "/user/", + "If Hive (in Tez mode only) cannot find a usable Hive jar in \"hive.jar.directory\", \n" + + "it will upload the Hive jar to \"hive.user.install.directory/user.name\"\n" + + "and use it to run queries."), + + // Vectorization enabled + HIVE_VECTORIZATION_ENABLED("hive.vectorized.execution.enabled", false, + "This flag should be set to true to enable vectorized mode of query execution.\n" + + "The default value is false."), + HIVE_VECTORIZATION_REDUCE_ENABLED("hive.vectorized.execution.reduce.enabled", true, + "This flag should be set to true to enable vectorized mode of the reduce-side of query execution.\n" + + "The default value is true."), + HIVE_VECTORIZATION_REDUCE_GROUPBY_ENABLED("hive.vectorized.execution.reduce.groupby.enabled", true, + "This flag should be set to true to enable vectorized mode of the reduce-side GROUP BY query execution.\n" + + "The default value is true."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_ENABLED("hive.vectorized.execution.mapjoin.native.enabled", true, + "This flag should be set to true to enable native (i.e. non-pass through) vectorization\n" + + "of queries using MapJoin.\n" + + "The default value is true."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_MULTIKEY_ONLY_ENABLED("hive.vectorized.execution.mapjoin.native.multikey.only.enabled", false, + "This flag should be set to true to restrict use of native vector map join hash tables to\n" + + "the MultiKey in queries using MapJoin.\n" + + "The default value is false."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_MINMAX_ENABLED("hive.vectorized.execution.mapjoin.minmax.enabled", false, + "This flag should be set to true to enable vector map join hash tables to\n" + + "use min / max filtering for integer join queries using MapJoin.\n" + + "The default value is false."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_OVERFLOW_REPEATED_THRESHOLD("hive.vectorized.execution.mapjoin.overflow.repeated.threshold", -1, + "The number of small table rows for a match in vector map join hash tables\n" + + "where we use the repeated field optimization in overflow vectorized row batch for join queries using MapJoin.\n" + + "A value of -1 means do use the join result optimization. Otherwise, threshold value can be 0 to maximum integer."), + HIVE_VECTORIZATION_MAPJOIN_NATIVE_FAST_HASHTABLE_ENABLED("hive.vectorized.execution.mapjoin.native.fast.hashtable.enabled", false, + "This flag should be set to true to enable use of native fast vector map join hash tables in\n" + + "queries using MapJoin.\n" + + "The default value is false."), + HIVE_VECTORIZATION_GROUPBY_CHECKINTERVAL("hive.vectorized.groupby.checkinterval", 100000, + "Number of entries added to the group by aggregation hash before a recomputation of average entry size is performed."), +
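+    // Illustrative sketch (not part of this patch): map-side and reduce-side
+    // vectorization are toggled independently via the flags above:
+    //   conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_ENABLED, true);
+    //   conf.setBoolVar(ConfVars.HIVE_VECTORIZATION_REDUCE_ENABLED, true);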
\n" + + "Exceeding this will trigger a flush irrelevant of memory pressure condition."), + HIVE_VECTORIZATION_GROUPBY_FLUSH_PERCENT("hive.vectorized.groupby.flush.percent", (float) 0.1, + "Percent of entries in the group by aggregation hash flushed when the memory threshold is exceeded."), + HIVE_VECTORIZATION_REDUCESINK_NEW_ENABLED("hive.vectorized.execution.reducesink.new.enabled", true, + "This flag should be set to true to enable the new vectorization\n" + + "of queries using ReduceSink.\ni" + + "The default value is true."), + HIVE_TYPE_CHECK_ON_INSERT("hive.typecheck.on.insert", true, "This property has been extended to control " + + "whether to check, convert, and normalize partition value to conform to its column type in " + + "partition operations including but not limited to insert, such as alter, describe etc."), + + HIVE_HADOOP_CLASSPATH("hive.hadoop.classpath", null, + "For Windows OS, we need to pass HIVE_HADOOP_CLASSPATH Java parameter while starting HiveServer2 \n" + + "using \"-hiveconf hive.hadoop.classpath=%HIVE_LIB%\"."), + + HIVE_RPC_QUERY_PLAN("hive.rpc.query.plan", false, + "Whether to send the query plan via local resource or RPC"), + HIVE_AM_SPLIT_GENERATION("hive.compute.splits.in.am", true, + "Whether to generate the splits locally or in the AM (tez only)"), + HIVE_TEZ_GENERATE_CONSISTENT_SPLITS("hive.tez.input.generate.consistent.splits", true, + "Whether to generate consistent split locations when generating splits in the AM"), + HIVE_PREWARM_ENABLED("hive.prewarm.enabled", false, "Enables container prewarm for Tez/Spark (Hadoop 2 only)"), + HIVE_PREWARM_NUM_CONTAINERS("hive.prewarm.numcontainers", 10, "Controls the number of containers to prewarm for Tez/Spark (Hadoop 2 only)"), + + HIVESTAGEIDREARRANGE("hive.stageid.rearrange", "none", new StringSet("none", "idonly", "traverse", "execution"), ""), + HIVEEXPLAINDEPENDENCYAPPENDTASKTYPES("hive.explain.dependency.append.tasktype", false, ""), + + HIVECOUNTERGROUP("hive.counters.group.name", "HIVE", + "The name of counter group for internal Hive variables (CREATED_FILE, FATAL_ERROR, etc.)"), + + HIVE_QUOTEDID_SUPPORT("hive.support.quoted.identifiers", "column", + new StringSet("none", "column"), + "Whether to use quoted identifier. 'none' or 'column' can be used. \n" + + " none: default(past) behavior. Implies only alphaNumeric and underscore are valid characters in identifiers.\n" + + " column: implies column names can contain any character." + ), + HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS("hive.support.sql11.reserved.keywords", true, + "This flag should be set to true to enable support for SQL2011 reserved keywords.\n" + + "The default value is true."), + HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true, + "This flag should be set to true to enable support for special characters in table names.\n" + + "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n" + + "The only supported special character right now is '/'. 
HIVE_SUPPORT_SQL11_RESERVED_KEYWORDS("hive.support.sql11.reserved.keywords", true, + "This flag should be set to true to enable support for SQL2011 reserved keywords.\n" + + "The default value is true."), + HIVE_SUPPORT_SPECICAL_CHARACTERS_IN_TABLE_NAMES("hive.support.special.characters.tablename", true, + "This flag should be set to true to enable support for special characters in table names.\n" + + "When it is set to false, only [a-zA-Z_0-9]+ are supported.\n" + + "The only supported special character right now is '/'. This flag applies only to quoted table names.\n" + + "The default value is true."), + // role names are case-insensitive + USERS_IN_ADMIN_ROLE("hive.users.in.admin.role", "", false, + "Comma separated list of users who are in admin role for bootstrapping.\n" + + "More users can be added in ADMIN role later."), + + HIVE_COMPAT("hive.compat", HiveCompat.DEFAULT_COMPAT_LEVEL, + "Enable (configurable) deprecated behaviors by setting desired level of backward compatibility.\n" + + "Setting to 0.12:\n" + + " Maintains division behavior: int / int = double"), + HIVE_CONVERT_JOIN_BUCKET_MAPJOIN_TEZ("hive.convert.join.bucket.mapjoin.tez", false, + "Whether joins can be automatically converted to bucket map joins in hive \n" + + "when tez is used as the execution engine."), + + HIVE_CHECK_CROSS_PRODUCT("hive.exec.check.crossproducts", true, + "Check if a plan contains a Cross Product. If there is one, output a warning to the Session's console."), + HIVE_LOCALIZE_RESOURCE_WAIT_INTERVAL("hive.localize.resource.wait.interval", "5000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Time to wait for another thread to localize the same resource for hive-tez."), + HIVE_LOCALIZE_RESOURCE_NUM_WAIT_ATTEMPTS("hive.localize.resource.num.wait.attempts", 5, + "The number of attempts waiting for localizing a resource in hive-tez."), + TEZ_AUTO_REDUCER_PARALLELISM("hive.tez.auto.reducer.parallelism", false, + "Turn on Tez' auto reducer parallelism feature. When enabled, Hive will still estimate data sizes\n" + + "and set parallelism estimates. Tez will sample source vertices' output sizes and adjust the estimates at runtime as\n" + + "necessary."), + TEZ_MAX_PARTITION_FACTOR("hive.tez.max.partition.factor", 2f, + "When auto reducer parallelism is enabled, this factor will be used to over-partition data in shuffle edges."), + TEZ_MIN_PARTITION_FACTOR("hive.tez.min.partition.factor", 0.25f, + "When auto reducer parallelism is enabled, this factor will be used to put a lower limit to the number\n" + + "of reducers that tez specifies."), + TEZ_OPTIMIZE_BUCKET_PRUNING( + "hive.tez.bucket.pruning", false, + "When pruning is enabled, filters on bucket columns will be processed by \n" + + "filtering the splits against a bitset of included buckets. This needs predicates \n"+ + "produced by hive.optimize.ppd and hive.optimize.index.filters."), + TEZ_OPTIMIZE_BUCKET_PRUNING_COMPAT( + "hive.tez.bucket.pruning.compat", true, + "When pruning is enabled, handle possibly broken inserts due to negative hashcodes.\n" + + "This occasionally doubles the data scan cost, but is enabled by default for safety"), + TEZ_DYNAMIC_PARTITION_PRUNING( + "hive.tez.dynamic.partition.pruning", true, + "When dynamic pruning is enabled, joins on partition keys will be processed by sending\n" + + "events from the processing vertices to the Tez application master. These events will be\n" + + "used to prune unnecessary partitions."), + TEZ_DYNAMIC_PARTITION_PRUNING_MAX_EVENT_SIZE("hive.tez.dynamic.partition.pruning.max.event.size", 1*1024*1024L, + "Maximum size of events sent by processors in dynamic pruning. If this size is crossed, no pruning will take place."), + + TEZ_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE("hive.tez.dynamic.partition.pruning.max.data.size", 100*1024*1024L, + "Maximum total data size of events in dynamic pruning."), + TEZ_SMB_NUMBER_WAVES( + "hive.tez.smb.number.waves", + (float) 0.5, + "The number of waves in which to run the SMB join. This accounts for the cluster being occupied. 
Ideally it should be 1 wave."), + TEZ_EXEC_SUMMARY( + "hive.tez.exec.print.summary", + false, + "Display a breakdown of execution steps for every query executed by the shell."), + TEZ_EXEC_INPLACE_PROGRESS( + "hive.tez.exec.inplace.progress", + true, + "Updates tez job execution progress in-place in the terminal."), + LLAP_IO_ENABLED("hive.llap.io.enabled", null, "Whether the LLAP IO layer is enabled."), + LLAP_IO_MEMORY_MODE("hive.llap.io.memory.mode", "cache", + new StringSet("cache", "allocator", "none"), + "LLAP IO memory usage; 'cache' (the default) uses data and metadata cache with a\n" + + "custom off-heap allocator, 'allocator' uses the custom allocator without the caches,\n" + + "'none' doesn't use either (this mode may result in significant performance degradation)"), + LLAP_ALLOCATOR_MIN_ALLOC("hive.llap.io.allocator.alloc.min", "128Kb", new SizeValidator(), + "Minimum allocation possible from LLAP buddy allocator. Allocations below that are\n" + + "padded to minimum allocation. For ORC, should generally be the same as the expected\n" + + "compression buffer size, or next lowest power of 2. Must be a power of 2."), + LLAP_ALLOCATOR_MAX_ALLOC("hive.llap.io.allocator.alloc.max", "16Mb", new SizeValidator(), + "Maximum allocation possible from LLAP buddy allocator. For ORC, should be as large as\n" + + "the largest expected ORC compression buffer size. Must be a power of 2."), + LLAP_ALLOCATOR_ARENA_COUNT("hive.llap.io.allocator.arena.count", 8, + "Arena count for LLAP low-level cache; cache will be allocated in steps of\n" + + "(size/arena_count) bytes. This size must be <= 1Gb and >= max allocation; if it is\n" + + "not the case, an adjusted size will be used. Using powers of 2 is recommended."), + LLAP_IO_MEMORY_MAX_SIZE("hive.llap.io.memory.size", "1Gb", new SizeValidator(), + "Maximum size for IO allocator or ORC low-level cache.", "hive.llap.io.cache.orc.size"), + LLAP_ALLOCATOR_DIRECT("hive.llap.io.allocator.direct", true, + "Whether ORC low-level cache should use direct allocation."), + LLAP_USE_LRFU("hive.llap.io.use.lrfu", false, + "Whether ORC low-level cache should use LRFU cache policy instead of default (FIFO)."), + LLAP_LRFU_LAMBDA("hive.llap.io.lrfu.lambda", 0.01f, + "Lambda for ORC low-level cache LRFU cache policy. Must be in [0, 1]. 0 makes LRFU\n" + + "behave like LFU, 1 makes it behave like LRU, values in between balance accordingly."), + LLAP_CACHE_ALLOW_SYNTHETIC_FILEID("hive.llap.cache.allow.synthetic.fileid", false, + "Whether LLAP cache should use synthetic file ID if real one is not available. Systems\n" + + "like HDFS, Isilon, etc. provide a unique file/inode ID. On other FSes (e.g. local\n" + + "FS), the cache would not work by default because LLAP is unable to uniquely track the\n" + + "files; enabling this setting allows LLAP to generate a file ID from the path, size and\n" + + "modification time, which is almost certain to identify the file uniquely. However, if you\n" + + "use a FS without file IDs and rewrite files a lot (or are paranoid), you might want\n" + + "to avoid this setting."), + LLAP_IO_USE_FILEID_PATH("hive.llap.io.use.fileid.path", true, + "Whether LLAP should use fileId (inode)-based path to ensure better consistency for the\n" + + "cases of file overwrites. 
This is supported on HDFS."), + LLAP_ORC_ENABLE_TIME_COUNTERS("hive.llap.io.orc.time.counters", true, + "Whether to enable time counters for LLAP IO layer (time spent in HDFS, etc.)"), + LLAP_AUTO_ALLOW_UBER("hive.llap.auto.allow.uber", true, + "Whether or not to allow the planner to run vertices in the AM."), + LLAP_AUTO_ENFORCE_TREE("hive.llap.auto.enforce.tree", true, + "Enforce that all parents are in llap, before considering vertex"), + LLAP_AUTO_ENFORCE_VECTORIZED("hive.llap.auto.enforce.vectorized", true, + "Enforce that inputs are vectorized, before considering vertex"), + LLAP_AUTO_ENFORCE_STATS("hive.llap.auto.enforce.stats", true, + "Enforce that col stats are available, before considering vertex"), + LLAP_AUTO_MAX_INPUT("hive.llap.auto.max.input.size", 10*1024*1024*1024L, + "Check input size, before considering vertex (-1 disables check)"), + LLAP_AUTO_MAX_OUTPUT("hive.llap.auto.max.output.size", 1*1024*1024*1024L, + "Check output size, before considering vertex (-1 disables check)"), + LLAP_SKIP_COMPILE_UDF_CHECK("hive.llap.skip.compile.udf.check", false, + "Whether to skip the compile-time check for non-built-in UDFs when deciding whether to\n" + + "execute tasks in LLAP. Skipping the check allows executing UDFs from pre-localized\n" + + "jars in LLAP; if the jars are not pre-localized, the UDFs will simply fail to load."), + LLAP_ALLOW_PERMANENT_FNS("hive.llap.allow.permanent.fns", true, + "Whether LLAP decider should allow permanent UDFs."), + LLAP_EXECUTION_MODE("hive.llap.execution.mode", "none", + new StringSet("auto", "none", "all", "map"), + "Chooses whether query fragments will run in container or in llap"), + LLAP_OBJECT_CACHE_ENABLED("hive.llap.object.cache.enabled", true, + "Cache objects (plans, hashtables, etc) in llap"), + LLAP_QUEUE_METRICS_PERCENTILE_INTERVALS("hive.llap.queue.metrics.percentiles.intervals", "", + "Comma-delimited set of integers denoting the desired rollover intervals (in seconds)\n" + + "for percentile latency metrics on the LLAP daemon producer-consumer queue.\n" + + "By default, percentile latency metrics are disabled."), + LLAP_IO_THREADPOOL_SIZE("hive.llap.io.threadpool.size", 10, + "Specify the number of threads to use for low-level IO thread pool."), + LLAP_KERBEROS_PRINCIPAL(HIVE_LLAP_DAEMON_SERVICE_PRINCIPAL_NAME, "", + "The name of the LLAP daemon's service principal."), + LLAP_KERBEROS_KEYTAB_FILE("hive.llap.daemon.keytab.file", "", + "The path to the Kerberos Keytab file containing the LLAP daemon's service principal."), + LLAP_ZKSM_KERBEROS_PRINCIPAL("hive.llap.zk.sm.principal", "", + "The name of the principal to use to talk to ZooKeeper for ZooKeeper SecretManager."), + LLAP_ZKSM_KERBEROS_KEYTAB_FILE("hive.llap.zk.sm.keytab.file", "", + "The path to the Kerberos Keytab file containing the principal to use to talk to\n" + + "ZooKeeper for ZooKeeper SecretManager."), + LLAP_ZKSM_ZK_CONNECTION_STRING("hive.llap.zk.sm.connectionString", "", + "ZooKeeper connection string for ZooKeeper SecretManager."), + // Note: do not rename to ..service.acl; Hadoop generates .hosts setting name from this, + // resulting in a collision with existing hive.llap.daemon.service.hosts and bizarre errors. + LLAP_SECURITY_ACL("hive.llap.daemon.acl", "*", "The ACL for LLAP daemon."), + LLAP_MANAGEMENT_ACL("hive.llap.management.acl", "*", "The ACL for LLAP daemon management."), + // Hadoop DelegationTokenManager default is 1 week. 
+ LLAP_DELEGATION_TOKEN_LIFETIME("hive.llap.daemon.delegation.token.lifetime", "14d", + new TimeValidator(TimeUnit.SECONDS), + "LLAP delegation token lifetime, in seconds if specified without a unit."), + LLAP_MANAGEMENT_RPC_PORT("hive.llap.management.rpc.port", 15004, + "RPC port for LLAP daemon management service."), + LLAP_WEB_AUTO_AUTH("hive.llap.auto.auth", true, + "Whether or not to set Hadoop configs to enable auth in LLAP web app."), + + LLAP_DAEMON_RPC_NUM_HANDLERS("hive.llap.daemon.rpc.num.handlers", 5, + "Number of RPC handlers for LLAP daemon.", "llap.daemon.rpc.num.handlers"), + LLAP_DAEMON_WORK_DIRS("hive.llap.daemon.work.dirs", "", + "Working directories for the daemon. Needs to be set for a secure cluster, since LLAP may\n" + + "not have access to the default YARN working directories. yarn.nodemanager.local-dirs is\n" + + "used if this is not set", "llap.daemon.work.dirs"), + LLAP_DAEMON_YARN_SHUFFLE_PORT("hive.llap.daemon.yarn.shuffle.port", 15551, + "YARN shuffle port for LLAP-daemon-hosted shuffle.", "llap.daemon.yarn.shuffle.port"), + LLAP_DAEMON_YARN_CONTAINER_MB("hive.llap.daemon.yarn.container.mb", -1, + "llap server yarn container size in MB. Used in LlapServiceDriver and package.py", "llap.daemon.yarn.container.mb"), + LLAP_DAEMON_QUEUE_NAME("hive.llap.daemon.queue.name", null, + "Queue name within which the llap slider application will run." + + " Used in LlapServiceDriver and package.py"), + LLAP_DAEMON_SHUFFLE_DIR_WATCHER_ENABLED("hive.llap.daemon.shuffle.dir.watcher.enabled", false, + "TODO doc", "llap.daemon.shuffle.dir-watcher.enabled"), + LLAP_DAEMON_AM_LIVENESS_HEARTBEAT_INTERVAL_MS( + "hive.llap.daemon.am.liveness.heartbeat.interval.ms", "10000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Tez AM-LLAP heartbeat interval (milliseconds). This needs to be below the task timeout\n" + + "interval, but otherwise as high as possible to avoid unnecessary traffic.", + "llap.daemon.am.liveness.heartbeat.interval-ms"), + LLAP_DAEMON_AM_LIVENESS_CONNECTION_TIMEOUT_MS( + "hive.llap.am.liveness.connection.timeout.ms", "10000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Amount of time to wait on connection failures to the AM from an LLAP daemon before\n" + + "considering the AM to be dead.", "llap.am.liveness.connection.timeout-millis"), + // Not used yet - since the Writable RPC engine does not support this policy. 
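+ // (The setting below is presumably registered anyway so the key already exists once a supporting RPC engine is available.)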
+ LLAP_DAEMON_AM_LIVENESS_CONNECTION_SLEEP_BETWEEN_RETRIES_MS( + "hive.llap.am.liveness.connection.sleep.between.retries.ms", "2000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Sleep duration while waiting to retry connection failures to the AM from the daemon for\n" + + "the general keep-alive thread (milliseconds).", + "llap.am.liveness.connection.sleep-between-retries-millis"), + LLAP_DAEMON_NUM_EXECUTORS("hive.llap.daemon.num.executors", 4, + "Number of executors to use in LLAP daemon; essentially, the number of tasks that can be\n" + + "executed in parallel.", "llap.daemon.num.executors"), + LLAP_DAEMON_RPC_PORT("hive.llap.daemon.rpc.port", 15001, "The LLAP daemon RPC port.", + "llap.daemon.rpc.port"), + LLAP_DAEMON_MEMORY_PER_INSTANCE_MB("hive.llap.daemon.memory.per.instance.mb", 4096, + "The total amount of memory to use for the executors inside LLAP (in megabytes).", + "llap.daemon.memory.per.instance.mb"), + LLAP_DAEMON_VCPUS_PER_INSTANCE("hive.llap.daemon.vcpus.per.instance", 4, + "The total number of vcpus to use for the executors inside LLAP.", + "llap.daemon.vcpus.per.instance"), + LLAP_DAEMON_NUM_FILE_CLEANER_THREADS("hive.llap.daemon.num.file.cleaner.threads", 1, + "Number of file cleaner threads in LLAP.", "llap.daemon.num.file.cleaner.threads"), + LLAP_FILE_CLEANUP_DELAY_SECONDS("hive.llap.file.cleanup.delay.seconds", "300s", + new TimeValidator(TimeUnit.SECONDS), + "How long to delay before cleaning up query files in LLAP (in seconds, for debugging).", + "llap.file.cleanup.delay-seconds"), + LLAP_DAEMON_SERVICE_HOSTS("hive.llap.daemon.service.hosts", null, + "Explicitly specified hosts to use for LLAP scheduling. Useful for testing. By default,\n" + + "YARN registry is used.", "llap.daemon.service.hosts"), + LLAP_DAEMON_SERVICE_REFRESH_INTERVAL("hive.llap.daemon.service.refresh.interval.sec", "60s", + new TimeValidator(TimeUnit.SECONDS), + "LLAP YARN registry service list refresh delay, in seconds.", + "llap.daemon.service.refresh.interval"), + LLAP_DAEMON_COMMUNICATOR_NUM_THREADS("hive.llap.daemon.communicator.num.threads", 10, + "Number of threads to use in LLAP task communicator in Tez AM.", + "llap.daemon.communicator.num.threads"), + LLAP_DAEMON_DOWNLOAD_PERMANENT_FNS("hive.llap.daemon.download.permanent.fns", false, + "Whether LLAP daemon should localize the resources for permanent UDFs."), + LLAP_TASK_SCHEDULER_NODE_REENABLE_MIN_TIMEOUT_MS( + "hive.llap.task.scheduler.node.reenable.min.timeout.ms", "200ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Minimum time after which a previously disabled node will be re-enabled for scheduling,\n" + + "in milliseconds. This may be modified by an exponential back-off if failures persist.", + "llap.task.scheduler.node.re-enable.min.timeout.ms"), + LLAP_TASK_SCHEDULER_NODE_REENABLE_MAX_TIMEOUT_MS( + "hive.llap.task.scheduler.node.reenable.max.timeout.ms", "10000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Maximum time after which a previously disabled node will be re-enabled for scheduling,\n" + + "in milliseconds. This may be modified by an exponential back-off if failures persist.", + "llap.task.scheduler.node.re-enable.max.timeout.ms"), + LLAP_TASK_SCHEDULER_NODE_DISABLE_BACK_OFF_FACTOR( + "hive.llap.task.scheduler.node.disable.backoff.factor", 1.5f, + "Backoff factor on successive blacklists of a node due to some failures. 
Blacklist times\n" + + "start at the min timeout and go up to the max timeout based on this backoff factor.", + "llap.task.scheduler.node.disable.backoff.factor"), + LLAP_TASK_SCHEDULER_NUM_SCHEDULABLE_TASKS_PER_NODE( + "hive.llap.task.scheduler.num.schedulable.tasks.per.node", 0, + "The number of tasks the AM TaskScheduler will try allocating per node. 0 indicates that\n" + + "this should be picked up from the Registry. -1 indicates unlimited capacity; positive\n" + + "values indicate a specific bound.", "llap.task.scheduler.num.schedulable.tasks.per.node"), + LLAP_TASK_SCHEDULER_LOCALITY_DELAY( + "hive.llap.task.scheduler.locality.delay", "0ms", + new TimeValidator(TimeUnit.MILLISECONDS, -1l, true, Long.MAX_VALUE, true), + "Amount of time to wait before allocating a request which contains location information," + + " to a location other than the ones requested. Set to -1 for an infinite delay, 0" + + "for a no delay. Currently these are the only two supported values" + ), + LLAP_DAEMON_TASK_SCHEDULER_WAIT_QUEUE_SIZE("hive.llap.daemon.task.scheduler.wait.queue.size", + 10, "LLAP scheduler maximum queue size.", "llap.daemon.task.scheduler.wait.queue.size"), + LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME( + "hive.llap.daemon.wait.queue.comparator.class.name", + "org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator", + "The priority comparator to use for LLAP scheduler prioroty queue. The built-in options\n" + + "are org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\n" + + ".....FirstInFirstOutComparator", "llap.daemon.wait.queue.comparator.class.name"), + LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION( + "hive.llap.daemon.task.scheduler.enable.preemption", true, + "Whether non-finishable running tasks (e.g. 
a reducer waiting for inputs) should be\n" + + "preempted by finishable tasks inside LLAP scheduler.", + "llap.daemon.task.scheduler.enable.preemption"), + LLAP_TASK_COMMUNICATOR_CONNECTION_TIMEOUT_MS( + "hive.llap.task.communicator.connection.timeout.ms", "16000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Connection timeout (in milliseconds) before a failure to an LLAP daemon from Tez AM.", + "llap.task.communicator.connection.timeout-millis"), + LLAP_TASK_COMMUNICATOR_CONNECTION_SLEEP_BETWEEN_RETRIES_MS( + "hive.llap.task.communicator.connection.sleep.between.retries.ms", "2000ms", + new TimeValidator(TimeUnit.MILLISECONDS), + "Sleep duration (in milliseconds) to wait before retrying on error when obtaining a\n" + + "connection to LLAP daemon from Tez AM.", + "llap.task.communicator.connection.sleep-between-retries-millis"), + LLAP_DAEMON_WEB_PORT("hive.llap.daemon.web.port", 15002, "LLAP daemon web UI port.", + "llap.daemon.service.port"), + LLAP_DAEMON_WEB_SSL("hive.llap.daemon.web.ssl", false, + "Whether LLAP daemon web UI should use SSL.", "llap.daemon.service.ssl"), + LLAP_CLIENT_CONSISTENT_SPLITS("hive.llap.client.consistent.splits", + false, + "Whether to set up split locations to match nodes on which llap daemons are running," + + " instead of using the locations provided by the split itself"), + + SPARK_CLIENT_FUTURE_TIMEOUT("hive.spark.client.future.timeout", + "60s", new TimeValidator(TimeUnit.SECONDS), + "Timeout for requests from Hive client to remote Spark driver."), + SPARK_JOB_MONITOR_TIMEOUT("hive.spark.job.monitor.timeout", + "60s", new TimeValidator(TimeUnit.SECONDS), + "Timeout for job monitor to get Spark job state."), + SPARK_RPC_CLIENT_CONNECT_TIMEOUT("hive.spark.client.connect.timeout", + "1000ms", new TimeValidator(TimeUnit.MILLISECONDS), + "Timeout for remote Spark driver in connecting back to Hive client."), + SPARK_RPC_CLIENT_HANDSHAKE_TIMEOUT("hive.spark.client.server.connect.timeout", + "90000ms", new TimeValidator(TimeUnit.MILLISECONDS), + "Timeout for handshake between Hive client and remote Spark driver. Checked by both processes."), + SPARK_RPC_SECRET_RANDOM_BITS("hive.spark.client.secret.bits", "256", + "Number of bits of randomness in the generated secret for communication between Hive client and remote Spark driver. " + + "Rounded down to the nearest multiple of 8."), + SPARK_RPC_MAX_THREADS("hive.spark.client.rpc.threads", 8, + "Maximum number of threads for remote Spark driver's RPC event loop."), + SPARK_RPC_MAX_MESSAGE_SIZE("hive.spark.client.rpc.max.size", 50 * 1024 * 1024, + "Maximum message size in bytes for communication between Hive client and remote Spark driver. Default is 50MB."), + SPARK_RPC_CHANNEL_LOG_LEVEL("hive.spark.client.channel.log.level", null, + "Channel logging level for remote Spark driver. One of {DEBUG, ERROR, INFO, TRACE, WARN}."), + SPARK_RPC_SASL_MECHANISM("hive.spark.client.rpc.sasl.mechanisms", "DIGEST-MD5", + "Name of the SASL mechanism to use for authentication."), + SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "", + "The server address of the HiveServer2 host to be used for communication between Hive client and remote Spark driver. " + + "Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host." 
+ + "This is only necessary if the host has mutiple network addresses and if a different network address other than " + + "hive.server2.thrift.bind.host is to be used."), + SPARK_DYNAMIC_PARTITION_PRUNING( + "hive.spark.dynamic.partition.pruning", false, + "When dynamic pruning is enabled, joins on partition keys will be processed by writing\n" + + "to a temporary HDFS file, and read later for removing unnecessary partitions."), + SPARK_DYNAMIC_PARTITION_PRUNING_MAX_DATA_SIZE( + "hive.spark.dynamic.partition.pruning.max.data.size", 100*1024*1024L, + "Maximum total data size in dynamic pruning."), + NWAYJOINREORDER("hive.reorder.nway.joins", true, + "Runs reordering of tables within single n-way join (i.e.: picks streamtable)"), + HIVE_LOG_N_RECORDS("hive.log.every.n.records", 0L, new RangeValidator(0L, null), + "If value is greater than 0 logs in fixed intervals of size n rather than exponentially."), + HIVE_MSCK_PATH_VALIDATION("hive.msck.path.validation", "throw", + new StringSet("throw", "skip", "ignore"), "The approach msck should take with HDFS " + + "directories that are partition-like but contain unsupported characters. 'throw' (an " + + "exception) is the default; 'skip' will skip the invalid directories and still repair the" + + " others; 'ignore' will skip the validation (legacy behavior, causes bugs in many cases)"), + HIVE_SERVER2_LLAP_CONCURRENT_QUERIES("hive.server2.llap.concurrent.queries", -1, + "The number of queries allowed in parallel via llap. Negative number implies 'infinite'."), + HIVE_TEZ_ENABLE_MEMORY_MANAGER("hive.tez.enable.memory.manager", true, + "Enable memory manager for tez"), + HIVE_HASH_TABLE_INFLATION_FACTOR("hive.hash.table.inflation.factor", (float) 2.0, + "Expected inflation factor between disk/in memory representation of hash tables"), + HIVE_LOG_TRACE_ID("hive.log.trace.id", "", + "Log tracing id that can be used by upstream clients for tracking respective logs. " + + "Truncated to " + LOG_PREFIX_LENGTH + " characters. 
Defaults to the auto-generated session id."), + + + HIVE_CONF_RESTRICTED_LIST("hive.conf.restricted.list", + "hive.security.authenticator.manager,hive.security.authorization.manager,hive.users.in.admin.role", + "Comma separated list of configuration options which are immutable at runtime"), + HIVE_CONF_HIDDEN_LIST("hive.conf.hidden.list", + METASTOREPWD.varname + "," + HIVE_SERVER2_SSL_KEYSTORE_PASSWORD.varname, + "Comma separated list of configuration options, such as passwords, which should not be readable by a normal user"), + HIVE_CONF_INTERNAL_VARIABLE_LIST("hive.conf.internal.variable.list", + "hive.added.files.path,hive.added.jars.path,hive.added.archives.path", + "Comma separated list of variables which are used internally and should not be configurable."); + + + public final String varname; + private final String altName; + private final String defaultExpr; + + public final String defaultStrVal; + public final int defaultIntVal; + public final long defaultLongVal; + public final float defaultFloatVal; + public final boolean defaultBoolVal; + + private final Class valClass; + private final VarType valType; + + private final Validator validator; + + private final String description; + + private final boolean excluded; + private final boolean caseSensitive; + + ConfVars(String varname, Object defaultVal, String description) { + this(varname, defaultVal, null, description, true, false, null); + } + + ConfVars(String varname, Object defaultVal, String description, String altName) { + this(varname, defaultVal, null, description, true, false, altName); + } + + ConfVars(String varname, Object defaultVal, Validator validator, String description, + String altName) { + this(varname, defaultVal, validator, description, true, false, altName); + } + + ConfVars(String varname, Object defaultVal, String description, boolean excluded) { + this(varname, defaultVal, null, description, true, excluded, null); + } + + ConfVars(String varname, String defaultVal, boolean caseSensitive, String description) { + this(varname, defaultVal, null, description, caseSensitive, false, null); + } + + ConfVars(String varname, Object defaultVal, Validator validator, String description) { + this(varname, defaultVal, validator, description, true, false, null); + } + + ConfVars(String varname, Object defaultVal, Validator validator, String description, + boolean caseSensitive, boolean excluded, String altName) { + this.varname = varname; + this.validator = validator; + this.description = description; + this.defaultExpr = defaultVal == null ? 
null : String.valueOf(defaultVal); + this.excluded = excluded; + this.caseSensitive = caseSensitive; + this.altName = altName; + if (defaultVal == null || defaultVal instanceof String) { + this.valClass = String.class; + this.valType = VarType.STRING; + this.defaultStrVal = SystemVariables.substitute((String)defaultVal); + this.defaultIntVal = -1; + this.defaultLongVal = -1; + this.defaultFloatVal = -1; + this.defaultBoolVal = false; + } else if (defaultVal instanceof Integer) { + this.valClass = Integer.class; + this.valType = VarType.INT; + this.defaultStrVal = null; + this.defaultIntVal = (Integer)defaultVal; + this.defaultLongVal = -1; + this.defaultFloatVal = -1; + this.defaultBoolVal = false; + } else if (defaultVal instanceof Long) { + this.valClass = Long.class; + this.valType = VarType.LONG; + this.defaultStrVal = null; + this.defaultIntVal = -1; + this.defaultLongVal = (Long)defaultVal; + this.defaultFloatVal = -1; + this.defaultBoolVal = false; + } else if (defaultVal instanceof Float) { + this.valClass = Float.class; + this.valType = VarType.FLOAT; + this.defaultStrVal = null; + this.defaultIntVal = -1; + this.defaultLongVal = -1; + this.defaultFloatVal = (Float)defaultVal; + this.defaultBoolVal = false; + } else if (defaultVal instanceof Boolean) { + this.valClass = Boolean.class; + this.valType = VarType.BOOLEAN; + this.defaultStrVal = null; + this.defaultIntVal = -1; + this.defaultLongVal = -1; + this.defaultFloatVal = -1; + this.defaultBoolVal = (Boolean)defaultVal; + } else { + throw new IllegalArgumentException("Not supported type value " + defaultVal.getClass() + + " for name " + varname); + } + } + + public boolean isType(String value) { + return valType.isType(value); + } + + public Validator getValidator() { + return validator; + } + + public String validate(String value) { + return validator == null ? null : validator.validate(value); + } + + public String validatorDescription() { + return validator == null ? null : validator.toDescription(); + } + + public String typeString() { + String type = valType.typeString(); + if (valType == VarType.STRING && validator != null) { + if (validator instanceof TimeValidator) { + type += "(TIME)"; + } + } + return type; + } + + public String getRawDescription() { + return description; + } + + public String getDescription() { + String validator = validatorDescription(); + if (validator != null) { + return validator + ".\n" + description; + } + return description; + } + + public boolean isExcluded() { + return excluded; + } + + public boolean isCaseSensitive() { + return caseSensitive; + } + + @Override + public String toString() { + return varname; + } + + private static String findHadoopBinary() { + String val = findHadoopHome(); + // if can't find hadoop home we can at least try /usr/bin/hadoop + val = (val == null ? File.separator + "usr" : val) + + File.separator + "bin" + File.separator + "hadoop"; + // Launch hadoop command file on windows. + return val + (Shell.WINDOWS ? ".cmd" : ""); + } + + private static String findYarnBinary() { + String val = findHadoopHome(); + val = (val == null ? "yarn" : val + File.separator + "bin" + File.separator + "yarn"); + return val + (Shell.WINDOWS ? 
".cmd" : ""); + } + + private static String findHadoopHome() { + String val = System.getenv("HADOOP_HOME"); + // In Hadoop 1.X and Hadoop 2.X HADOOP_HOME is gone and replaced with HADOOP_PREFIX + if (val == null) { + val = System.getenv("HADOOP_PREFIX"); + } + return val; + } + + public String getDefaultValue() { + return valType.defaultValueString(this); + } + + public String getDefaultExpr() { + return defaultExpr; + } + + private Set getValidStringValues() { + if (validator == null || !(validator instanceof StringSet)) { + throw new RuntimeException(varname + " does not specify a list of valid values"); + } + return ((StringSet)validator).getExpected(); + } + + enum VarType { + STRING { + @Override + void checkType(String value) throws Exception { } + @Override + String defaultValueString(ConfVars confVar) { return confVar.defaultStrVal; } + }, + INT { + @Override + void checkType(String value) throws Exception { Integer.valueOf(value); } + }, + LONG { + @Override + void checkType(String value) throws Exception { Long.valueOf(value); } + }, + FLOAT { + @Override + void checkType(String value) throws Exception { Float.valueOf(value); } + }, + BOOLEAN { + @Override + void checkType(String value) throws Exception { Boolean.valueOf(value); } + }; + + boolean isType(String value) { + try { checkType(value); } catch (Exception e) { return false; } + return true; + } + String typeString() { return name().toUpperCase();} + String defaultValueString(ConfVars confVar) { return confVar.defaultExpr; } + abstract void checkType(String value) throws Exception; + } + } + + /** + * Writes the default ConfVars out to a byte array and returns an input + * stream wrapping that byte array. + * + * We need this in order to initialize the ConfVar properties + * in the underling Configuration object using the addResource(InputStream) + * method. + * + * It is important to use a LoopingByteArrayInputStream because it turns out + * addResource(InputStream) is broken since Configuration tries to read the + * entire contents of the same InputStream repeatedly without resetting it. + * LoopingByteArrayInputStream has special logic to handle this. + */ + private static synchronized InputStream getConfVarInputStream() { + if (confVarByteArray == null) { + try { + // Create a Hadoop configuration without inheriting default settings. + Configuration conf = new Configuration(false); + + applyDefaultNonNullConfVars(conf); + + ByteArrayOutputStream confVarBaos = new ByteArrayOutputStream(); + conf.writeXml(confVarBaos); + confVarByteArray = confVarBaos.toByteArray(); + } catch (Exception e) { + // We're pretty screwed if we can't load the default conf vars + throw new RuntimeException("Failed to initialize default Hive configuration variables!", e); + } + } + return new LoopingByteArrayInputStream(confVarByteArray); + } + + public void verifyAndSet(String name, String value) throws IllegalArgumentException { + if (modWhiteListPattern != null) { + Matcher wlMatcher = modWhiteListPattern.matcher(name); + if (!wlMatcher.matches()) { + throw new IllegalArgumentException("Cannot modify " + name + " at runtime. " + + "It is not in list of params that are allowed to be modified at runtime"); + } + } + if (restrictList.contains(name)) { + throw new IllegalArgumentException("Cannot modify " + name + " at runtime. It is in the list" + + " of parameters that can't be modified at runtime"); + } + String oldValue = name != null ? 
get(name) : null; + if (name == null || value == null || !value.equals(oldValue)) { + // When either name or value is null, the set method below will fail, + // and throw IllegalArgumentException + set(name, value); + if (isSparkRelatedConfig(name)) { + isSparkConfigUpdated = true; + } + } + } + + public boolean isHiddenConfig(String name) { + return hiddenSet.contains(name); + } + + /** + * Check whether a Spark-related property has been updated; this includes Spark configurations, + * RSC configurations and YARN configurations in Spark on YARN mode. + * @param name + * @return + */ + private boolean isSparkRelatedConfig(String name) { + boolean result = false; + if (name.startsWith("spark")) { // Spark property. + // for now we don't support changing spark app name on the fly + result = !name.equals("spark.app.name"); + } else if (name.startsWith("yarn")) { // YARN property in Spark on YARN mode. + String sparkMaster = get("spark.master"); + if (sparkMaster != null && + (sparkMaster.equals("yarn-client") || sparkMaster.equals("yarn-cluster"))) { + result = true; + } + } else if (name.startsWith("hive.spark")) { // Remote Spark Context property. + result = true; + } + + return result; + } + + public static int getIntVar(Configuration conf, ConfVars var) { + assert (var.valClass == Integer.class) : var.varname; + if (var.altName != null) { + return conf.getInt(var.varname, conf.getInt(var.altName, var.defaultIntVal)); + } + return conf.getInt(var.varname, var.defaultIntVal); + } + + public static void setIntVar(Configuration conf, ConfVars var, int val) { + assert (var.valClass == Integer.class) : var.varname; + conf.setInt(var.varname, val); + } + + public int getIntVar(ConfVars var) { + return getIntVar(this, var); + } + + public void setIntVar(ConfVars var, int val) { + setIntVar(this, var, val); + } + + public static long getTimeVar(Configuration conf, ConfVars var, TimeUnit outUnit) { + return toTime(getVar(conf, var), getDefaultTimeUnit(var), outUnit); + } + + public static void setTimeVar(Configuration conf, ConfVars var, long time, TimeUnit timeunit) { + assert (var.valClass == String.class) : var.varname; + conf.set(var.varname, time + stringFor(timeunit)); + } + + public long getTimeVar(ConfVars var, TimeUnit outUnit) { + return getTimeVar(this, var, outUnit); + } + + public void setTimeVar(ConfVars var, long time, TimeUnit outUnit) { + setTimeVar(this, var, time, outUnit); + } + + public static long getSizeVar(Configuration conf, ConfVars var) { + return toSizeBytes(getVar(conf, var)); + } + + public long getSizeVar(ConfVars var) { + return getSizeVar(this, var); + } + + private static TimeUnit getDefaultTimeUnit(ConfVars var) { + TimeUnit inputUnit = null; + if (var.validator instanceof TimeValidator) { + inputUnit = ((TimeValidator)var.validator).getTimeUnit(); + } + return inputUnit; + } + + public static long toTime(String value, TimeUnit inputUnit, TimeUnit outUnit) { + String[] parsed = parseNumberFollowedByUnit(value.trim()); + return outUnit.convert(Long.valueOf(parsed[0].trim()), unitFor(parsed[1].trim(), inputUnit)); + } + + public static long toSizeBytes(String value) { + String[] parsed = parseNumberFollowedByUnit(value.trim()); + return Long.valueOf(parsed[0].trim()) * multiplierFor(parsed[1].trim()); + } + + private static String[] parseNumberFollowedByUnit(String value) { + char[] chars = value.toCharArray(); + int i = 0; + for (; i < chars.length && (chars[i] == '-' || Character.isDigit(chars[i])); i++) { + } + return new String[] {value.substring(0, i), 
value.substring(i)}; + } + + public static TimeUnit unitFor(String unit, TimeUnit defaultUnit) { + unit = unit.trim().toLowerCase(); + if (unit.isEmpty() || unit.equals("l")) { + if (defaultUnit == null) { + throw new IllegalArgumentException("Time unit is not specified"); + } + return defaultUnit; + } else if (unit.equals("d") || unit.startsWith("day")) { + return TimeUnit.DAYS; + } else if (unit.equals("h") || unit.startsWith("hour")) { + return TimeUnit.HOURS; + } else if (unit.equals("m") || unit.startsWith("min")) { + return TimeUnit.MINUTES; + } else if (unit.equals("s") || unit.startsWith("sec")) { + return TimeUnit.SECONDS; + } else if (unit.equals("ms") || unit.startsWith("msec")) { + return TimeUnit.MILLISECONDS; + } else if (unit.equals("us") || unit.startsWith("usec")) { + return TimeUnit.MICROSECONDS; + } else if (unit.equals("ns") || unit.startsWith("nsec")) { + return TimeUnit.NANOSECONDS; + } + throw new IllegalArgumentException("Invalid time unit " + unit); + } + + + public static long multiplierFor(String unit) { + unit = unit.trim().toLowerCase(); + // Note: the tb/pb multipliers below need long arithmetic; plain int multiplication would overflow to 0. + if (unit.isEmpty() || unit.equals("b") || unit.equals("bytes")) { + return 1; + } else if (unit.equals("kb")) { + return 1024; + } else if (unit.equals("mb")) { + return 1024*1024; + } else if (unit.equals("gb")) { + return 1024*1024*1024; + } else if (unit.equals("tb")) { + return 1024L*1024*1024*1024; + } else if (unit.equals("pb")) { + return 1024L*1024*1024*1024*1024; + } + throw new IllegalArgumentException("Invalid size unit " + unit); + } + + public static String stringFor(TimeUnit timeunit) { + switch (timeunit) { + case DAYS: return "day"; + case HOURS: return "hour"; + case MINUTES: return "min"; + case SECONDS: return "sec"; + case MILLISECONDS: return "msec"; + case MICROSECONDS: return "usec"; + case NANOSECONDS: return "nsec"; + } + throw new IllegalArgumentException("Invalid timeunit " + timeunit); + } + + public static long getLongVar(Configuration conf, ConfVars var) { + assert (var.valClass == Long.class) : var.varname; + if (var.altName != null) { + return conf.getLong(var.varname, conf.getLong(var.altName, var.defaultLongVal)); + } + return conf.getLong(var.varname, var.defaultLongVal); + } + + public static long getLongVar(Configuration conf, ConfVars var, long defaultVal) { + if (var.altName != null) { + return conf.getLong(var.varname, conf.getLong(var.altName, defaultVal)); + } + return conf.getLong(var.varname, defaultVal); + } + + public static void setLongVar(Configuration conf, ConfVars var, long val) { + assert (var.valClass == Long.class) : var.varname; + conf.setLong(var.varname, val); + } + + public long getLongVar(ConfVars var) { + return getLongVar(this, var); + } + + public void setLongVar(ConfVars var, long val) { + setLongVar(this, var, val); + } + + public static float getFloatVar(Configuration conf, ConfVars var) { + assert (var.valClass == Float.class) : var.varname; + if (var.altName != null) { + return conf.getFloat(var.varname, conf.getFloat(var.altName, var.defaultFloatVal)); + } + return conf.getFloat(var.varname, var.defaultFloatVal); + } + + public static float getFloatVar(Configuration conf, ConfVars var, float defaultVal) { + if (var.altName != null) { + return conf.getFloat(var.varname, conf.getFloat(var.altName, defaultVal)); + } + return conf.getFloat(var.varname, defaultVal); + } + + public static void setFloatVar(Configuration conf, ConfVars var, float val) { + assert (var.valClass == Float.class) : var.varname; + conf.setFloat(var.varname, val); + } + + public float 
getFloatVar(ConfVars var) { + return getFloatVar(this, var); + } + + public void setFloatVar(ConfVars var, float val) { + setFloatVar(this, var, val); + } + + public static boolean getBoolVar(Configuration conf, ConfVars var) { + assert (var.valClass == Boolean.class) : var.varname; + if (var.altName != null) { + return conf.getBoolean(var.varname, conf.getBoolean(var.altName, var.defaultBoolVal)); + } + return conf.getBoolean(var.varname, var.defaultBoolVal); + } + + public static boolean getBoolVar(Configuration conf, ConfVars var, boolean defaultVal) { + if (var.altName != null) { + return conf.getBoolean(var.varname, conf.getBoolean(var.altName, defaultVal)); + } + return conf.getBoolean(var.varname, defaultVal); + } + + public static void setBoolVar(Configuration conf, ConfVars var, boolean val) { + assert (var.valClass == Boolean.class) : var.varname; + conf.setBoolean(var.varname, val); + } + + public boolean getBoolVar(ConfVars var) { + return getBoolVar(this, var); + } + + public void setBoolVar(ConfVars var, boolean val) { + setBoolVar(this, var, val); + } + + public static String getVar(Configuration conf, ConfVars var) { + assert (var.valClass == String.class) : var.varname; + return var.altName != null ? conf.get(var.varname, conf.get(var.altName, var.defaultStrVal)) + : conf.get(var.varname, var.defaultStrVal); + } + + public static String getTrimmedVar(Configuration conf, ConfVars var) { + assert (var.valClass == String.class) : var.varname; + if (var.altName != null) { + return conf.getTrimmed(var.varname, conf.getTrimmed(var.altName, var.defaultStrVal)); + } + return conf.getTrimmed(var.varname, var.defaultStrVal); + } + + public static String[] getTrimmedStringsVar(Configuration conf, ConfVars var) { + assert (var.valClass == String.class) : var.varname; + String[] result = conf.getTrimmedStrings(var.varname, (String[])null); + if (result != null) return result; + if (var.altName != null) { + result = conf.getTrimmedStrings(var.altName, (String[])null); + if (result != null) return result; + } + return org.apache.hadoop.util.StringUtils.getTrimmedStrings(var.defaultStrVal); + } + + public static String getVar(Configuration conf, ConfVars var, String defaultVal) { + String ret = var.altName != null ? 
conf.get(var.varname, conf.get(var.altName, defaultVal)) + : conf.get(var.varname, defaultVal); + return ret; + } + + public static String getVar(Configuration conf, ConfVars var, EncoderDecoder encoderDecoder) { + return encoderDecoder.decode(getVar(conf, var)); + } + + public String getLogIdVar(String defaultValue) { + String retval = getVar(ConfVars.HIVE_LOG_TRACE_ID); + if (retval.equals("")) { + l4j.info("Using the default value passed in for log id: " + defaultValue); + retval = defaultValue; + } + if (retval.length() > LOG_PREFIX_LENGTH) { + l4j.warn("The original log id prefix " + retval + " has been truncated to " + + retval.substring(0, LOG_PREFIX_LENGTH - 1)); + retval = retval.substring(0, LOG_PREFIX_LENGTH - 1); + } + return retval; + } + + public static void setVar(Configuration conf, ConfVars var, String val) { + assert (var.valClass == String.class) : var.varname; + conf.set(var.varname, val); + } + public static void setVar(Configuration conf, ConfVars var, String val, + EncoderDecoder encoderDecoder) { + setVar(conf, var, encoderDecoder.encode(val)); + } + + public static ConfVars getConfVars(String name) { + return vars.get(name); + } + + public static ConfVars getMetaConf(String name) { + return metaConfs.get(name); + } + + public String getVar(ConfVars var) { + return getVar(this, var); + } + + public void setVar(ConfVars var, String val) { + setVar(this, var, val); + } + + public String getQueryString() { + return getQueryString(this); + } + + public static String getQueryString(Configuration conf) { + return getVar(conf, ConfVars.HIVEQUERYSTRING, EncoderDecoderFactory.URL_ENCODER_DECODER); + } + + public void setQueryString(String query) { + setQueryString(this, query); + } + + public static void setQueryString(Configuration conf, String query) { + setVar(conf, ConfVars.HIVEQUERYSTRING, query, EncoderDecoderFactory.URL_ENCODER_DECODER); + } + public void logVars(PrintStream ps) { + for (ConfVars one : ConfVars.values()) { + ps.println(one.varname + "=" + ((get(one.varname) != null) ? get(one.varname) : "")); + } + } + + public HiveConf() { + super(); + initialize(this.getClass()); + } + + public HiveConf(Class cls) { + super(); + initialize(cls); + } + + public HiveConf(Configuration other, Class cls) { + super(other); + initialize(cls); + } + + /** + * Copy constructor + */ + public HiveConf(HiveConf other) { + super(other); + hiveJar = other.hiveJar; + auxJars = other.auxJars; + isSparkConfigUpdated = other.isSparkConfigUpdated; + origProp = (Properties)other.origProp.clone(); + restrictList.addAll(other.restrictList); + hiddenSet.addAll(other.hiddenSet); + modWhiteListPattern = other.modWhiteListPattern; + } + + public Properties getAllProperties() { + return getProperties(this); + } + + public static Properties getProperties(Configuration conf) { + Iterator iter = conf.iterator(); + Properties p = new Properties(); + while (iter.hasNext()) { + Map.Entry e = iter.next(); + p.setProperty(e.getKey(), e.getValue()); + } + return p; + } + + private void initialize(Class cls) { + hiveJar = (new JobConf(cls)).getJar(); + + // preserve the original configuration + origProp = getAllProperties(); + + // Overlay the ConfVars. 
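The defaults come from getConfVarInputStream(), which serializes them into an in-memory XML resource. 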
Note that this ignores ConfVars with null values + addResource(getConfVarInputStream()); + + // Overlay hive-site.xml if it exists + if (hiveSiteURL != null) { + addResource(hiveSiteURL); + } + + // if embedded metastore is to be used as per config so far + // then this is considered like the metastore server case + String msUri = this.getVar(HiveConf.ConfVars.METASTOREURIS); + if(HiveConfUtil.isEmbeddedMetaStore(msUri)){ + setLoadMetastoreConfig(true); + } + + // load hivemetastore-site.xml if this is metastore and file exists + if (isLoadMetastoreConfig() && hivemetastoreSiteUrl != null) { + addResource(hivemetastoreSiteUrl); + } + + // load hiveserver2-site.xml if this is hiveserver2 and file exists + // metastore can be embedded within hiveserver2, in such cases + // the conf params in hiveserver2-site.xml will override what's defined + // in hivemetastore-site.xml + if (isLoadHiveServer2Config() && hiveServer2SiteUrl != null) { + addResource(hiveServer2SiteUrl); + } + + // Overlay the values of any system properties whose names appear in the list of ConfVars + applySystemProperties(); + + if ((this.get("hive.metastore.ds.retry.attempts") != null) || + this.get("hive.metastore.ds.retry.interval") != null) { + l4j.warn("DEPRECATED: hive.metastore.ds.retry.* no longer has any effect. " + + "Use hive.hmshandler.retry.* instead"); + } + + // if the running class was loaded directly (through eclipse) rather than through a + // jar then this would be needed + if (hiveJar == null) { + hiveJar = this.get(ConfVars.HIVEJAR.varname); + } + + if (auxJars == null) { + auxJars = this.get(ConfVars.HIVEAUXJARS.varname); + } + + if (getBoolVar(ConfVars.METASTORE_SCHEMA_VERIFICATION)) { + setBoolVar(ConfVars.METASTORE_AUTO_CREATE_ALL, false); + } + + if (getBoolVar(HiveConf.ConfVars.HIVECONFVALIDATION)) { + List trimmed = new ArrayList(); + for (Map.Entry entry : this) { + String key = entry.getKey(); + if (key == null || !key.startsWith("hive.")) { + continue; + } + ConfVars var = HiveConf.getConfVars(key); + if (var == null) { + var = HiveConf.getConfVars(key.trim()); + if (var != null) { + trimmed.add(key); + } + } + if (var == null) { + l4j.warn("HiveConf of name " + key + " does not exist"); + } else if (!var.isType(entry.getValue())) { + l4j.warn("HiveConf " + var.varname + " expects " + var.typeString() + " type value"); + } + } + for (String key : trimmed) { + set(key.trim(), getRaw(key)); + unset(key); + } + } + + setupSQLStdAuthWhiteList(); + + // setup list of conf vars that are not allowed to be changed at runtime + setupRestrictList(); + setupHiddenSet(); + + } + + /** + * If the config whitelist param for sql standard authorization is not set, set it up here. 
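The default whitelist is a single Java regex, assembled in getSQLStdAuthDefaultWhiteListPattern() below from the safe parameter names and name regexes. 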
+ */ + private void setupSQLStdAuthWhiteList() { + String whiteListParamsStr = getVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST); + if (whiteListParamsStr == null || whiteListParamsStr.trim().isEmpty()) { + // set the default configs in whitelist + whiteListParamsStr = getSQLStdAuthDefaultWhiteListPattern(); + } + setVar(ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST, whiteListParamsStr); + } + + private static String getSQLStdAuthDefaultWhiteListPattern() { + // create the default white list from list of safe config params + // and regex list + String confVarPatternStr = Joiner.on("|").join(convertVarsToRegex(sqlStdAuthSafeVarNames)); + String regexPatternStr = Joiner.on("|").join(sqlStdAuthSafeVarNameRegexes); + return regexPatternStr + "|" + confVarPatternStr; + } + + /** + * @param paramList list of parameter strings + * @return list of parameter strings with "." replaced by "\." + */ + private static String[] convertVarsToRegex(String[] paramList) { + String[] regexes = new String[paramList.length]; + for (int i = 0; i < paramList.length; i++) { + regexes[i] = paramList[i].replace(".", "\\."); + } + return regexes; + } + + /** + * Apply system properties to this object if the property name is in the list of ConfVars. + */ + public void applySystemProperties() { + Map systemProperties = getConfSystemProperties(); + for (Entry systemProperty : systemProperties.entrySet()) { + this.set(systemProperty.getKey(), systemProperty.getValue()); + } + } + + /** + * This method returns a mapping from config variable name to its value for all config variables + * which have been set using System properties + */ + public static Map getConfSystemProperties() { + Map systemProperties = new HashMap(); + + for (ConfVars oneVar : ConfVars.values()) { + if (System.getProperty(oneVar.varname) != null) { + if (System.getProperty(oneVar.varname).length() > 0) { + systemProperties.put(oneVar.varname, System.getProperty(oneVar.varname)); + } + } + } + + return systemProperties; + } + + /** + * Overlays ConfVar properties with non-null values + */ + private static void applyDefaultNonNullConfVars(Configuration conf) { + for (ConfVars var : ConfVars.values()) { + String defaultValue = var.getDefaultValue(); + if (defaultValue == null) { + // Don't override ConfVars with null values + continue; + } + conf.set(var.varname, defaultValue); + } + } + + public Properties getChangedProperties() { + Properties ret = new Properties(); + Properties newProp = getAllProperties(); + + for (Object one : newProp.keySet()) { + String oneProp = (String) one; + String oldValue = origProp.getProperty(oneProp); + if (!StringUtils.equals(oldValue, newProp.getProperty(oneProp))) { + ret.setProperty(oneProp, newProp.getProperty(oneProp)); + } + } + return (ret); + } + + public String getJar() { + return hiveJar; + } + + /** + * @return the auxJars + */ + public String getAuxJars() { + return auxJars; + } + + /** + * @param auxJars the auxJars to set + */ + public void setAuxJars(String auxJars) { + this.auxJars = auxJars; + setVar(this, ConfVars.HIVEAUXJARS, auxJars); + } + + public URL getHiveDefaultLocation() { + return hiveDefaultURL; + } + + public static void setHiveSiteLocation(URL location) { + hiveSiteURL = location; + } + + public static URL getHiveSiteLocation() { + return hiveSiteURL; + } + + public static URL getMetastoreSiteLocation() { + return hivemetastoreSiteUrl; + } + + public static URL getHiveServer2SiteLocation() { + return hiveServer2SiteUrl; + } + + /** + * @return the user name set in hadoop.job.ugi param or the current user from System + * @throws IOException + */ + public String getUser() throws IOException { + try { + UserGroupInformation ugi = Utils.getUGI(); + return ugi.getUserName(); + } catch (LoginException le) { + throw new 
IOException(le); + } + } + + public static String getColumnInternalName(int pos) { + return "_col" + pos; + } + + public static int getPositionFromInternalName(String internalName) { + Pattern internalPattern = Pattern.compile("_col([0-9]+)"); + Matcher m = internalPattern.matcher(internalName); + if (!m.matches()){ + return -1; + } else { + return Integer.parseInt(m.group(1)); + } + } + + /** + * Append comma separated list of config vars to the restrict List + * @param restrictListStr + */ + public void addToRestrictList(String restrictListStr) { + if (restrictListStr == null) { + return; + } + String oldList = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST); + if (oldList == null || oldList.isEmpty()) { + this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, restrictListStr); + } else { + this.setVar(ConfVars.HIVE_CONF_RESTRICTED_LIST, oldList + "," + restrictListStr); + } + setupRestrictList(); + } + + /** + * Set white list of parameters that are allowed to be modified + * + * @param paramNameRegex + */ + @LimitedPrivate(value = { "Currently only for use by HiveAuthorizer" }) + public void setModifiableWhiteListRegex(String paramNameRegex) { + if (paramNameRegex == null) { + return; + } + modWhiteListPattern = Pattern.compile(paramNameRegex); + } + + /** + * Add the HIVE_CONF_RESTRICTED_LIST values to restrictList, + * including HIVE_CONF_RESTRICTED_LIST itself + */ + private void setupRestrictList() { + String restrictListStr = this.getVar(ConfVars.HIVE_CONF_RESTRICTED_LIST); + restrictList.clear(); + if (restrictListStr != null) { + for (String entry : restrictListStr.split(",")) { + restrictList.add(entry.trim()); + } + } + + String internalVariableListStr = this.getVar(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST); + if (internalVariableListStr != null) { + for (String entry : internalVariableListStr.split(",")) { + restrictList.add(entry.trim()); + } + } + + restrictList.add(ConfVars.HIVE_IN_TEST.varname); + restrictList.add(ConfVars.HIVE_CONF_RESTRICTED_LIST.varname); + restrictList.add(ConfVars.HIVE_CONF_HIDDEN_LIST.varname); + restrictList.add(ConfVars.HIVE_CONF_INTERNAL_VARIABLE_LIST.varname); + } + + private void setupHiddenSet() { + String hiddenListStr = this.getVar(ConfVars.HIVE_CONF_HIDDEN_LIST); + hiddenSet.clear(); + if (hiddenListStr != null) { + for (String entry : hiddenListStr.split(",")) { + hiddenSet.add(entry.trim()); + } + } + } + + /** + * Strips hidden config entries from configuration + */ + public void stripHiddenConfigurations(Configuration conf) { + for (String name : hiddenSet) { + if (conf.get(name) != null) { + conf.set(name, ""); + } + } + } + + /** + * @return true if HS2 webui is enabled + */ + public boolean isWebUiEnabled() { + return this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_PORT) != 0; + } + + /** + * @return true if HS2 webui query-info cache is enabled + */ + public boolean isWebUiQueryInfoCacheEnabled() { + return isWebUiEnabled() && this.getIntVar(ConfVars.HIVE_SERVER2_WEBUI_MAX_HISTORIC_QUERIES) > 0; + } + + + public static boolean isLoadMetastoreConfig() { + return loadMetastoreConfig; + } + + public static void setLoadMetastoreConfig(boolean loadMetastoreConfig) { + HiveConf.loadMetastoreConfig = loadMetastoreConfig; + } + + public static boolean isLoadHiveServer2Config() { + return loadHiveServer2Config; + } + + public static void setLoadHiveServer2Config(boolean loadHiveServer2Config) { + HiveConf.loadHiveServer2Config = loadHiveServer2Config; + } + + public static class StrictChecks { + + private static final String NO_LIMIT_MSG = makeMessage( 
+ "Order by-s without limit", ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY); + private static final String NO_PARTITIONLESS_MSG = makeMessage( + "Queries against partitioned tables without a partition filter", + ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY); + private static final String NO_COMPARES_MSG = makeMessage( + "Unsafe compares between different types", ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY); + private static final String NO_CARTESIAN_MSG = makeMessage( + "Cartesian products", ConfVars.HIVE_STRICT_CHECKS_CARTESIAN); + + private static String makeMessage(String what, ConfVars setting) { + return what + " are disabled for safety reasons. If you know what you are doing, please make" + + " sure that " + setting.varname + " is set to false and that " + + ConfVars.HIVEMAPREDMODE.varname + " is not set to 'strict' to enable them."; + } + + public static String checkNoLimit(Configuration conf) { + return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY) ? null : NO_LIMIT_MSG; + } + + public static String checkNoPartitionFilter(Configuration conf) { + return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_LARGE_QUERY) + ? null : NO_PARTITIONLESS_MSG; + } + + public static String checkTypeSafety(Configuration conf) { + return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_TYPE_SAFETY) ? null : NO_COMPARES_MSG; + } + + public static String checkCartesian(Configuration conf) { + return isAllowed(conf, ConfVars.HIVE_STRICT_CHECKS_CARTESIAN) ? null : NO_CARTESIAN_MSG; + } + + private static boolean isAllowed(Configuration conf, ConfVars setting) { + String mode = HiveConf.getVar(conf, ConfVars.HIVEMAPREDMODE, (String)null); + return (mode != null) ? !"strict".equals(mode) : !HiveConf.getBoolVar(conf, setting); + } + } + + public static String getNonMrEngines() { + String result = ""; + for (String s : ConfVars.HIVE_EXECUTION_ENGINE.getValidStringValues()) { + if ("mr".equals(s)) continue; + if (!result.isEmpty()) { + result += ", "; + } + result += s; + } + return result; + } + + public static String generateMrDeprecationWarning() { + return "Hive-on-MR is deprecated in Hive 2 and may not be available in the future versions. " + + "Consider using a different execution engine (i.e. 
" + HiveConf.getNonMrEngines() + + ") or using Hive 1.X releases."; + } +} diff --git a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java index 102d6d2..0895f43 100644 --- a/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java +++ b/hcatalog/core/src/test/java/org/apache/hive/hcatalog/common/TestHCatUtil.java @@ -124,7 +124,7 @@ public void testGetTableSchemaWithPtnColsApi() throws IOException { org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0, sd, new ArrayList(), new HashMap(), - "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name()); + "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name(), null, null); Table table = new Table(apiTable); List expectedHCatSchema = @@ -169,7 +169,7 @@ public void testGetTableSchemaWithPtnColsSerDeReportedFields() throws IOExceptio org.apache.hadoop.hive.metastore.api.Table apiTable = new org.apache.hadoop.hive.metastore.api.Table("test_tblname", "test_dbname", "test_owner", 0, 0, 0, sd, new ArrayList(), new HashMap(), - "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name()); + "viewOriginalText", "viewExpandedText", TableType.EXTERNAL_TABLE.name(), null, null); Table table = new Table(apiTable); List expectedHCatSchema = Lists.newArrayList( diff --git a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java index 1360563..71f1ee8 100644 --- a/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java +++ b/itests/hcatalog-unit/src/test/java/org/apache/hive/hcatalog/listener/TestDbNotificationListener.java @@ -132,7 +132,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); msClient.createTable(table); NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); @@ -156,12 +156,12 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("alttable", "default", "me", startTime, startTime, 0, sd, - new ArrayList(), emptyParameters, null, null, null); + new ArrayList(), emptyParameters, null, null, null, null, null); msClient.createTable(table); cols.add(new FieldSchema("col2", "int", "")); table = new Table("alttable", "default", "me", startTime, startTime, 0, sd, - new ArrayList(), emptyParameters, null, null, null); + new ArrayList(), emptyParameters, null, null, null, null, null); msClient.alter_table("default", "alttable", table); NotificationEventResponse rsp = msClient.getNextNotification(firstEventId, 0, null); @@ -186,7 +186,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("droptable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, 
null); + emptyParameters, null, null, null, null, null); msClient.createTable(table); msClient.dropTable("default", "droptable"); @@ -214,7 +214,7 @@ public void addPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("addPartTable", "default", "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "addPartTable", @@ -245,7 +245,7 @@ public void alterPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("alterparttable", "default", "me", startTime, startTime, 0, sd, - partCols, emptyParameters, null, null, null); + partCols, emptyParameters, null, null, null, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "alterparttable", @@ -281,7 +281,7 @@ public void dropPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("dropPartTable", "default", "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "dropPartTable", @@ -312,7 +312,7 @@ public void insertTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("insertTable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); msClient.createTable(table); FireEventRequestData data = new FireEventRequestData(); @@ -350,7 +350,7 @@ public void insertPartition() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("insertPartition", "default", "me", startTime, startTime, 0, sd, - partCols, emptyParameters, null, null, null); + partCols, emptyParameters, null, null, null, null, null); msClient.createTable(table); Partition partition = new Partition(Arrays.asList("today"), "default", "insertPartition", startTime, startTime, sd, emptyParameters); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java index 51d96dd..9caf671 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggrStatsCacheIntegration.java @@ -94,7 +94,7 @@ public void hit() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, 
partVals2)) { @@ -207,7 +207,7 @@ public void someWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); boolean first = true; @@ -306,7 +306,7 @@ public void invalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2, partVals3)) { @@ -515,7 +515,7 @@ public void alterInvalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); Partition[] partitions = new Partition[3]; @@ -609,7 +609,7 @@ public void altersInvalidation() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); Partition[] partitions = new Partition[3]; diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java index af60660..50fe45a 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseImport.java @@ -490,12 +490,12 @@ private void setupObjectStore(RawStore rdbms, String[] roles, String[] dbNames, StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); rdbms.createTable(new Table(tableNames[0], dbNames[i], "me", now, now, 0, sd, null, - emptyParameters, null, null, null)); + emptyParameters, null, null, null, null, null)); List partCols = new ArrayList<>(); partCols.add(new FieldSchema("region", "string", "")); rdbms.createTable(new Table(tableNames[1], dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, null, null, null)); for (int j = 0; j < partVals.length; j++) { StorageDescriptor psd = new StorageDescriptor(sd); @@ -542,7 +542,7 @@ public void parallel() throws Exception { partCols.add(new FieldSchema("region", "string", "")); for (int j = 0; j < parallelFactor; j++) { rdbms.createTable(new Table("t" + j, dbNames[i], "me", now, now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, null, null, null)); for (int k = 0; k < parallelFactor; k++) { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/region=" + k); @@ -609,7 +609,7 @@ public void parallelOdd() throws Exception { partCols.add(new FieldSchema("region", "string", "")); for (int j = 0; j < parallelFactor; j++) { rdbms.createTable(new Table("t" + j, dbNames[i], "me", now,
now, 0, sd, partCols, - emptyParameters, null, null, null)); + emptyParameters, null, null, null, null, null)); for (int k = 0; k < parallelFactor; k++) { StorageDescriptor psd = new StorageDescriptor(sd); psd.setLocation("file:/tmp/region=" + k); diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java index e5833b8..c8cbfbf 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseSchemaTool.java @@ -430,7 +430,7 @@ public void oneMondoTest() throws Exception { Table tab = new Table(tableNames[i], dbNames[0], "me", 0, 0, 0, sd, Arrays.asList(new FieldSchema("pcol1", "string", ""), new FieldSchema("pcol2", "string", "")), - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(tab); } diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java index 2cc1373..2f7596d 100644 --- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java +++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreIntegration.java @@ -196,7 +196,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table("mytable", "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", "mytable"); @@ -224,7 +224,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); startTime += 10; @@ -271,7 +271,7 @@ public void getAllTables() throws Exception { serde, null, null, emptyParameters); Table table = new Table(tableNames[j], dbNames[i], "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); } } @@ -309,7 +309,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -332,7 +332,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = new ArrayList(); @@ -372,7 +372,7 @@ public void addPartitions() throws 
Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -410,7 +410,7 @@ public void alterPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -450,7 +450,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -489,7 +489,7 @@ public void listPartitions() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -533,7 +533,7 @@ public void listPartitionsWithPs() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"today", "europe"}, @@ -598,7 +598,7 @@ public void getPartitionsByFilter() throws Exception { partCols.add(new FieldSchema("ds", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); String[][] partVals = new String[][]{{"20010101", "north america"}, {"20010101", "europe"}, @@ -681,7 +681,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -927,7 +927,7 @@ public void grantRevokeTablePrivileges() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); doGrantRevoke(HiveObjectType.TABLE, dbName, tableName, new String[] {"grtp_role1", "grtp_role2"}, @@ -1317,10 +1317,10 @@ public 
void listTableGrants() throws Exception {
     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
         serde, null, null, emptyParameters);
     Table table = new Table(tableNames[0], dbName, "me", startTime, startTime, 0, sd, null,
-        emptyParameters, null, null, null);
+        emptyParameters, null, null, null, null, null);
     store.createTable(table);
     table = new Table(tableNames[1], dbName, "me", startTime, startTime, 0, sd, null,
-        emptyParameters, null, null, null);
+        emptyParameters, null, null, null, null, null);
     store.createTable(table);
     String[] roleNames = new String[]{"ltg_role1", "ltg_role2"};
     String[] userNames = new String[]{"gandalf", "radagast"};
@@ -1467,7 +1467,7 @@ public void tableStatistics() throws Exception {
     StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0,
         serde, null, null, emptyParameters);
     Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, null,
-        emptyParameters, null, null, null);
+        emptyParameters, null, null, null, null, null);
     store.createTable(table);
 
     ColumnStatistics stats = new ColumnStatistics();
@@ -1665,7 +1665,7 @@ public void partitionStatistics() throws Exception {
     List partCols = new ArrayList();
     partCols.add(new FieldSchema("ds", "string", ""));
     Table table = new Table(tableName, dbname, "me", (int)now / 1000, (int)now / 1000, 0, sd, partCols,
-        emptyParameters, null, null, null);
+        emptyParameters, null, null, null, null, null);
     store.createTable(table);
     for (String partVal : partVals) {
       Partition part = new Partition(Arrays.asList(partVal), dbname, tableName, (int) now / 1000,
diff --git a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java
index c29e46a..5a71235 100644
--- a/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java
+++ b/itests/hive-unit/src/test/java/org/apache/hadoop/hive/metastore/hbase/TestStorageDescriptorSharing.java
@@ -80,7 +80,7 @@ public void createManyPartitions() throws Exception {
     List partCols = new ArrayList();
     partCols.add(new FieldSchema("pc", "string", ""));
     Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols,
-        emptyParameters, null, null, null);
+        emptyParameters, null, null, null, null, null);
     store.createTable(table);
 
     List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan");
@@ -103,7 +103,7 @@ public void createManyPartitions() throws Exception {
       sd = new StorageDescriptor(cols, "file:/tmp", "input2", "output", false, 0,
           serde, null, null, emptyParameters);
       table = new Table(tableName2, "default", "me", startTime, startTime, 0, sd, null,
-          emptyParameters, null, null, null);
+          emptyParameters, null, null, null, null, null);
       store.createTable(table);
 
       Assert.assertEquals(2, HBaseReadWrite.getInstance().countStorageDescriptor());
diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
index 10c8ff2..93f56fa 100644
--- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
+++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniHS2.java
@@ -810,4 +810,48 @@ private int getNucleusClassLoaderResolverMapSize() {
     }
     return -1;
   }
+
+  /**
+   * Test PK/FK retrieval APIs.
+   *
+   * @throws Exception
+   */
+  @Test
+  public void testGetPrimaryKeysAndGetCrossReference() throws Exception {
+    // Stop HiveServer2
+    if (miniHS2.isStarted()) {
+      miniHS2.stop();
+    }
+    HiveConf conf = new HiveConf();
+    String userName;
+    // Start an instance of HiveServer2 which uses miniMR
+    miniHS2 = new MiniHS2(conf);
+    Map<String, String> confOverlay = new HashMap<String, String>();
+    miniHS2.start(confOverlay);
+    userName = System.getProperty("user.name");
+    hs2Conn = getConnection(miniHS2.getJdbcURL(), userName, "password");
+    ResultSet rs = hs2Conn.getMetaData().getPrimaryKeys("C11", "S11", "T11");
+    String rss = rs.toString();
+    assert (rss.contains("C11"));
+    assert (rss.contains("S11"));
+    assert (rss.contains("T11"));
+    assert (rss.contains("PK1"));
+    assert (rss.contains("PK2"));
+    assert (rss.contains("COL1"));
+    assert (rss.contains("COL2"));
+    rs = hs2Conn.getMetaData().getCrossReference("PC11", "PS11", "PT11", "FC11", "FS11", "FT11");
+    rss = rs.toString();
+    assert (rss.contains("PC11"));
+    assert (rss.contains("PS11"));
+    assert (rss.contains("PT11"));
+    assert (rss.contains("PKCOL1"));
+    assert (rss.contains("PKCOL2"));
+    assert (rss.contains("FKCOL1"));
+    assert (rss.contains("FKCOL2"));
+    assert (rss.contains("FK1"));
+    assert (rss.contains("FK2"));
+    assert (rss.contains("PK1"));
+    assert (rss.contains("PK2"));
+  }
+}
\ No newline at end of file
diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
index 7e54d1f..9d73470 100644
--- a/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
+++ b/jdbc/src/java/org/apache/hive/jdbc/HiveDatabaseMetaData.java
@@ -35,11 +35,15 @@
 import org.apache.hive.service.rpc.thrift.TGetCatalogsResp;
 import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
 import org.apache.hive.service.rpc.thrift.TGetColumnsResp;
+import org.apache.hive.service.rpc.thrift.TGetCrossReferenceReq;
+import org.apache.hive.service.rpc.thrift.TGetCrossReferenceResp;
 import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
 import org.apache.hive.service.rpc.thrift.TGetFunctionsResp;
 import org.apache.hive.service.rpc.thrift.TGetInfoReq;
 import org.apache.hive.service.rpc.thrift.TGetInfoResp;
 import org.apache.hive.service.rpc.thrift.TGetInfoType;
+import org.apache.hive.service.rpc.thrift.TGetPrimaryKeysReq;
+import org.apache.hive.service.rpc.thrift.TGetPrimaryKeysResp;
 import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
 import org.apache.hive.service.rpc.thrift.TGetSchemasResp;
 import org.apache.hive.service.rpc.thrift.TGetTableTypesReq;
@@ -255,7 +259,27 @@ public Connection getConnection() throws SQLException {
   public ResultSet getCrossReference(String primaryCatalog,
       String primarySchema, String primaryTable, String foreignCatalog,
       String foreignSchema, String foreignTable) throws SQLException {
-    throw new SQLException("Method not supported");
+    TGetCrossReferenceResp getFKResp;
+    TGetCrossReferenceReq getFKReq = new TGetCrossReferenceReq(sessHandle);
+    getFKReq.setParentTableName(primaryTable);
+    getFKReq.setParentSchemaName(primarySchema);
+    getFKReq.setParentCatalogName(primaryCatalog);
+    getFKReq.setForeignTableName(foreignTable);
+    getFKReq.setForeignSchemaName(foreignSchema);
+    getFKReq.setForeignCatalogName(foreignCatalog);
+
+    try {
+      getFKResp = client.GetCrossReference(getFKReq);
+    } catch (TException e) {
+      throw new SQLException(e.getMessage(), "08S01", e);
+    }
+    Utils.verifySuccess(getFKResp.getStatus());
+
+    return new HiveQueryResultSet.Builder(connection)
+        .setClient(client)
+        .setSessionHandle(sessHandle)
+        .setStmtHandle(getFKResp.getOperationHandle())
+        .build();
   }
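The two overridden methods above surface through the standard java.sql.DatabaseMetaData interface. A minimal client-side sketch of the new behavior, not part of the patch (the HiveServer2 URL, credentials, and table name are placeholders, and the Hive JDBC driver is assumed to be on the classpath; the column labels are the ones the replaced empty-result schema already declared):

import java.sql.Connection;
import java.sql.DatabaseMetaData;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.SQLException;

public class PrimaryKeyLookupSketch {
  public static void main(String[] args) throws SQLException {
    // Placeholder endpoint and credentials.
    try (Connection conn = DriverManager.getConnection(
        "jdbc:hive2://localhost:10000/default", "user", "password")) {
      DatabaseMetaData meta = conn.getMetaData();
      // Before this patch getPrimaryKeys returned an empty result set; it is now
      // backed by the GetPrimaryKeys Thrift call shown above.
      try (ResultSet pk = meta.getPrimaryKeys(null, "default", "mytable")) {
        while (pk.next()) {
          System.out.println(pk.getString("TABLE_NAME") + "." + pk.getString("COLUMN_NAME")
              + " seq=" + pk.getInt("KEY_SEQ") + " name=" + pk.getString("PK_NAME"));
        }
      }
    }
  }
}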
 
   public int getDatabaseMajorVersion() throws SQLException {
@@ -493,12 +517,23 @@ public String getNumericFunctions() throws SQLException {
 
   public ResultSet getPrimaryKeys(String catalog, String schema, String table)
       throws SQLException {
-    // Hive doesn't support primary keys
-    // using local schema with empty resultset
-    return new HiveQueryResultSet.Builder(connection).setClient(client).setEmptyResultSet(true).
-    setSchema(Arrays.asList("TABLE_CAT", "TABLE_SCHEM", "TABLE_NAME", "COLUMN_NAME", "KEY_SEQ", "PK_NAME" ),
-    Arrays.asList("STRING", "STRING", "STRING", "STRING", "INT", "STRING"))
-    .build();
+    TGetPrimaryKeysResp getPKResp;
+    TGetPrimaryKeysReq getPKReq = new TGetPrimaryKeysReq(sessHandle);
+    getPKReq.setTableName(table);
+    getPKReq.setSchemaName(schema);
+    getPKReq.setCatalogName(catalog);
+    try {
+      getPKResp = client.GetPrimaryKeys(getPKReq);
+    } catch (TException e) {
+      throw new SQLException(e.getMessage(), "08S01", e);
+    }
+    Utils.verifySuccess(getPKResp.getStatus());
+
+    return new HiveQueryResultSet.Builder(connection)
+        .setClient(client)
+        .setSessionHandle(sessHandle)
+        .setStmtHandle(getPKResp.getOperationHandle())
+        .build();
   }
 
   public ResultSet getProcedureColumns(String catalog, String schemaPattern,
diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift
index 6a55962..4f7dabe 100755
--- a/metastore/if/hive_metastore.thrift
+++ b/metastore/if/hive_metastore.thrift
@@ -41,6 +41,36 @@ struct FieldSchema {
   3: string comment
 }
 
+struct ForeignKeyRel {
+  1: list<string> foreignCols,
+  2: list<string> parentCols,
+  3: string foreignKeyName,
+  4: string parentTableName,
+  5: string parentTableSchema
+}
+
+struct SQLPrimaryKey {
+  1: string table_schem,  // table schema
+  2: string table_name,   // table name
+  3: string column_name,  // column name
+  4: i32 key_seq,         // sequence number within primary key
+  5: string pk_name       // primary key name
+}
+
+struct SQLForeignKey {
+  1: string pktable_schem,  // primary key table schema
+  2: string pktable_name,   // primary key table name
+  3: string pkcolumn_name,  // primary key column name
+  4: string fktable_schem,  // foreign key table schema
+  5: string fktable_name,   // foreign key table name
+  6: string fkcolumn_name,  // foreign key column name
+  7: i32 key_seq,      // sequence within foreign key
+  8: i32 update_rule,  // what happens to foreign key when parent key is updated
+  9: i32 delete_rule,  // what happens to foreign key when parent key is deleted
+  10: string fk_name,  // foreign key name
+  11: string pk_name   // primary key name
+}
+
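Assuming the usual Thrift Java codegen conventions (an all-field constructor and per-field setters; this sketch is illustrative, not part of the patch), the new structs and the Table fields added below compose roughly as follows. The database, table, column, and constraint names are invented, and the IDL leaves the i32 update_rule/delete_rule codes unspecified, so the 0s are placeholders only:

import java.util.Arrays;

import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
import org.apache.hadoop.hive.metastore.api.Table;

public class ConstraintStructsSketch {
  public static void main(String[] args) {
    // Single-column primary key on default.orders(order_id); all names are invented.
    SQLPrimaryKey pk = new SQLPrimaryKey("default", "orders", "order_id", 1, "pk_orders");

    // Foreign key from default.order_lines(order_id) back to default.orders(order_id).
    // Constructor argument order follows the field order in the IDL above.
    SQLForeignKey fk = new SQLForeignKey("default", "orders", "order_id",
        "default", "order_lines", "order_id", 1, 0, 0, "fk_order_lines", "pk_orders");

    // Fields 15 and 16 of Table map to generated setters; populating the
    // remaining Table fields is elided here.
    Table table = new Table();
    table.setPrimaryKeys(Arrays.asList(pk));
    table.setForeignKeys(Arrays.asList(fk));
  }
}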
 struct Type {
   1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types
   2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE)
@@ -268,7 +298,9 @@ struct Table {
   11: string viewExpandedText, // expanded view text, null for non-view
   12: string tableType, // table type enum, e.g. EXTERNAL_TABLE
   13: optional PrincipalPrivilegeSet privileges,
-  14: optional bool temporary=false
+  14: optional bool temporary=false,
+  15: list<SQLPrimaryKey> primaryKeys,
+  16: list<SQLForeignKey> foreignKeys;
 }
 
 struct Partition {
@@ -1179,6 +1211,12 @@ service ThriftHiveMetastore extends fb303.FacebookService
   list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
                        throws(1:MetaException o2)
 
+  //primary keys and foreign keys
+  list<SQLPrimaryKey> get_primary_keys(1:string db_name, 2:string tbl_name)
+                       throws(1:MetaException o1, 2:NoSuchObjectException o2)
+  list<SQLForeignKey> get_foreign_keys(1:string parent_db_name, 2:string parent_tbl_name, 3:string foreign_db_name, 4:string foreign_tbl_name)
+                       throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
   // column statistics interfaces
 
   // update APIs persist the column statistics object(s) that are passed in. If statistics already
diff --git a/metastore/if/hive_metastore.thrift.orig b/metastore/if/hive_metastore.thrift.orig
new file mode 100755
index 0000000..87451a7
--- /dev/null
+++ b/metastore/if/hive_metastore.thrift.orig
@@ -0,0 +1,1401 @@
+#!/usr/local/bin/thrift -java
+
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#
+# Thrift Service that the MetaStore is built on
+#
+
+include "share/fb303/if/fb303.thrift"
+
+namespace java org.apache.hadoop.hive.metastore.api
+namespace php metastore
+namespace cpp Apache.Hadoop.Hive
+
+const string DDL_TIME = "transient_lastDdlTime"
+
+struct Version {
+  1: string version,
+  2: string comments
+}
+
+struct FieldSchema {
+  1: string name, // name of the field
+  2: string type, // type of the field.
primitive types defined above, specify list, map for lists & maps + 3: string comment +} + +struct ForeignKeyRel { + 1: list foreignCols, + 2: list parentCols, + 3: string foreignKeyName, + 4: string parentTableName, + 5: string parentTableSchema +} + +struct Type { + 1: string name, // one of the types in PrimitiveTypes or CollectionTypes or User defined types + 2: optional string type1, // object type if the name is 'list' (LIST_TYPE), key type if the name is 'map' (MAP_TYPE) + 3: optional string type2, // val type if the name is 'map' (MAP_TYPE) + 4: optional list fields // if the name is one of the user defined types +} + +enum HiveObjectType { + GLOBAL = 1, + DATABASE = 2, + TABLE = 3, + PARTITION = 4, + COLUMN = 5, +} + +enum PrincipalType { + USER = 1, + ROLE = 2, + GROUP = 3, +} + +const string HIVE_FILTER_FIELD_OWNER = "hive_filter_field_owner__" +const string HIVE_FILTER_FIELD_PARAMS = "hive_filter_field_params__" +const string HIVE_FILTER_FIELD_LAST_ACCESS = "hive_filter_field_last_access__" + +enum PartitionEventType { + LOAD_DONE = 1, +} + +// Enums for transaction and lock management +enum TxnState { + COMMITTED = 1, + ABORTED = 2, + OPEN = 3, +} + +enum LockLevel { + DB = 1, + TABLE = 2, + PARTITION = 3, +} + +enum LockState { + ACQUIRED = 1, // requester has the lock + WAITING = 2, // requester is waiting for the lock and should call checklock at a later point to see if the lock has been obtained. + ABORT = 3, // the lock has been aborted, most likely due to timeout + NOT_ACQUIRED = 4, // returned only with lockNoWait, indicates the lock was not available and was not acquired +} + +enum LockType { + SHARED_READ = 1, + SHARED_WRITE = 2, + EXCLUSIVE = 3, +} + +enum CompactionType { + MINOR = 1, + MAJOR = 2, +} + +enum GrantRevokeType { + GRANT = 1, + REVOKE = 2, +} + +// Types of events the client can request that the metastore fire. For now just support DML operations, as the metastore knows +// about DDL operations and there's no reason for the client to request such an event. 
+enum EventRequestType { + INSERT = 1, + UPDATE = 2, + DELETE = 3, +} + + +struct HiveObjectRef{ + 1: HiveObjectType objectType, + 2: string dbName, + 3: string objectName, + 4: list partValues, + 5: string columnName, +} + +struct PrivilegeGrantInfo { + 1: string privilege, + 2: i32 createTime, + 3: string grantor, + 4: PrincipalType grantorType, + 5: bool grantOption, +} + +struct HiveObjectPrivilege { + 1: HiveObjectRef hiveObject, + 2: string principalName, + 3: PrincipalType principalType, + 4: PrivilegeGrantInfo grantInfo, +} + +struct PrivilegeBag { + 1: list privileges, +} + +struct PrincipalPrivilegeSet { + 1: map> userPrivileges, // user name -> privilege grant info + 2: map> groupPrivileges, // group name -> privilege grant info + 3: map> rolePrivileges, //role name -> privilege grant info +} + +struct GrantRevokePrivilegeRequest { + 1: GrantRevokeType requestType; + 2: PrivilegeBag privileges; + 3: optional bool revokeGrantOption; // Only for revoke request +} + +struct GrantRevokePrivilegeResponse { + 1: optional bool success; +} + +struct Role { + 1: string roleName, + 2: i32 createTime, + 3: string ownerName, +} + +// Representation of a grant for a principal to a role +struct RolePrincipalGrant { + 1: string roleName, + 2: string principalName, + 3: PrincipalType principalType, + 4: bool grantOption, + 5: i32 grantTime, + 6: string grantorName, + 7: PrincipalType grantorPrincipalType +} + +struct GetRoleGrantsForPrincipalRequest { + 1: required string principal_name, + 2: required PrincipalType principal_type +} + +struct GetRoleGrantsForPrincipalResponse { + 1: required list principalGrants; +} + +struct GetPrincipalsInRoleRequest { + 1: required string roleName; +} + +struct GetPrincipalsInRoleResponse { + 1: required list principalGrants; +} + +struct GrantRevokeRoleRequest { + 1: GrantRevokeType requestType; + 2: string roleName; + 3: string principalName; + 4: PrincipalType principalType; + 5: optional string grantor; // Needed for grant + 6: optional PrincipalType grantorType; // Needed for grant + 7: optional bool grantOption; +} + +struct GrantRevokeRoleResponse { + 1: optional bool success; +} + +// namespace for tables +struct Database { + 1: string name, + 2: string description, + 3: string locationUri, + 4: map parameters, // properties associated with the database + 5: optional PrincipalPrivilegeSet privileges, + 6: optional string ownerName, + 7: optional PrincipalType ownerType +} + +// This object holds the information needed by SerDes +struct SerDeInfo { + 1: string name, // name of the serde, table name by default + 2: string serializationLib, // usually the class that implements the extractor & loader + 3: map parameters // initialization parameters +} + +// sort order of a column (column name along with asc(1)/desc(0)) +struct Order { + 1: string col, // sort column name + 2: i32 order // asc(1) or desc(0) +} + +// this object holds all the information about skewed table +struct SkewedInfo { + 1: list skewedColNames, // skewed column names + 2: list> skewedColValues, //skewed values + 3: map, string> skewedColValueLocationMaps, //skewed value to location mappings +} + +// this object holds all the information about physical storage of the data belonging to a table +struct StorageDescriptor { + 1: list cols, // required (refer to types defined above) + 2: string location, // defaults to //tablename + 3: string inputFormat, // SequenceFileInputFormat (binary) or TextInputFormat` or custom format + 4: string outputFormat, // SequenceFileOutputFormat 
(binary) or IgnoreKeyTextOutputFormat or custom format + 5: bool compressed, // compressed or not + 6: i32 numBuckets, // this must be specified if there are any dimension columns + 7: SerDeInfo serdeInfo, // serialization and deserialization information + 8: list bucketCols, // reducer grouping columns and clustering columns and bucketing columns` + 9: list sortCols, // sort order of the data in each bucket + 10: map parameters, // any user supplied key value hash + 11: optional SkewedInfo skewedInfo, // skewed information + 12: optional bool storedAsSubDirectories // stored as subdirectories or not +} + +// table information +struct Table { + 1: string tableName, // name of the table + 2: string dbName, // database name ('default') + 3: string owner, // owner of this table + 4: i32 createTime, // creation time of the table + 5: i32 lastAccessTime, // last access time (usually this will be filled from HDFS and shouldn't be relied on) + 6: i32 retention, // retention time + 7: StorageDescriptor sd, // storage descriptor of the table + 8: list partitionKeys, // partition keys of the table. only primitive types are supported + 9: map parameters, // to store comments or any other user level parameters + 10: string viewOriginalText, // original view text, null for non-view + 11: string viewExpandedText, // expanded view text, null for non-view + 12: string tableType, // table type enum, e.g. EXTERNAL_TABLE + 13: optional PrincipalPrivilegeSet privileges, + 14: optional bool temporary=false, + 15: list primaryKeys, + 16: list foreignKeys; +} + +struct Partition { + 1: list values // string value is converted to appropriate partition key type + 2: string dbName, + 3: string tableName, + 4: i32 createTime, + 5: i32 lastAccessTime, + 6: StorageDescriptor sd, + 7: map parameters, + 8: optional PrincipalPrivilegeSet privileges +} + +struct PartitionWithoutSD { + 1: list values // string value is converted to appropriate partition key type + 2: i32 createTime, + 3: i32 lastAccessTime, + 4: string relativePath, + 5: map parameters, + 6: optional PrincipalPrivilegeSet privileges +} + +struct PartitionSpecWithSharedSD { + 1: list partitions, + 2: StorageDescriptor sd, +} + +struct PartitionListComposingSpec { + 1: list partitions +} + +struct PartitionSpec { + 1: string dbName, + 2: string tableName, + 3: string rootPath, + 4: optional PartitionSpecWithSharedSD sharedSDPartitionSpec, + 5: optional PartitionListComposingSpec partitionList +} + +struct Index { + 1: string indexName, // unique with in the whole database namespace + 2: string indexHandlerClass, // reserved + 3: string dbName, + 4: string origTableName, + 5: i32 createTime, + 6: i32 lastAccessTime, + 7: string indexTableName, + 8: StorageDescriptor sd, + 9: map parameters, + 10: bool deferredRebuild +} + +// column statistics +struct BooleanColumnStatsData { +1: required i64 numTrues, +2: required i64 numFalses, +3: required i64 numNulls, +4: optional string bitVectors +} + +struct DoubleColumnStatsData { +1: optional double lowValue, +2: optional double highValue, +3: required i64 numNulls, +4: required i64 numDVs, +5: optional string bitVectors +} + +struct LongColumnStatsData { +1: optional i64 lowValue, +2: optional i64 highValue, +3: required i64 numNulls, +4: required i64 numDVs, +5: optional string bitVectors +} + +struct StringColumnStatsData { +1: required i64 maxColLen, +2: required double avgColLen, +3: required i64 numNulls, +4: required i64 numDVs, +5: optional string bitVectors +} + +struct BinaryColumnStatsData { +1: required 
i64 maxColLen, +2: required double avgColLen, +3: required i64 numNulls, +4: optional string bitVectors +} + + +struct Decimal { +1: required binary unscaled, +3: required i16 scale +} + +struct DecimalColumnStatsData { +1: optional Decimal lowValue, +2: optional Decimal highValue, +3: required i64 numNulls, +4: required i64 numDVs, +5: optional string bitVectors +} + +struct Date { +1: required i64 daysSinceEpoch +} + +struct DateColumnStatsData { +1: optional Date lowValue, +2: optional Date highValue, +3: required i64 numNulls, +4: required i64 numDVs, +5: optional string bitVectors +} + +union ColumnStatisticsData { +1: BooleanColumnStatsData booleanStats, +2: LongColumnStatsData longStats, +3: DoubleColumnStatsData doubleStats, +4: StringColumnStatsData stringStats, +5: BinaryColumnStatsData binaryStats, +6: DecimalColumnStatsData decimalStats, +7: DateColumnStatsData dateStats +} + +struct ColumnStatisticsObj { +1: required string colName, +2: required string colType, +3: required ColumnStatisticsData statsData +} + +struct ColumnStatisticsDesc { +1: required bool isTblLevel, +2: required string dbName, +3: required string tableName, +4: optional string partName, +5: optional i64 lastAnalyzed +} + +struct ColumnStatistics { +1: required ColumnStatisticsDesc statsDesc, +2: required list statsObj; +} + +struct AggrStats { +1: required list colStats, +2: required i64 partsFound // number of partitions for which stats were found +} + +struct SetPartitionsStatsRequest { +1: required list colStats +} + +// schema of the table/query results etc. +struct Schema { + // column names, types, comments + 1: list fieldSchemas, // delimiters etc + 2: map properties +} + +// Key-value store to be used with selected +// Metastore APIs (create, alter methods). +// The client can pass environment properties / configs that can be +// accessed in hooks. 
+struct EnvironmentContext { + 1: map properties +} + +// Return type for get_partitions_by_expr +struct PartitionsByExprResult { + 1: required list partitions, + // Whether the results has any (currently, all) partitions which may or may not match + 2: required bool hasUnknownPartitions +} + +struct PartitionsByExprRequest { + 1: required string dbName, + 2: required string tblName, + 3: required binary expr, + 4: optional string defaultPartitionName, + 5: optional i16 maxParts=-1 +} + +struct TableStatsResult { + 1: required list tableStats +} + +struct PartitionsStatsResult { + 1: required map> partStats +} + +struct TableStatsRequest { + 1: required string dbName, + 2: required string tblName, + 3: required list colNames +} + +struct PartitionsStatsRequest { + 1: required string dbName, + 2: required string tblName, + 3: required list colNames, + 4: required list partNames +} + +// Return type for add_partitions_req +struct AddPartitionsResult { + 1: optional list partitions, +} + +// Request type for add_partitions_req +struct AddPartitionsRequest { + 1: required string dbName, + 2: required string tblName, + 3: required list parts, + 4: required bool ifNotExists, + 5: optional bool needResult=true +} + +// Return type for drop_partitions_req +struct DropPartitionsResult { + 1: optional list partitions, +} + +struct DropPartitionsExpr { + 1: required binary expr; + 2: optional i32 partArchiveLevel; +} + +union RequestPartsSpec { + 1: list names; + 2: list exprs; +} + +// Request type for drop_partitions_req +// TODO: we might want to add "bestEffort" flag; where a subset can fail +struct DropPartitionsRequest { + 1: required string dbName, + 2: required string tblName, + 3: required RequestPartsSpec parts, + 4: optional bool deleteData, + 5: optional bool ifExists=true, // currently verified on client + 6: optional bool ignoreProtection, + 7: optional EnvironmentContext environmentContext, + 8: optional bool needResult=true +} + +enum FunctionType { + JAVA = 1, +} + +enum ResourceType { + JAR = 1, + FILE = 2, + ARCHIVE = 3, +} + +struct ResourceUri { + 1: ResourceType resourceType, + 2: string uri, +} + +// User-defined function +struct Function { + 1: string functionName, + 2: string dbName, + 3: string className, + 4: string ownerName, + 5: PrincipalType ownerType, + 6: i32 createTime, + 7: FunctionType functionType, + 8: list resourceUris, +} + +// Structs for transaction and locks +struct TxnInfo { + 1: required i64 id, + 2: required TxnState state, + 3: required string user, // used in 'show transactions' to help admins find who has open transactions + 4: required string hostname, // used in 'show transactions' to help admins find who has open transactions + 5: optional string agentInfo = "Unknown", + 6: optional i32 heartbeatCount=0, + 7: optional string metaInfo, +} + +struct GetOpenTxnsInfoResponse { + 1: required i64 txn_high_water_mark, + 2: required list open_txns, +} + +struct GetOpenTxnsResponse { + 1: required i64 txn_high_water_mark, + 2: required set open_txns, +} + +struct OpenTxnRequest { + 1: required i32 num_txns, + 2: required string user, + 3: required string hostname, + 4: optional string agentInfo = "Unknown", +} + +struct OpenTxnsResponse { + 1: required list txn_ids, +} + +struct AbortTxnRequest { + 1: required i64 txnid, +} + +struct CommitTxnRequest { + 1: required i64 txnid, +} + +struct LockComponent { + 1: required LockType type, + 2: required LockLevel level, + 3: required string dbname, + 4: optional string tablename, + 5: optional string 
partitionname, +} + +struct LockRequest { + 1: required list component, + 2: optional i64 txnid, + 3: required string user, // used in 'show locks' to help admins find who has open locks + 4: required string hostname, // used in 'show locks' to help admins find who has open locks + 5: optional string agentInfo = "Unknown", +} + +struct LockResponse { + 1: required i64 lockid, + 2: required LockState state, +} + +struct CheckLockRequest { + 1: required i64 lockid, + 2: optional i64 txnid, + 3: optional i64 elapsed_ms, +} + +struct UnlockRequest { + 1: required i64 lockid, +} + +struct ShowLocksRequest { + 1: optional string dbname, + 2: optional string tablename, + 3: optional string partname, + 4: optional bool isExtended=false, +} + +struct ShowLocksResponseElement { + 1: required i64 lockid, + 2: required string dbname, + 3: optional string tablename, + 4: optional string partname, + 5: required LockState state, + 6: required LockType type, + 7: optional i64 txnid, + 8: required i64 lastheartbeat, + 9: optional i64 acquiredat, + 10: required string user, + 11: required string hostname, + 12: optional i32 heartbeatCount = 0, + 13: optional string agentInfo, + 14: optional i64 blockedByExtId, + 15: optional i64 blockedByIntId, + 16: optional i64 lockIdInternal, +} + +struct ShowLocksResponse { + 1: list locks, +} + +struct HeartbeatRequest { + 1: optional i64 lockid, + 2: optional i64 txnid +} + +struct HeartbeatTxnRangeRequest { + 1: required i64 min, + 2: required i64 max +} + +struct HeartbeatTxnRangeResponse { + 1: required set aborted, + 2: required set nosuch +} + +struct CompactionRequest { + 1: required string dbname, + 2: required string tablename, + 3: optional string partitionname, + 4: required CompactionType type, + 5: optional string runas, +} + +struct ShowCompactRequest { +} + +struct ShowCompactResponseElement { + 1: required string dbname, + 2: required string tablename, + 3: optional string partitionname, + 4: required CompactionType type, + 5: required string state, + 6: optional string workerid, + 7: optional i64 start, + 8: optional string runAs, + 9: optional i64 hightestTxnId, // Highest Txn ID handled by this compaction + 10: optional string metaInfo, + 11: optional i64 endTime, + 12: optional string hadoopJobId = "None", +} + +struct ShowCompactResponse { + 1: required list compacts, +} + +struct AddDynamicPartitions { + 1: required i64 txnid, + 2: required string dbname, + 3: required string tablename, + 4: required list partitionnames, +} + +struct NotificationEventRequest { + 1: required i64 lastEvent, + 2: optional i32 maxEvents, +} + +struct NotificationEvent { + 1: required i64 eventId, + 2: required i32 eventTime, + 3: required string eventType, + 4: optional string dbName, + 5: optional string tableName, + 6: required string message, +} + +struct NotificationEventResponse { + 1: required list events, +} + +struct CurrentNotificationEventId { + 1: required i64 eventId, +} + +struct InsertEventRequestData { + 1: required list filesAdded +} + +union FireEventRequestData { + 1: InsertEventRequestData insertData +} + +struct FireEventRequest { + 1: required bool successful, + 2: required FireEventRequestData data + // dbname, tablename, and partition vals are included as optional in the top level event rather than placed in each type of + // subevent as I assume they'll be used across most event types. 
+ 3: optional string dbName, + 4: optional string tableName, + 5: optional list partitionVals, +} + +struct FireEventResponse { + // NOP for now, this is just a place holder for future responses +} + + +struct GetChangeVersionRequest { + 1: required string topic +} + +struct GetChangeVersionResult { + 1: required i64 version +} + + +struct MetadataPpdResult { + 1: optional binary metadata, + 2: optional binary includeBitset +} + +// Return type for get_file_metadata_by_expr +struct GetFileMetadataByExprResult { + 1: required map metadata, + 2: required bool isSupported +} + +enum FileMetadataExprType { + ORC_SARG = 1 +} + + +// Request type for get_file_metadata_by_expr +struct GetFileMetadataByExprRequest { + 1: required list fileIds, + 2: required binary expr, + 3: optional bool doGetFooters, + 4: optional FileMetadataExprType type +} + +// Return type for get_file_metadata +struct GetFileMetadataResult { + 1: required map metadata, + 2: required bool isSupported +} + +// Request type for get_file_metadata +struct GetFileMetadataRequest { + 1: required list fileIds +} + +// Return type for put_file_metadata +struct PutFileMetadataResult { +} + +// Request type for put_file_metadata +struct PutFileMetadataRequest { + 1: required list fileIds, + 2: required list metadata, + 3: optional FileMetadataExprType type +} + +// Return type for clear_file_metadata +struct ClearFileMetadataResult { +} + +// Request type for clear_file_metadata +struct ClearFileMetadataRequest { + 1: required list fileIds +} + +// Return type for cache_file_metadata +struct CacheFileMetadataResult { + 1: required bool isSupported +} + +// Request type for cache_file_metadata +struct CacheFileMetadataRequest { + 1: required string dbName, + 2: required string tblName, + 3: optional string partName, + 4: optional bool isAllParts +} + +struct GetAllFunctionsResponse { + 1: optional list functions +} + +struct TableMeta { + 1: required string dbName; + 2: required string tableName; + 3: required string tableType; + 4: optional string comments; +} + +exception MetaException { + 1: string message +} + +exception UnknownTableException { + 1: string message +} + +exception UnknownDBException { + 1: string message +} + +exception AlreadyExistsException { + 1: string message +} + +exception InvalidPartitionException { + 1: string message +} + +exception UnknownPartitionException { + 1: string message +} + +exception InvalidObjectException { + 1: string message +} + +exception NoSuchObjectException { + 1: string message +} + +exception IndexAlreadyExistsException { + 1: string message +} + +exception InvalidOperationException { + 1: string message +} + +exception ConfigValSecurityException { + 1: string message +} + +exception InvalidInputException { + 1: string message +} + +// Transaction and lock exceptions +exception NoSuchTxnException { + 1: string message +} + +exception TxnAbortedException { + 1: string message +} + +exception TxnOpenException { + 1: string message +} + +exception NoSuchLockException { + 1: string message +} + +/** +* This interface is live. 
+*/ +service ThriftHiveMetastore extends fb303.FacebookService +{ + string getMetaConf(1:string key) throws(1:MetaException o1) + void setMetaConf(1:string key, 2:string value) throws(1:MetaException o1) + + void create_database(1:Database database) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) + Database get_database(1:string name) throws(1:NoSuchObjectException o1, 2:MetaException o2) + void drop_database(1:string name, 2:bool deleteData, 3:bool cascade) throws(1:NoSuchObjectException o1, 2:InvalidOperationException o2, 3:MetaException o3) + list get_databases(1:string pattern) throws(1:MetaException o1) + list get_all_databases() throws(1:MetaException o1) + void alter_database(1:string dbname, 2:Database db) throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // returns the type with given name (make seperate calls for the dependent types if needed) + Type get_type(1:string name) throws(1:MetaException o1, 2:NoSuchObjectException o2) + bool create_type(1:Type type) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3) + bool drop_type(1:string type) throws(1:MetaException o1, 2:NoSuchObjectException o2) + map get_type_all(1:string name) + throws(1:MetaException o2) + + // Gets a list of FieldSchemas describing the columns of a particular table + list get_fields(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3), + list get_fields_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + + // Gets a list of FieldSchemas describing both the columns and the partition keys of a particular table + list get_schema(1: string db_name, 2: string table_name) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + list get_schema_with_environment_context(1: string db_name, 2: string table_name, 3:EnvironmentContext environment_context) throws (1: MetaException o1, 2: UnknownTableException o2, 3: UnknownDBException o3) + + // create a Hive table. 
Following fields must be set + // tableName + // database (only 'default' for now until Hive QL supports databases) + // owner (not needed, but good to have for tracking purposes) + // sd.cols (list of field schemas) + // sd.inputFormat (SequenceFileInputFormat (binary like falcon tables or u_full) or TextInputFormat) + // sd.outputFormat (SequenceFileInputFormat (binary) or TextInputFormat) + // sd.serdeInfo.serializationLib (SerDe class name eg org.apache.hadoop.hive.serde.simple_meta.MetadataTypedColumnsetSerDe + // * See notes on DDL_TIME + void create_table(1:Table tbl) throws(1:AlreadyExistsException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:NoSuchObjectException o4) + void create_table_with_environment_context(1:Table tbl, + 2:EnvironmentContext environment_context) + throws (1:AlreadyExistsException o1, + 2:InvalidObjectException o2, 3:MetaException o3, + 4:NoSuchObjectException o4) + // drops the table and all the partitions associated with it if the table has partitions + // delete data (including partitions) if deleteData is set to true + void drop_table(1:string dbname, 2:string name, 3:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o3) + void drop_table_with_environment_context(1:string dbname, 2:string name, 3:bool deleteData, + 4:EnvironmentContext environment_context) + throws(1:NoSuchObjectException o1, 2:MetaException o3) + list get_tables(1: string db_name, 2: string pattern) throws (1: MetaException o1) + list get_table_meta(1: string db_patterns, 2: string tbl_patterns, 3: list tbl_types) + throws (1: MetaException o1) + list get_all_tables(1: string db_name) throws (1: MetaException o1) + + Table get_table(1:string dbname, 2:string tbl_name) + throws (1:MetaException o1, 2:NoSuchObjectException o2) + list get_table_objects_by_name(1:string dbname, 2:list tbl_names) + throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) + + // Get a list of table names that match a filter. + // The filter operators are LIKE, <, <=, >, >=, =, <> + // + // In the filter statement, values interpreted as strings must be enclosed in quotes, + // while values interpreted as integers should not be. Strings and integers are the only + // supported value types. + // + // The currently supported key names in the filter are: + // Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name + // and supports all filter operators + // Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times + // and supports all filter operators except LIKE + // Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values + // and only supports the filter operators = and <>. + // Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement. + // For example, to filter on parameter keys called "retention", the key name in the filter + // statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention" + // Also, = and <> only work for keys that exist + // in the tables. E.g., if you are looking for tables where key1 <> value, it will only + // look at tables that have a value for the parameter key1. 
+ // Some example filter statements include: + // filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " + + // Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0"; + // filter = Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " + + // Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\"" + // @param dbName + // The name of the database from which you will retrieve the table names + // @param filterType + // The type of filter + // @param filter + // The filter string + // @param max_tables + // The maximum number of tables returned + // @return A list of table names that match the desired filter + list get_table_names_by_filter(1:string dbname, 2:string filter, 3:i16 max_tables=-1) + throws (1:MetaException o1, 2:InvalidOperationException o2, 3:UnknownDBException o3) + + // alter table applies to only future partitions not for existing partitions + // * See notes on DDL_TIME + void alter_table(1:string dbname, 2:string tbl_name, 3:Table new_tbl) + throws (1:InvalidOperationException o1, 2:MetaException o2) + void alter_table_with_environment_context(1:string dbname, 2:string tbl_name, + 3:Table new_tbl, 4:EnvironmentContext environment_context) + throws (1:InvalidOperationException o1, 2:MetaException o2) + // alter table not only applies to future partitions but also cascade to existing partitions + void alter_table_with_cascade(1:string dbname, 2:string tbl_name, 3:Table new_tbl, 4:bool cascade) + throws (1:InvalidOperationException o1, 2:MetaException o2) + // the following applies to only tables that have partitions + // * See notes on DDL_TIME + Partition add_partition(1:Partition new_part) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition add_partition_with_environment_context(1:Partition new_part, + 2:EnvironmentContext environment_context) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, + 3:MetaException o3) + i32 add_partitions(1:list new_parts) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + i32 add_partitions_pspec(1:list new_parts) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition append_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + AddPartitionsResult add_partitions_req(1:AddPartitionsRequest request) + throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition append_partition_with_environment_context(1:string db_name, 2:string tbl_name, + 3:list part_vals, 4:EnvironmentContext environment_context) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition append_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + Partition append_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name, + 3:string part_name, 4:EnvironmentContext environment_context) + throws (1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3) + bool drop_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + bool drop_partition_with_environment_context(1:string db_name, 2:string tbl_name, + 3:list part_vals, 4:bool deleteData, 5:EnvironmentContext environment_context) + 
throws(1:NoSuchObjectException o1, 2:MetaException o2) + bool drop_partition_by_name(1:string db_name, 2:string tbl_name, 3:string part_name, 4:bool deleteData) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + bool drop_partition_by_name_with_environment_context(1:string db_name, 2:string tbl_name, + 3:string part_name, 4:bool deleteData, 5:EnvironmentContext environment_context) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + DropPartitionsResult drop_partitions_req(1: DropPartitionsRequest req) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + + Partition get_partition(1:string db_name, 2:string tbl_name, 3:list part_vals) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + Partition exchange_partition(1:map partitionSpecs, 2:string source_db, + 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) + throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, + 4:InvalidInputException o4) + + list exchange_partitions(1:map partitionSpecs, 2:string source_db, + 3:string source_table_name, 4:string dest_db, 5:string dest_table_name) + throws(1:MetaException o1, 2:NoSuchObjectException o2, 3:InvalidObjectException o3, + 4:InvalidInputException o4) + + Partition get_partition_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, + 4: string user_name, 5: list group_names) throws(1:MetaException o1, 2:NoSuchObjectException o2) + + Partition get_partition_by_name(1:string db_name 2:string tbl_name, 3:string part_name) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // returns all the partitions for this table in reverse chronological order. + // If max parts is given then it will return only that many. + list get_partitions(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + list get_partitions_with_auth(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1, + 4: string user_name, 5: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + + list get_partitions_pspec(1:string db_name, 2:string tbl_name, 3:i32 max_parts=-1) + throws(1:NoSuchObjectException o1, 2:MetaException o2) + + list get_partition_names(1:string db_name, 2:string tbl_name, 3:i16 max_parts=-1) + throws(1:MetaException o2) + + // get_partition*_ps methods allow filtering by a partial partition specification, + // as needed for dynamic partitions. The values that are not restricted should + // be empty strings. Nulls were considered (instead of "") but caused errors in + // generated Python code. The size of part_vals may be smaller than the + // number of partition columns - the unspecified values are considered the same + // as "". + list get_partitions_ps(1:string db_name 2:string tbl_name + 3:list part_vals, 4:i16 max_parts=-1) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + list get_partitions_ps_with_auth(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1, + 5: string user_name, 6: list group_names) throws(1:NoSuchObjectException o1, 2:MetaException o2) + + list get_partition_names_ps(1:string db_name, + 2:string tbl_name, 3:list part_vals, 4:i16 max_parts=-1) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // get the partitions matching the given partition filter + list get_partitions_by_filter(1:string db_name 2:string tbl_name + 3:string filter, 4:i16 max_parts=-1) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // List partitions as PartitionSpec instances. 
+ list get_part_specs_by_filter(1:string db_name 2:string tbl_name + 3:string filter, 4:i32 max_parts=-1) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // get the partitions matching the given partition filter + // unlike get_partitions_by_filter, takes serialized hive expression, and with that can work + // with any filter (get_partitions_by_filter only works if the filter can be pushed down to JDOQL. + PartitionsByExprResult get_partitions_by_expr(1:PartitionsByExprRequest req) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // get the partitions matching the given partition filter + i32 get_num_partitions_by_filter(1:string db_name 2:string tbl_name 3:string filter) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // get partitions give a list of partition names + list get_partitions_by_names(1:string db_name 2:string tbl_name 3:list names) + throws(1:MetaException o1, 2:NoSuchObjectException o2) + + // changes the partition to the new partition object. partition is identified from the part values + // in the new_part + // * See notes on DDL_TIME + void alter_partition(1:string db_name, 2:string tbl_name, 3:Partition new_part) + throws (1:InvalidOperationException o1, 2:MetaException o2) + + // change a list of partitions. All partitions are altered atomically and all + // prehooks are fired together followed by all post hooks + void alter_partitions(1:string db_name, 2:string tbl_name, 3:list new_parts) + throws (1:InvalidOperationException o1, 2:MetaException o2) + void alter_partitions_with_environment_context(1:string db_name, 2:string tbl_name, 3:list new_parts, 4:EnvironmentContext environment_context) throws (1:InvalidOperationException o1, 2:MetaException o2) + + void alter_partition_with_environment_context(1:string db_name, + 2:string tbl_name, 3:Partition new_part, + 4:EnvironmentContext environment_context) + throws (1:InvalidOperationException o1, 2:MetaException o2) + + // rename the old partition to the new partition object by changing old part values to the part values + // in the new_part. old partition is identified from part_vals. + // partition keys in new_part should be the same as those in old partition. + void rename_partition(1:string db_name, 2:string tbl_name, 3:list part_vals, 4:Partition new_part) + throws (1:InvalidOperationException o1, 2:MetaException o2) + + // returns whether or not the partition name is valid based on the value of the config + // hive.metastore.partition.name.whitelist.pattern + bool partition_name_has_valid_characters(1:list part_vals, 2:bool throw_exception) + throws(1: MetaException o1) + + // gets the value of the configuration key in the metastore server. returns + // defaultValue if the key does not exist. if the configuration key does not + // begin with "hive", "mapred", or "hdfs", a ConfigValSecurityException is + // thrown. 
+
+  void markPartitionForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+      4:PartitionEventType eventType) throws (1: MetaException o1, 2: NoSuchObjectException o2,
+      3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+      6: InvalidPartitionException o6)
+  bool isPartitionMarkedForEvent(1:string db_name, 2:string tbl_name, 3:map<string,string> part_vals,
+      4: PartitionEventType eventType) throws (1: MetaException o1, 2:NoSuchObjectException o2,
+      3: UnknownDBException o3, 4: UnknownTableException o4, 5: UnknownPartitionException o5,
+      6: InvalidPartitionException o6)
+
+  //index
+  Index add_index(1:Index new_index, 2: Table index_table)
+      throws(1:InvalidObjectException o1, 2:AlreadyExistsException o2, 3:MetaException o3)
+  void alter_index(1:string dbname, 2:string base_tbl_name, 3:string idx_name, 4:Index new_idx)
+      throws (1:InvalidOperationException o1, 2:MetaException o2)
+  bool drop_index_by_name(1:string db_name, 2:string tbl_name, 3:string index_name, 4:bool deleteData)
+      throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  Index get_index_by_name(1:string db_name 2:string tbl_name, 3:string index_name)
+      throws(1:MetaException o1, 2:NoSuchObjectException o2)
+
+  list<Index> get_indexes(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
+      throws(1:NoSuchObjectException o1, 2:MetaException o2)
+  list<string> get_index_names(1:string db_name, 2:string tbl_name, 3:i16 max_indexes=-1)
+      throws(1:MetaException o2)
+
+  // column statistics interfaces
+
+  // update APIs persist the column statistics object(s) that are passed in. If statistics already
+  // exist for one or more columns, the existing statistics will be overwritten. The update APIs
+  // validate that the dbName, tableName, partName, colName[] passed in as part of the ColumnStatistics
+  // struct are valid, and throw InvalidInputException/NoSuchObjectException if found to be invalid
+  bool update_table_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
+      2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+  bool update_partition_column_statistics(1:ColumnStatistics stats_obj) throws (1:NoSuchObjectException o1,
+      2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+
+  // get APIs return the column statistics corresponding to db_name, tbl_name, [part_name], col_name if
+  // such statistics exist. If the required statistics don't exist, get APIs throw NoSuchObjectException.
+  // For instance, if get_table_column_statistics is called on a partitioned table for which only
+  // partition-level column stats exist, get_table_column_statistics will throw NoSuchObjectException
+  ColumnStatistics get_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidInputException o3, 4:InvalidObjectException o4)
+  ColumnStatistics get_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name,
+      4:string col_name) throws (1:NoSuchObjectException o1, 2:MetaException o2,
+      3:InvalidInputException o3, 4:InvalidObjectException o4)
+  TableStatsResult get_table_statistics_req(1:TableStatsRequest request) throws
+      (1:NoSuchObjectException o1, 2:MetaException o2)
+  PartitionsStatsResult get_partitions_statistics_req(1:PartitionsStatsRequest request) throws
+      (1:NoSuchObjectException o1, 2:MetaException o2)
+  AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws
+      (1:NoSuchObjectException o1, 2:MetaException o2)
+  bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws
+      (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4)
+
+  // delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name]
+  // and col_name. If the delete API doesn't find the statistics record in the metastore, it throws NoSuchObjectException.
+  // Delete APIs validate the input and, if the input is invalid, throw InvalidInputException/InvalidObjectException.
+  bool delete_partition_column_statistics(1:string db_name, 2:string tbl_name, 3:string part_name, 4:string col_name) throws
+      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
+      4:InvalidInputException o4)
+  bool delete_table_column_statistics(1:string db_name, 2:string tbl_name, 3:string col_name) throws
+      (1:NoSuchObjectException o1, 2:MetaException o2, 3:InvalidObjectException o3,
+      4:InvalidInputException o4)
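+  // Illustrative round trip (editorial sketch, not part of this patch): build a
+  // ColumnStatistics for (db, tbl, col) and call update_table_column_statistics(stats);
+  // a later get_table_column_statistics(db, tbl, col) returns the persisted object, and
+  // after delete_table_column_statistics(db, tbl, col) the same get throws
+  // NoSuchObjectException.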
+
+  //
+  // user-defined functions
+  //
+
+  void create_function(1:Function func)
+      throws (1:AlreadyExistsException o1,
+              2:InvalidObjectException o2,
+              3:MetaException o3,
+              4:NoSuchObjectException o4)
+
+  void drop_function(1:string dbName, 2:string funcName)
+      throws (1:NoSuchObjectException o1, 2:MetaException o3)
+
+  void alter_function(1:string dbName, 2:string funcName, 3:Function newFunc)
+      throws (1:InvalidOperationException o1, 2:MetaException o2)
+
+  list<string> get_functions(1:string dbName, 2:string pattern)
+      throws (1:MetaException o1)
+  Function get_function(1:string dbName, 2:string funcName)
+      throws (1:MetaException o1, 2:NoSuchObjectException o2)
+
+  GetAllFunctionsResponse get_all_functions() throws (1:MetaException o1)
+
+  //authorization privileges
+
+  bool create_role(1:Role role) throws(1:MetaException o1)
+  bool drop_role(1:string role_name) throws(1:MetaException o1)
+  list<string> get_role_names() throws(1:MetaException o1)
+  // Deprecated, use grant_revoke_role()
+  bool grant_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type,
+      4:string grantor, 5:PrincipalType grantorType, 6:bool grant_option) throws(1:MetaException o1)
+  // Deprecated, use grant_revoke_role()
+  bool revoke_role(1:string role_name, 2:string principal_name, 3:PrincipalType principal_type)
+      throws(1:MetaException o1)
+  list<Role> list_roles(1:string principal_name, 2:PrincipalType principal_type)
+      throws(1:MetaException o1)
+  GrantRevokeRoleResponse grant_revoke_role(1:GrantRevokeRoleRequest request) throws(1:MetaException o1)
+
+  // get all role-grants for users/roles that have been granted the given role
+  // Note that in the returned list of RolePrincipalGrants, the roleName is
+  // redundant as it would match the role_name argument of this function
+  GetPrincipalsInRoleResponse get_principals_in_role(1: GetPrincipalsInRoleRequest request) throws(1:MetaException o1)
+
+  // get grant information of all roles granted to the given principal
+  // Note that in the returned list of RolePrincipalGrants, the principal name,type is
+  // redundant as it would match the principal name,type arguments of this function
+  GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(1: GetRoleGrantsForPrincipalRequest request) throws(1:MetaException o1)
+
+  PrincipalPrivilegeSet get_privilege_set(1:HiveObjectRef hiveObject, 2:string user_name,
+      3: list<string> group_names) throws(1:MetaException o1)
+  list<HiveObjectPrivilege> list_privileges(1:string principal_name, 2:PrincipalType principal_type,
+      3: HiveObjectRef hiveObject) throws(1:MetaException o1)
+
+  // Deprecated, use grant_revoke_privileges()
+  bool grant_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+  // Deprecated, use grant_revoke_privileges()
+  bool revoke_privileges(1:PrivilegeBag privileges) throws(1:MetaException o1)
+  GrantRevokePrivilegeResponse grant_revoke_privileges(1:GrantRevokePrivilegeRequest request) throws(1:MetaException o1);
+
+  // this is used by metastore client to send UGI information to metastore server immediately
+  // after setting up a connection.
+  list<string> set_ugi(1:string user_name, 2:list<string> group_names) throws (1:MetaException o1)
+
+  //Authentication (delegation token) interfaces
+
+  // get metastore server delegation token for use from the map/reduce tasks to authenticate
+  // to metastore server
+  string get_delegation_token(1:string token_owner, 2:string renewer_kerberos_principal_name)
+      throws (1:MetaException o1)
+
+  // method to renew delegation token obtained from metastore server
+  i64 renew_delegation_token(1:string token_str_form) throws (1:MetaException o1)
+
+  // method to cancel delegation token obtained from metastore server
+  void cancel_delegation_token(1:string token_str_form) throws (1:MetaException o1)
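+  // Typical token flow (editorial sketch, not part of this patch):
+  //   tok = get_delegation_token(owner, renewerKerberosPrincipal)
+  //   ... ship tok to tasks, calling renew_delegation_token(tok) while they run ...
+  //   cancel_delegation_token(tok) once the job completes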
+
+  // add a delegation token
+  bool add_token(1:string token_identifier, 2:string delegation_token)
+
+  // remove a delegation token
+  bool remove_token(1:string token_identifier)
+
+  // get a delegation token by identifier
+  string get_token(1:string token_identifier)
+
+  // get all delegation token identifiers
+  list<string> get_all_token_identifiers()
+
+  // add master key
+  i32 add_master_key(1:string key) throws (1:MetaException o1)
+
+  // update master key
+  void update_master_key(1:i32 seq_number, 2:string key) throws (1:NoSuchObjectException o1, 2:MetaException o2)
+
+  // remove master key
+  bool remove_master_key(1:i32 key_seq)
+
+  // get master keys
+  list<string> get_master_keys()
+
+  // Transaction and lock management calls
+  // Get just list of open transactions
+  GetOpenTxnsResponse get_open_txns()
+  // Get list of open transactions with state (open, aborted)
+  GetOpenTxnsInfoResponse get_open_txns_info()
+  OpenTxnsResponse open_txns(1:OpenTxnRequest rqst)
+  void abort_txn(1:AbortTxnRequest rqst) throws (1:NoSuchTxnException o1)
+  void commit_txn(1:CommitTxnRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+  LockResponse lock(1:LockRequest rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
+  LockResponse check_lock(1:CheckLockRequest rqst)
+      throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2, 3:NoSuchLockException o3)
+  void unlock(1:UnlockRequest rqst) throws (1:NoSuchLockException o1, 2:TxnOpenException o2)
+  ShowLocksResponse show_locks(1:ShowLocksRequest rqst)
+  void heartbeat(1:HeartbeatRequest ids) throws (1:NoSuchLockException o1, 2:NoSuchTxnException o2, 3:TxnAbortedException o3)
+  HeartbeatTxnRangeResponse heartbeat_txn_range(1:HeartbeatTxnRangeRequest txns)
+  void compact(1:CompactionRequest rqst)
+  ShowCompactResponse show_compact(1:ShowCompactRequest rqst)
+  void add_dynamic_partitions(1:AddDynamicPartitions rqst) throws (1:NoSuchTxnException o1, 2:TxnAbortedException o2)
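+  // Typical transactional write (editorial sketch, not part of this patch):
+  //   resp = open_txns(openRqst); lr = lock(lockRqst), polling check_lock(...) while the
+  //   response is WAITING; heartbeat(...) periodically during the write; finally
+  //   commit_txn(...) on success or abort_txn(...) on failure.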
+
+  // Notification logging calls
+  NotificationEventResponse get_next_notification(1:NotificationEventRequest rqst)
+  CurrentNotificationEventId get_current_notificationEventId()
+  FireEventResponse fire_listener_event(1:FireEventRequest rqst)
+  void flushCache()
+
+  GetFileMetadataByExprResult get_file_metadata_by_expr(1:GetFileMetadataByExprRequest req)
+  GetFileMetadataResult get_file_metadata(1:GetFileMetadataRequest req)
+  PutFileMetadataResult put_file_metadata(1:PutFileMetadataRequest req)
+  ClearFileMetadataResult clear_file_metadata(1:ClearFileMetadataRequest req)
+  CacheFileMetadataResult cache_file_metadata(1:CacheFileMetadataRequest req)
+
+  GetChangeVersionResult get_change_version(1:GetChangeVersionRequest req)
+}
+
+// * Note about the DDL_TIME: When creating or altering a table or a partition,
+// if the DDL_TIME is not set, the current time will be used.
+
+// For storing info about archived partitions in parameters
+
+// Whether the partition is archived
+const string IS_ARCHIVED = "is_archived",
+// The original location of the partition, before archiving. After archiving,
+// this directory will contain the archive. When the partition
+// is dropped, this directory will be deleted
+const string ORIGINAL_LOCATION = "original_location",
+
+// Whether or not the table is considered immutable - immutable tables can only be
+// overwritten or created if unpartitioned, or if partitioned, partitions inside them
+// can only be overwritten or created. Immutability supports write-once and replace
+// semantics, but not append.
+const string IS_IMMUTABLE = "immutable", + +// these should be needed only for backward compatibility with filestore +const string META_TABLE_COLUMNS = "columns", +const string META_TABLE_COLUMN_TYPES = "columns.types", +const string BUCKET_FIELD_NAME = "bucket_field_name", +const string BUCKET_COUNT = "bucket_count", +const string FIELD_TO_DIMENSION = "field_to_dimension", +const string META_TABLE_NAME = "name", +const string META_TABLE_DB = "db", +const string META_TABLE_LOCATION = "location", +const string META_TABLE_SERDE = "serde", +const string META_TABLE_PARTITION_COLUMNS = "partition_columns", +const string META_TABLE_PARTITION_COLUMN_TYPES = "partition_columns.types", +const string FILE_INPUT_FORMAT = "file.inputformat", +const string FILE_OUTPUT_FORMAT = "file.outputformat", +const string META_TABLE_STORAGE = "storage_handler", +const string TABLE_IS_TRANSACTIONAL = "transactional", +const string TABLE_NO_AUTO_COMPACT = "no_auto_compaction", + + diff --git a/metastore/scripts/upgrade/derby/034-HIVE-13076.derby.sql b/metastore/scripts/upgrade/derby/034-HIVE-13076.derby.sql new file mode 100644 index 0000000..ebe2154 --- /dev/null +++ b/metastore/scripts/upgrade/derby/034-HIVE-13076.derby.sql @@ -0,0 +1,4 @@ +CREATE TABLE "APP"."PRIMARY_KEYS" ("TBL_ID" BIGINT NOT NULL, "PKEY_NAME" VARCHAR(128) NOT NULL, "PKEY_COMMENT" VARCHAR(4000), "PKEY_TYPE" VARCHAR(128) NOT NULL); +CREATE TABLE "APP"."FOREIGN_KEY_RELS" ("FOR_CD_ID" BIGINT NOT NULL, "REF_CD_ID" BIGINT NOT NULL, "PK_FK_CONSTRAINT_NAME" VARCHAR(400) NOT NULL, "FOR_TBL_ID" BIGINT NOT NULL); +CREATE TABLE "APP"."PK_FK_MAPPING" ("FOR_TBL_ID" BIGINT NOT NULL, "REF_TBL_ID" BIGINT NOT NULL, "PK_FK_CONSTRAINT_NAME" VARCHAR(400) NOT NULL); +ALTER TABLE "APP"."PRIMARY_KEYS" ADD CONSTRAINT "PRIMARY_KEY_PK" PRIMARY KEY ("TBL_ID", "PKEY_NAME"); diff --git a/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql index 42f4eb6..3eb0862 100644 --- a/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql +++ b/metastore/scripts/upgrade/derby/hive-schema-2.1.0.derby.sql @@ -108,6 +108,8 @@ CREATE TABLE "APP"."NOTIFICATION_LOG" ("NL_ID" BIGINT NOT NULL, "DB_NAME" VARCHA CREATE TABLE "APP"."NOTIFICATION_SEQUENCE" ("NNI_ID" BIGINT NOT NULL, "NEXT_EVENT_ID" BIGINT NOT NULL); +RUN '034-HIVE-13076.derby.sql'; + -- ---------------------------------------------- -- DDL Statements for indexes -- ---------------------------------------------- diff --git a/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql index a0bac3c..dde8c45 100644 --- a/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql +++ b/metastore/scripts/upgrade/derby/upgrade-2.0.0-to-2.1.0.derby.sql @@ -1,4 +1,5 @@ -- Upgrade MetaStore schema from 2.0.0 to 2.1.0 RUN '033-HIVE-12892.derby.sql'; +RUN '034-HIVE-13076.derby.sql'; UPDATE "APP".VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1; diff --git a/metastore/scripts/upgrade/mssql/019-HIVE-13076.mssql.sql b/metastore/scripts/upgrade/mssql/019-HIVE-13076.mssql.sql new file mode 100644 index 0000000..eca1a74 --- /dev/null +++ b/metastore/scripts/upgrade/mssql/019-HIVE-13076.mssql.sql @@ -0,0 +1,22 @@ +CREATE TABLE PRIMARY_KEYS +( + TBL_ID BIGINT NOT NULL, + PKEY_NAME VARCHAR(128) NOT NULL, + PKEY_COMMENT VARCHAR(400) NOT NULL, + PKEY_TYPE VARCHAR(128) NOT NULL +) ; + +CREATE TABLE FOREIGN_KEY_RELS +( + FOR_CD_ID BIGINT NOT NULL, + 
REF_CD_ID BIGINT NOT NULL,
+  PK_FK_CONSTRAINT_NAME VARCHAR(400) NOT NULL,
+  FOR_TBL_ID BIGINT NOT NULL
+) ;
+
+CREATE TABLE PK_FK_MAPPING
+(
+  FOR_TBL_ID BIGINT NOT NULL,
+  REF_TBL_ID BIGINT NOT NULL,
+  PK_FK_CONSTRAINT_NAME VARCHAR(400) NOT NULL
+) ;
\ No newline at end of file
diff --git a/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql b/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
index cf5a662..efb85f6 100644
--- a/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/hive-schema-2.1.0.mssql.sql
@@ -993,6 +993,8 @@ CREATE TABLE AUX_TABLE (
   )
 );
 
+:r 019-HIVE-13076.mssql.sql;
+
 -- -----------------------------------------------------------------
 -- Record schema version. Should be the last step in the init script
 
diff --git a/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
index f25daf2..3e5cb30 100644
--- a/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
+++ b/metastore/scripts/upgrade/mssql/upgrade-2.0.0-to-2.1.0.mssql.sql
@@ -1,6 +1,7 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE;
 
 :r 018-HIVE-12892.mssql.sql;
+:r 019-HIVE-13076.mssql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS MESSAGE;
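-- Illustrative example (editorial sketch, not part of this patch; assumes PKEY_NAME
-- holds the key column name and TBL_ID references TBLS.TBL_ID): a primary key on
-- column "id" of the table with TBL_ID 42 would be recorded roughly as
--   INSERT INTO PRIMARY_KEYS (TBL_ID, PKEY_NAME, PKEY_COMMENT, PKEY_TYPE)
--   VALUES (42, 'id', 'declared in CREATE TABLE', 'int');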
diff --git a/metastore/scripts/upgrade/mysql/034-HIVE-13076.mysql.sql b/metastore/scripts/upgrade/mysql/034-HIVE-13076.mysql.sql
new file mode 100644
index 0000000..5924a40
--- /dev/null
+++ b/metastore/scripts/upgrade/mysql/034-HIVE-13076.mysql.sql
@@ -0,0 +1,22 @@
+CREATE TABLE IF NOT EXISTS `PRIMARY_KEYS`
+(
+  `TBL_ID` BIGINT NOT NULL,
+  `PKEY_NAME` VARCHAR(128) NOT NULL,
+  `PKEY_COMMENT` VARCHAR(400) NOT NULL,
+  `PKEY_TYPE` VARCHAR(128) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `FOREIGN_KEY_RELS`
+(
+  `FOR_CD_ID` BIGINT NOT NULL,
+  `REF_CD_ID` BIGINT NOT NULL,
+  `PK_FK_CONSTRAINT_NAME` VARCHAR(400) NOT NULL,
+  `FOR_TBL_ID` BIGINT NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
+
+CREATE TABLE IF NOT EXISTS `PK_FK_MAPPING`
+(
+  `FOR_TBL_ID` BIGINT NOT NULL,
+  `REF_TBL_ID` BIGINT NOT NULL,
+  `PK_FK_CONSTRAINT_NAME` VARCHAR(400) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=latin1;
diff --git a/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
index 6fd3209..0ac8011 100644
--- a/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/hive-schema-2.1.0.mysql.sql
@@ -819,7 +819,7 @@ CREATE TABLE IF NOT EXISTS `NOTIFICATION_SEQUENCE`
   PRIMARY KEY (`NNI_ID`)
 ) ENGINE=InnoDB DEFAULT CHARSET=latin1;
 
-
+SOURCE 034-HIVE-13076.mysql.sql;
 
 -- ----------------------------
 -- Transaction and Lock Tables
diff --git a/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
index e790636..eb21f73 100644
--- a/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
+++ b/metastore/scripts/upgrade/mysql/upgrade-2.0.0-to-2.1.0.mysql.sql
@@ -1,6 +1,7 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' ';
 
 SOURCE 033-HIVE-12892.mysql.sql;
+SOURCE 034-HIVE-13076.mysql.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS ' ';
diff --git a/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql b/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
new file mode 100644
index 0000000..da75f6e
--- /dev/null
+++ b/metastore/scripts/upgrade/oracle/034-HIVE-13076.oracle.sql
@@ -0,0 +1,22 @@
+CREATE TABLE PRIMARY_KEYS
+(
+  TBL_ID NUMBER,
+  PKEY_NAME VARCHAR(128),
+  PKEY_COMMENT VARCHAR(400),
+  PKEY_TYPE VARCHAR(128)
+) ;
+
+CREATE TABLE FOREIGN_KEY_RELS
+(
+  FOR_CD_ID NUMBER,
+  REF_CD_ID NUMBER,
+  PK_FK_CONSTRAINT_NAME VARCHAR(400),
+  FOR_TBL_ID NUMBER
+) ;
+
+CREATE TABLE PK_FK_MAPPING
+(
+  FOR_TBL_ID NUMBER,
+  REF_TBL_ID NUMBER,
+  PK_FK_CONSTRAINT_NAME VARCHAR(400)
+) ;
\ No newline at end of file
diff --git a/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
index 774f6be..0f3821a 100644
--- a/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/hive-schema-2.1.0.oracle.sql
@@ -786,6 +786,8 @@ ALTER TABLE FUNC_RU ADD CONSTRAINT FUNC_RU_FK1 FOREIGN KEY (FUNC_ID) REFERENCES
 
 CREATE INDEX FUNC_RU_N49 ON FUNC_RU (FUNC_ID);
 
+@034-HIVE-13076.oracle.sql;
+
 ------------------------------
 -- Transaction and lock tables
 ------------------------------
diff --git a/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
index 8368d08..8c065a1 100644
--- a/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
+++ b/metastore/scripts/upgrade/oracle/upgrade-2.0.0-to-2.1.0.oracle.sql
@@ -1,6 +1,7 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual;
 
 @033-HIVE-12892.oracle.sql;
+@034-HIVE-13076.oracle.sql;
 
 UPDATE VERSION SET SCHEMA_VERSION='2.1.0', VERSION_COMMENT='Hive release version 2.1.0' where VER_ID=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0' AS Status from dual;
diff --git a/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql b/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql
new file mode 100644
index 0000000..b5c39f5
--- /dev/null
+++ b/metastore/scripts/upgrade/postgres/033-HIVE-13076.postgres.sql
@@ -0,0 +1,22 @@
+CREATE TABLE "PRIMARY_KEYS"
+(
+  "TBL_ID" BIGINT NOT NULL,
+  "PKEY_NAME" CHARACTER VARYING(128) NOT NULL,
+  "PKEY_COMMENT" CHARACTER VARYING(400) NOT NULL,
+  "PKEY_TYPE" CHARACTER VARYING(128) NOT NULL
+) ;
+
+CREATE TABLE "FOREIGN_KEY_RELS"
+(
+  "FOR_CD_ID" BIGINT NOT NULL,
+  "REF_CD_ID" BIGINT NOT NULL,
+  "PK_FK_CONSTRAINT_NAME" CHARACTER VARYING(400) NOT NULL,
+  "FOR_TBL_ID" BIGINT NOT NULL
+) ;
+
+CREATE TABLE "PK_FK_MAPPING"
+(
+  "FOR_TBL_ID" BIGINT NOT NULL,
+  "REF_TBL_ID" BIGINT NOT NULL,
+  "PK_FK_CONSTRAINT_NAME" CHARACTER VARYING(400) NOT NULL
+) ;
\ No newline at end of file
diff --git a/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
index 7463a37..d73a754 100644
--- a/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/hive-schema-2.1.0.postgres.sql
@@ -594,6 +594,8 @@ CREATE TABLE "NOTIFICATION_SEQUENCE"
   PRIMARY KEY ("NNI_ID")
 );
 
+\i 033-HIVE-13076.postgres.sql;
+
 --
 -- Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace:
 --
diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
index 6172407..e96a6ec 100644
--- a/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
+++ b/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql
@@ -1,6 +1,7 @@
 SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0';
 
 \i 032-HIVE-12892.postgres.sql;
+\i 033-HIVE-13076.postgres.sql;
 
 UPDATE "VERSION" SET "SCHEMA_VERSION"='2.1.0', "VERSION_COMMENT"='Hive release version 2.1.0' where "VER_ID"=1;
 SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0';
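-- Illustrative usage (editorial sketch, not part of this patch): once the include
-- lines above are in place, the 2.0.0 -> 2.1.0 upgrade is normally driven through
-- schematool, e.g.
--   bin/schematool -dbType postgres -upgradeSchemaFrom 2.0.0
-- which runs upgrade-2.0.0-to-2.1.0.postgres.sql and, through it, the new
-- 033-HIVE-13076.postgres.sql script.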
Name: BUCKETING_COLS_pkey; Type: CONSTRAINT; Schema: public; Owner: hiveuser; Tablespace: -- diff --git a/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql index 6172407..e96a6ec 100644 --- a/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/upgrade-2.0.0-to-2.1.0.postgres.sql @@ -1,6 +1,7 @@ SELECT 'Upgrading MetaStore schema from 2.0.0 to 2.1.0'; \i 032-HIVE-12892.postgres.sql; +\i 033-HIVE-13076.postgres.sql; UPDATE "VERSION" SET "SCHEMA_VERSION"='2.1.0', "VERSION_COMMENT"='Hive release version 2.1.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 2.0.0 to 2.1.0'; diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 6e5de20..a954a6c 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1240,14 +1240,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size725; - ::apache::thrift::protocol::TType _etype728; - xfer += iprot->readListBegin(_etype728, _size725); - this->success.resize(_size725); - uint32_t _i729; - for (_i729 = 0; _i729 < _size725; ++_i729) + uint32_t _size755; + ::apache::thrift::protocol::TType _etype758; + xfer += iprot->readListBegin(_etype758, _size755); + this->success.resize(_size755); + uint32_t _i759; + for (_i759 = 0; _i759 < _size755; ++_i759) { - xfer += iprot->readString(this->success[_i729]); + xfer += iprot->readString(this->success[_i759]); } xfer += iprot->readListEnd(); } @@ -1286,10 +1286,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter730; - for (_iter730 = this->success.begin(); _iter730 != this->success.end(); ++_iter730) + std::vector ::const_iterator _iter760; + for (_iter760 = this->success.begin(); _iter760 != this->success.end(); ++_iter760) { - xfer += oprot->writeString((*_iter730)); + xfer += oprot->writeString((*_iter760)); } xfer += oprot->writeListEnd(); } @@ -1334,14 +1334,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size731; - ::apache::thrift::protocol::TType _etype734; - xfer += iprot->readListBegin(_etype734, _size731); - (*(this->success)).resize(_size731); - uint32_t _i735; - for (_i735 = 0; _i735 < _size731; ++_i735) + uint32_t _size761; + ::apache::thrift::protocol::TType _etype764; + xfer += iprot->readListBegin(_etype764, _size761); + (*(this->success)).resize(_size761); + uint32_t _i765; + for (_i765 = 0; _i765 < _size761; ++_i765) { - xfer += iprot->readString((*(this->success))[_i735]); + xfer += iprot->readString((*(this->success))[_i765]); } xfer += iprot->readListEnd(); } @@ -1458,14 +1458,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size736; - ::apache::thrift::protocol::TType _etype739; - xfer += iprot->readListBegin(_etype739, _size736); - 
this->success.resize(_size736); - uint32_t _i740; - for (_i740 = 0; _i740 < _size736; ++_i740) + uint32_t _size766; + ::apache::thrift::protocol::TType _etype769; + xfer += iprot->readListBegin(_etype769, _size766); + this->success.resize(_size766); + uint32_t _i770; + for (_i770 = 0; _i770 < _size766; ++_i770) { - xfer += iprot->readString(this->success[_i740]); + xfer += iprot->readString(this->success[_i770]); } xfer += iprot->readListEnd(); } @@ -1504,10 +1504,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter741; - for (_iter741 = this->success.begin(); _iter741 != this->success.end(); ++_iter741) + std::vector ::const_iterator _iter771; + for (_iter771 = this->success.begin(); _iter771 != this->success.end(); ++_iter771) { - xfer += oprot->writeString((*_iter741)); + xfer += oprot->writeString((*_iter771)); } xfer += oprot->writeListEnd(); } @@ -1552,14 +1552,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size742; - ::apache::thrift::protocol::TType _etype745; - xfer += iprot->readListBegin(_etype745, _size742); - (*(this->success)).resize(_size742); - uint32_t _i746; - for (_i746 = 0; _i746 < _size742; ++_i746) + uint32_t _size772; + ::apache::thrift::protocol::TType _etype775; + xfer += iprot->readListBegin(_etype775, _size772); + (*(this->success)).resize(_size772); + uint32_t _i776; + for (_i776 = 0; _i776 < _size772; ++_i776) { - xfer += iprot->readString((*(this->success))[_i746]); + xfer += iprot->readString((*(this->success))[_i776]); } xfer += iprot->readListEnd(); } @@ -2621,17 +2621,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size747; - ::apache::thrift::protocol::TType _ktype748; - ::apache::thrift::protocol::TType _vtype749; - xfer += iprot->readMapBegin(_ktype748, _vtype749, _size747); - uint32_t _i751; - for (_i751 = 0; _i751 < _size747; ++_i751) + uint32_t _size777; + ::apache::thrift::protocol::TType _ktype778; + ::apache::thrift::protocol::TType _vtype779; + xfer += iprot->readMapBegin(_ktype778, _vtype779, _size777); + uint32_t _i781; + for (_i781 = 0; _i781 < _size777; ++_i781) { - std::string _key752; - xfer += iprot->readString(_key752); - Type& _val753 = this->success[_key752]; - xfer += _val753.read(iprot); + std::string _key782; + xfer += iprot->readString(_key782); + Type& _val783 = this->success[_key782]; + xfer += _val783.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2670,11 +2670,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter754; - for (_iter754 = this->success.begin(); _iter754 != this->success.end(); ++_iter754) + std::map ::const_iterator _iter784; + for (_iter784 = this->success.begin(); _iter784 != this->success.end(); ++_iter784) { - xfer += oprot->writeString(_iter754->first); - xfer += _iter754->second.write(oprot); + xfer += 
oprot->writeString(_iter784->first); + xfer += _iter784->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2719,17 +2719,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size755; - ::apache::thrift::protocol::TType _ktype756; - ::apache::thrift::protocol::TType _vtype757; - xfer += iprot->readMapBegin(_ktype756, _vtype757, _size755); - uint32_t _i759; - for (_i759 = 0; _i759 < _size755; ++_i759) + uint32_t _size785; + ::apache::thrift::protocol::TType _ktype786; + ::apache::thrift::protocol::TType _vtype787; + xfer += iprot->readMapBegin(_ktype786, _vtype787, _size785); + uint32_t _i789; + for (_i789 = 0; _i789 < _size785; ++_i789) { - std::string _key760; - xfer += iprot->readString(_key760); - Type& _val761 = (*(this->success))[_key760]; - xfer += _val761.read(iprot); + std::string _key790; + xfer += iprot->readString(_key790); + Type& _val791 = (*(this->success))[_key790]; + xfer += _val791.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2883,14 +2883,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size762; - ::apache::thrift::protocol::TType _etype765; - xfer += iprot->readListBegin(_etype765, _size762); - this->success.resize(_size762); - uint32_t _i766; - for (_i766 = 0; _i766 < _size762; ++_i766) + uint32_t _size792; + ::apache::thrift::protocol::TType _etype795; + xfer += iprot->readListBegin(_etype795, _size792); + this->success.resize(_size792); + uint32_t _i796; + for (_i796 = 0; _i796 < _size792; ++_i796) { - xfer += this->success[_i766].read(iprot); + xfer += this->success[_i796].read(iprot); } xfer += iprot->readListEnd(); } @@ -2945,10 +2945,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter767; - for (_iter767 = this->success.begin(); _iter767 != this->success.end(); ++_iter767) + std::vector ::const_iterator _iter797; + for (_iter797 = this->success.begin(); _iter797 != this->success.end(); ++_iter797) { - xfer += (*_iter767).write(oprot); + xfer += (*_iter797).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3001,14 +3001,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size768; - ::apache::thrift::protocol::TType _etype771; - xfer += iprot->readListBegin(_etype771, _size768); - (*(this->success)).resize(_size768); - uint32_t _i772; - for (_i772 = 0; _i772 < _size768; ++_i772) + uint32_t _size798; + ::apache::thrift::protocol::TType _etype801; + xfer += iprot->readListBegin(_etype801, _size798); + (*(this->success)).resize(_size798); + uint32_t _i802; + for (_i802 = 0; _i802 < _size798; ++_i802) { - xfer += (*(this->success))[_i772].read(iprot); + xfer += (*(this->success))[_i802].read(iprot); } xfer += iprot->readListEnd(); } @@ -3194,14 +3194,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size773; - ::apache::thrift::protocol::TType _etype776; - xfer += iprot->readListBegin(_etype776, 
_size773); - this->success.resize(_size773); - uint32_t _i777; - for (_i777 = 0; _i777 < _size773; ++_i777) + uint32_t _size803; + ::apache::thrift::protocol::TType _etype806; + xfer += iprot->readListBegin(_etype806, _size803); + this->success.resize(_size803); + uint32_t _i807; + for (_i807 = 0; _i807 < _size803; ++_i807) { - xfer += this->success[_i777].read(iprot); + xfer += this->success[_i807].read(iprot); } xfer += iprot->readListEnd(); } @@ -3256,10 +3256,10 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter778; - for (_iter778 = this->success.begin(); _iter778 != this->success.end(); ++_iter778) + std::vector ::const_iterator _iter808; + for (_iter808 = this->success.begin(); _iter808 != this->success.end(); ++_iter808) { - xfer += (*_iter778).write(oprot); + xfer += (*_iter808).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3312,14 +3312,14 @@ uint32_t ThriftHiveMetastore_get_fields_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size779; - ::apache::thrift::protocol::TType _etype782; - xfer += iprot->readListBegin(_etype782, _size779); - (*(this->success)).resize(_size779); - uint32_t _i783; - for (_i783 = 0; _i783 < _size779; ++_i783) + uint32_t _size809; + ::apache::thrift::protocol::TType _etype812; + xfer += iprot->readListBegin(_etype812, _size809); + (*(this->success)).resize(_size809); + uint32_t _i813; + for (_i813 = 0; _i813 < _size809; ++_i813) { - xfer += (*(this->success))[_i783].read(iprot); + xfer += (*(this->success))[_i813].read(iprot); } xfer += iprot->readListEnd(); } @@ -3489,14 +3489,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size784; - ::apache::thrift::protocol::TType _etype787; - xfer += iprot->readListBegin(_etype787, _size784); - this->success.resize(_size784); - uint32_t _i788; - for (_i788 = 0; _i788 < _size784; ++_i788) + uint32_t _size814; + ::apache::thrift::protocol::TType _etype817; + xfer += iprot->readListBegin(_etype817, _size814); + this->success.resize(_size814); + uint32_t _i818; + for (_i818 = 0; _i818 < _size814; ++_i818) { - xfer += this->success[_i788].read(iprot); + xfer += this->success[_i818].read(iprot); } xfer += iprot->readListEnd(); } @@ -3551,10 +3551,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter789; - for (_iter789 = this->success.begin(); _iter789 != this->success.end(); ++_iter789) + std::vector ::const_iterator _iter819; + for (_iter819 = this->success.begin(); _iter819 != this->success.end(); ++_iter819) { - xfer += (*_iter789).write(oprot); + xfer += (*_iter819).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3607,14 +3607,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size790; - ::apache::thrift::protocol::TType _etype793; - xfer += 
iprot->readListBegin(_etype793, _size790); - (*(this->success)).resize(_size790); - uint32_t _i794; - for (_i794 = 0; _i794 < _size790; ++_i794) + uint32_t _size820; + ::apache::thrift::protocol::TType _etype823; + xfer += iprot->readListBegin(_etype823, _size820); + (*(this->success)).resize(_size820); + uint32_t _i824; + for (_i824 = 0; _i824 < _size820; ++_i824) { - xfer += (*(this->success))[_i794].read(iprot); + xfer += (*(this->success))[_i824].read(iprot); } xfer += iprot->readListEnd(); } @@ -3800,14 +3800,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::read(:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size795; - ::apache::thrift::protocol::TType _etype798; - xfer += iprot->readListBegin(_etype798, _size795); - this->success.resize(_size795); - uint32_t _i799; - for (_i799 = 0; _i799 < _size795; ++_i799) + uint32_t _size825; + ::apache::thrift::protocol::TType _etype828; + xfer += iprot->readListBegin(_etype828, _size825); + this->success.resize(_size825); + uint32_t _i829; + for (_i829 = 0; _i829 < _size825; ++_i829) { - xfer += this->success[_i799].read(iprot); + xfer += this->success[_i829].read(iprot); } xfer += iprot->readListEnd(); } @@ -3862,10 +3862,10 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_result::write(: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter800; - for (_iter800 = this->success.begin(); _iter800 != this->success.end(); ++_iter800) + std::vector ::const_iterator _iter830; + for (_iter830 = this->success.begin(); _iter830 != this->success.end(); ++_iter830) { - xfer += (*_iter800).write(oprot); + xfer += (*_iter830).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3918,14 +3918,14 @@ uint32_t ThriftHiveMetastore_get_schema_with_environment_context_presult::read(: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size801; - ::apache::thrift::protocol::TType _etype804; - xfer += iprot->readListBegin(_etype804, _size801); - (*(this->success)).resize(_size801); - uint32_t _i805; - for (_i805 = 0; _i805 < _size801; ++_i805) + uint32_t _size831; + ::apache::thrift::protocol::TType _etype834; + xfer += iprot->readListBegin(_etype834, _size831); + (*(this->success)).resize(_size831); + uint32_t _i835; + for (_i835 = 0; _i835 < _size831; ++_i835) { - xfer += (*(this->success))[_i805].read(iprot); + xfer += (*(this->success))[_i835].read(iprot); } xfer += iprot->readListEnd(); } @@ -5099,14 +5099,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size806; - ::apache::thrift::protocol::TType _etype809; - xfer += iprot->readListBegin(_etype809, _size806); - this->success.resize(_size806); - uint32_t _i810; - for (_i810 = 0; _i810 < _size806; ++_i810) + uint32_t _size836; + ::apache::thrift::protocol::TType _etype839; + xfer += iprot->readListBegin(_etype839, _size836); + this->success.resize(_size836); + uint32_t _i840; + for (_i840 = 0; _i840 < _size836; ++_i840) { - xfer += iprot->readString(this->success[_i810]); + xfer += iprot->readString(this->success[_i840]); } xfer += iprot->readListEnd(); } @@ -5145,10 +5145,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter811; - for (_iter811 = this->success.begin(); _iter811 != this->success.end(); ++_iter811) + std::vector ::const_iterator _iter841; + for (_iter841 = this->success.begin(); _iter841 != this->success.end(); ++_iter841) { - xfer += oprot->writeString((*_iter811)); + xfer += oprot->writeString((*_iter841)); } xfer += oprot->writeListEnd(); } @@ -5193,14 +5193,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size812; - ::apache::thrift::protocol::TType _etype815; - xfer += iprot->readListBegin(_etype815, _size812); - (*(this->success)).resize(_size812); - uint32_t _i816; - for (_i816 = 0; _i816 < _size812; ++_i816) + uint32_t _size842; + ::apache::thrift::protocol::TType _etype845; + xfer += iprot->readListBegin(_etype845, _size842); + (*(this->success)).resize(_size842); + uint32_t _i846; + for (_i846 = 0; _i846 < _size842; ++_i846) { - xfer += iprot->readString((*(this->success))[_i816]); + xfer += iprot->readString((*(this->success))[_i846]); } xfer += iprot->readListEnd(); } @@ -5275,14 +5275,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_types.clear(); - uint32_t _size817; - ::apache::thrift::protocol::TType _etype820; - xfer += iprot->readListBegin(_etype820, _size817); - this->tbl_types.resize(_size817); - uint32_t _i821; - for (_i821 = 0; _i821 < _size817; ++_i821) + uint32_t _size847; + ::apache::thrift::protocol::TType _etype850; + xfer += iprot->readListBegin(_etype850, _size847); + this->tbl_types.resize(_size847); + uint32_t _i851; + for (_i851 = 0; _i851 < _size847; ++_i851) { - xfer += iprot->readString(this->tbl_types[_i821]); + xfer += iprot->readString(this->tbl_types[_i851]); } xfer += iprot->readListEnd(); } @@ -5319,10 +5319,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_types.size())); - std::vector ::const_iterator _iter822; - for (_iter822 = this->tbl_types.begin(); _iter822 != this->tbl_types.end(); ++_iter822) + std::vector ::const_iterator _iter852; + for (_iter852 = this->tbl_types.begin(); _iter852 != this->tbl_types.end(); ++_iter852) { - xfer += oprot->writeString((*_iter822)); + xfer += oprot->writeString((*_iter852)); } xfer += oprot->writeListEnd(); } @@ -5354,10 +5354,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("tbl_types", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_types)).size())); - std::vector ::const_iterator _iter823; - for (_iter823 = (*(this->tbl_types)).begin(); _iter823 != (*(this->tbl_types)).end(); ++_iter823) + std::vector ::const_iterator _iter853; + for (_iter853 = (*(this->tbl_types)).begin(); _iter853 != (*(this->tbl_types)).end(); ++_iter853) { - xfer += oprot->writeString((*_iter823)); + xfer += oprot->writeString((*_iter853)); } xfer += oprot->writeListEnd(); } @@ -5398,14 +5398,14 @@ uint32_t 
ThriftHiveMetastore_get_table_meta_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size824; - ::apache::thrift::protocol::TType _etype827; - xfer += iprot->readListBegin(_etype827, _size824); - this->success.resize(_size824); - uint32_t _i828; - for (_i828 = 0; _i828 < _size824; ++_i828) + uint32_t _size854; + ::apache::thrift::protocol::TType _etype857; + xfer += iprot->readListBegin(_etype857, _size854); + this->success.resize(_size854); + uint32_t _i858; + for (_i858 = 0; _i858 < _size854; ++_i858) { - xfer += this->success[_i828].read(iprot); + xfer += this->success[_i858].read(iprot); } xfer += iprot->readListEnd(); } @@ -5444,10 +5444,10 @@ uint32_t ThriftHiveMetastore_get_table_meta_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter829; - for (_iter829 = this->success.begin(); _iter829 != this->success.end(); ++_iter829) + std::vector ::const_iterator _iter859; + for (_iter859 = this->success.begin(); _iter859 != this->success.end(); ++_iter859) { - xfer += (*_iter829).write(oprot); + xfer += (*_iter859).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5492,14 +5492,14 @@ uint32_t ThriftHiveMetastore_get_table_meta_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size830; - ::apache::thrift::protocol::TType _etype833; - xfer += iprot->readListBegin(_etype833, _size830); - (*(this->success)).resize(_size830); - uint32_t _i834; - for (_i834 = 0; _i834 < _size830; ++_i834) + uint32_t _size860; + ::apache::thrift::protocol::TType _etype863; + xfer += iprot->readListBegin(_etype863, _size860); + (*(this->success)).resize(_size860); + uint32_t _i864; + for (_i864 = 0; _i864 < _size860; ++_i864) { - xfer += (*(this->success))[_i834].read(iprot); + xfer += (*(this->success))[_i864].read(iprot); } xfer += iprot->readListEnd(); } @@ -5637,14 +5637,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size835; - ::apache::thrift::protocol::TType _etype838; - xfer += iprot->readListBegin(_etype838, _size835); - this->success.resize(_size835); - uint32_t _i839; - for (_i839 = 0; _i839 < _size835; ++_i839) + uint32_t _size865; + ::apache::thrift::protocol::TType _etype868; + xfer += iprot->readListBegin(_etype868, _size865); + this->success.resize(_size865); + uint32_t _i869; + for (_i869 = 0; _i869 < _size865; ++_i869) { - xfer += iprot->readString(this->success[_i839]); + xfer += iprot->readString(this->success[_i869]); } xfer += iprot->readListEnd(); } @@ -5683,10 +5683,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter840; - for (_iter840 = this->success.begin(); _iter840 != this->success.end(); ++_iter840) + std::vector ::const_iterator _iter870; + for (_iter870 = this->success.begin(); _iter870 != this->success.end(); ++_iter870) { - xfer += oprot->writeString((*_iter840)); + xfer += oprot->writeString((*_iter870)); } xfer += oprot->writeListEnd(); } 
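From here on the ThriftHiveMetastore.cpp hunks are purely mechanical: regenerating the bindings shifted every generator-assigned temporary by 30 (_size725 becomes _size755, _iter730 becomes _iter760, and so on) because new types and methods appear earlier in the IDL; no serialization logic changes. Every hunk instantiates the same pattern: the read side calls readListBegin to obtain the element type and count, resizes the target vector, reads each element in a loop, and closes with readListEnd; the write side mirrors it with writeListBegin, a typed const_iterator loop, and writeListEnd; maps do the same with readMapBegin/writeMapBegin and separate key and value types. In the generated source the casts are static_cast<uint32_t>(...) and the iterators carry their element type, e.g. std::vector<std::string>::const_iterator. A minimal standalone sketch of the pattern against the Thrift C++ TProtocol API (hand-written for illustration, not the generated code itself):

#include <map>
#include <string>
#include <vector>
#include <thrift/protocol/TProtocol.h>

using apache::thrift::protocol::TProtocol;
using apache::thrift::protocol::TType;
using apache::thrift::protocol::T_STRING;

// Read a list<string>: mirrors the _sizeNNN/_etypeNNN/_iNNN locals above.
uint32_t readStringList(TProtocol* iprot, std::vector<std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  TType etype;
  xfer += iprot->readListBegin(etype, size);  // wire element type + count
  out.clear();
  out.resize(size);
  for (uint32_t i = 0; i < size; ++i) {
    xfer += iprot->readString(out[i]);
  }
  xfer += iprot->readListEnd();
  return xfer;                                // bytes consumed
}

// Write a list<string>: mirrors the _iterNNN loops above.
uint32_t writeStringList(TProtocol* oprot, const std::vector<std::string>& in) {
  uint32_t xfer = 0;
  xfer += oprot->writeListBegin(T_STRING, static_cast<uint32_t>(in.size()));
  for (std::vector<std::string>::const_iterator it = in.begin();
       it != in.end(); ++it) {
    xfer += oprot->writeString(*it);
  }
  xfer += oprot->writeListEnd();
  return xfer;
}

// Read a map<string,string>, as in the partitionSpecs hunks below.
uint32_t readStringMap(TProtocol* iprot, std::map<std::string, std::string>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  TType ktype, vtype;
  xfer += iprot->readMapBegin(ktype, vtype, size);
  out.clear();
  for (uint32_t i = 0; i < size; ++i) {
    std::string key;
    xfer += iprot->readString(key);
    xfer += iprot->readString(out[key]);      // value read in place
  }
  xfer += iprot->readMapEnd();
  return xfer;
}

Each helper returns the byte count, matching the xfer accumulation convention of the generated methods.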
@@ -5731,14 +5731,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size841; - ::apache::thrift::protocol::TType _etype844; - xfer += iprot->readListBegin(_etype844, _size841); - (*(this->success)).resize(_size841); - uint32_t _i845; - for (_i845 = 0; _i845 < _size841; ++_i845) + uint32_t _size871; + ::apache::thrift::protocol::TType _etype874; + xfer += iprot->readListBegin(_etype874, _size871); + (*(this->success)).resize(_size871); + uint32_t _i875; + for (_i875 = 0; _i875 < _size871; ++_i875) { - xfer += iprot->readString((*(this->success))[_i845]); + xfer += iprot->readString((*(this->success))[_i875]); } xfer += iprot->readListEnd(); } @@ -6048,14 +6048,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size846; - ::apache::thrift::protocol::TType _etype849; - xfer += iprot->readListBegin(_etype849, _size846); - this->tbl_names.resize(_size846); - uint32_t _i850; - for (_i850 = 0; _i850 < _size846; ++_i850) + uint32_t _size876; + ::apache::thrift::protocol::TType _etype879; + xfer += iprot->readListBegin(_etype879, _size876); + this->tbl_names.resize(_size876); + uint32_t _i880; + for (_i880 = 0; _i880 < _size876; ++_i880) { - xfer += iprot->readString(this->tbl_names[_i850]); + xfer += iprot->readString(this->tbl_names[_i880]); } xfer += iprot->readListEnd(); } @@ -6088,10 +6088,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter851; - for (_iter851 = this->tbl_names.begin(); _iter851 != this->tbl_names.end(); ++_iter851) + std::vector ::const_iterator _iter881; + for (_iter881 = this->tbl_names.begin(); _iter881 != this->tbl_names.end(); ++_iter881) { - xfer += oprot->writeString((*_iter851)); + xfer += oprot->writeString((*_iter881)); } xfer += oprot->writeListEnd(); } @@ -6119,10 +6119,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter852; - for (_iter852 = (*(this->tbl_names)).begin(); _iter852 != (*(this->tbl_names)).end(); ++_iter852) + std::vector ::const_iterator _iter882; + for (_iter882 = (*(this->tbl_names)).begin(); _iter882 != (*(this->tbl_names)).end(); ++_iter882) { - xfer += oprot->writeString((*_iter852)); + xfer += oprot->writeString((*_iter882)); } xfer += oprot->writeListEnd(); } @@ -6163,14 +6163,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size853; - ::apache::thrift::protocol::TType _etype856; - xfer += iprot->readListBegin(_etype856, _size853); - this->success.resize(_size853); - uint32_t _i857; - for (_i857 = 0; _i857 < _size853; ++_i857) + uint32_t _size883; + ::apache::thrift::protocol::TType _etype886; + xfer += iprot->readListBegin(_etype886, _size883); + this->success.resize(_size883); + uint32_t _i887; + for (_i887 = 0; _i887 < _size883; ++_i887) { - 
xfer += this->success[_i857].read(iprot); + xfer += this->success[_i887].read(iprot); } xfer += iprot->readListEnd(); } @@ -6225,10 +6225,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector
<Table> ::const_iterator _iter858; - for (_iter858 = this->success.begin(); _iter858 != this->success.end(); ++_iter858) + std::vector<Table>
::const_iterator _iter888; + for (_iter888 = this->success.begin(); _iter888 != this->success.end(); ++_iter888) { - xfer += (*_iter858).write(oprot); + xfer += (*_iter888).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6281,14 +6281,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size859; - ::apache::thrift::protocol::TType _etype862; - xfer += iprot->readListBegin(_etype862, _size859); - (*(this->success)).resize(_size859); - uint32_t _i863; - for (_i863 = 0; _i863 < _size859; ++_i863) + uint32_t _size889; + ::apache::thrift::protocol::TType _etype892; + xfer += iprot->readListBegin(_etype892, _size889); + (*(this->success)).resize(_size889); + uint32_t _i893; + for (_i893 = 0; _i893 < _size889; ++_i893) { - xfer += (*(this->success))[_i863].read(iprot); + xfer += (*(this->success))[_i893].read(iprot); } xfer += iprot->readListEnd(); } @@ -6474,14 +6474,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size864; - ::apache::thrift::protocol::TType _etype867; - xfer += iprot->readListBegin(_etype867, _size864); - this->success.resize(_size864); - uint32_t _i868; - for (_i868 = 0; _i868 < _size864; ++_i868) + uint32_t _size894; + ::apache::thrift::protocol::TType _etype897; + xfer += iprot->readListBegin(_etype897, _size894); + this->success.resize(_size894); + uint32_t _i898; + for (_i898 = 0; _i898 < _size894; ++_i898) { - xfer += iprot->readString(this->success[_i868]); + xfer += iprot->readString(this->success[_i898]); } xfer += iprot->readListEnd(); } @@ -6536,10 +6536,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter869; - for (_iter869 = this->success.begin(); _iter869 != this->success.end(); ++_iter869) + std::vector ::const_iterator _iter899; + for (_iter899 = this->success.begin(); _iter899 != this->success.end(); ++_iter899) { - xfer += oprot->writeString((*_iter869)); + xfer += oprot->writeString((*_iter899)); } xfer += oprot->writeListEnd(); } @@ -6592,14 +6592,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size870; - ::apache::thrift::protocol::TType _etype873; - xfer += iprot->readListBegin(_etype873, _size870); - (*(this->success)).resize(_size870); - uint32_t _i874; - for (_i874 = 0; _i874 < _size870; ++_i874) + uint32_t _size900; + ::apache::thrift::protocol::TType _etype903; + xfer += iprot->readListBegin(_etype903, _size900); + (*(this->success)).resize(_size900); + uint32_t _i904; + for (_i904 = 0; _i904 < _size900; ++_i904) { - xfer += iprot->readString((*(this->success))[_i874]); + xfer += iprot->readString((*(this->success))[_i904]); } xfer += iprot->readListEnd(); } @@ -7933,14 +7933,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size875; - ::apache::thrift::protocol::TType _etype878; - xfer += iprot->readListBegin(_etype878, _size875); - this->new_parts.resize(_size875); - uint32_t _i879; - 
for (_i879 = 0; _i879 < _size875; ++_i879) + uint32_t _size905; + ::apache::thrift::protocol::TType _etype908; + xfer += iprot->readListBegin(_etype908, _size905); + this->new_parts.resize(_size905); + uint32_t _i909; + for (_i909 = 0; _i909 < _size905; ++_i909) { - xfer += this->new_parts[_i879].read(iprot); + xfer += this->new_parts[_i909].read(iprot); } xfer += iprot->readListEnd(); } @@ -7969,10 +7969,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter880; - for (_iter880 = this->new_parts.begin(); _iter880 != this->new_parts.end(); ++_iter880) + std::vector ::const_iterator _iter910; + for (_iter910 = this->new_parts.begin(); _iter910 != this->new_parts.end(); ++_iter910) { - xfer += (*_iter880).write(oprot); + xfer += (*_iter910).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7996,10 +7996,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter881; - for (_iter881 = (*(this->new_parts)).begin(); _iter881 != (*(this->new_parts)).end(); ++_iter881) + std::vector ::const_iterator _iter911; + for (_iter911 = (*(this->new_parts)).begin(); _iter911 != (*(this->new_parts)).end(); ++_iter911) { - xfer += (*_iter881).write(oprot); + xfer += (*_iter911).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8208,14 +8208,14 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size882; - ::apache::thrift::protocol::TType _etype885; - xfer += iprot->readListBegin(_etype885, _size882); - this->new_parts.resize(_size882); - uint32_t _i886; - for (_i886 = 0; _i886 < _size882; ++_i886) + uint32_t _size912; + ::apache::thrift::protocol::TType _etype915; + xfer += iprot->readListBegin(_etype915, _size912); + this->new_parts.resize(_size912); + uint32_t _i916; + for (_i916 = 0; _i916 < _size912; ++_i916) { - xfer += this->new_parts[_i886].read(iprot); + xfer += this->new_parts[_i916].read(iprot); } xfer += iprot->readListEnd(); } @@ -8244,10 +8244,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_args::write(::apache::thrift:: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter887; - for (_iter887 = this->new_parts.begin(); _iter887 != this->new_parts.end(); ++_iter887) + std::vector ::const_iterator _iter917; + for (_iter917 = this->new_parts.begin(); _iter917 != this->new_parts.end(); ++_iter917) { - xfer += (*_iter887).write(oprot); + xfer += (*_iter917).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8271,10 +8271,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pspec_pargs::write(::apache::thrift: xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter888; - for (_iter888 = 
(*(this->new_parts)).begin(); _iter888 != (*(this->new_parts)).end(); ++_iter888) + std::vector ::const_iterator _iter918; + for (_iter918 = (*(this->new_parts)).begin(); _iter918 != (*(this->new_parts)).end(); ++_iter918) { - xfer += (*_iter888).write(oprot); + xfer += (*_iter918).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8499,14 +8499,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size889; - ::apache::thrift::protocol::TType _etype892; - xfer += iprot->readListBegin(_etype892, _size889); - this->part_vals.resize(_size889); - uint32_t _i893; - for (_i893 = 0; _i893 < _size889; ++_i893) + uint32_t _size919; + ::apache::thrift::protocol::TType _etype922; + xfer += iprot->readListBegin(_etype922, _size919); + this->part_vals.resize(_size919); + uint32_t _i923; + for (_i923 = 0; _i923 < _size919; ++_i923) { - xfer += iprot->readString(this->part_vals[_i893]); + xfer += iprot->readString(this->part_vals[_i923]); } xfer += iprot->readListEnd(); } @@ -8543,10 +8543,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter894; - for (_iter894 = this->part_vals.begin(); _iter894 != this->part_vals.end(); ++_iter894) + std::vector ::const_iterator _iter924; + for (_iter924 = this->part_vals.begin(); _iter924 != this->part_vals.end(); ++_iter924) { - xfer += oprot->writeString((*_iter894)); + xfer += oprot->writeString((*_iter924)); } xfer += oprot->writeListEnd(); } @@ -8578,10 +8578,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter895; - for (_iter895 = (*(this->part_vals)).begin(); _iter895 != (*(this->part_vals)).end(); ++_iter895) + std::vector ::const_iterator _iter925; + for (_iter925 = (*(this->part_vals)).begin(); _iter925 != (*(this->part_vals)).end(); ++_iter925) { - xfer += oprot->writeString((*_iter895)); + xfer += oprot->writeString((*_iter925)); } xfer += oprot->writeListEnd(); } @@ -9053,14 +9053,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size896; - ::apache::thrift::protocol::TType _etype899; - xfer += iprot->readListBegin(_etype899, _size896); - this->part_vals.resize(_size896); - uint32_t _i900; - for (_i900 = 0; _i900 < _size896; ++_i900) + uint32_t _size926; + ::apache::thrift::protocol::TType _etype929; + xfer += iprot->readListBegin(_etype929, _size926); + this->part_vals.resize(_size926); + uint32_t _i930; + for (_i930 = 0; _i930 < _size926; ++_i930) { - xfer += iprot->readString(this->part_vals[_i900]); + xfer += iprot->readString(this->part_vals[_i930]); } xfer += iprot->readListEnd(); } @@ -9105,10 +9105,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - 
std::vector ::const_iterator _iter901; - for (_iter901 = this->part_vals.begin(); _iter901 != this->part_vals.end(); ++_iter901) + std::vector ::const_iterator _iter931; + for (_iter931 = this->part_vals.begin(); _iter931 != this->part_vals.end(); ++_iter931) { - xfer += oprot->writeString((*_iter901)); + xfer += oprot->writeString((*_iter931)); } xfer += oprot->writeListEnd(); } @@ -9144,10 +9144,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter902; - for (_iter902 = (*(this->part_vals)).begin(); _iter902 != (*(this->part_vals)).end(); ++_iter902) + std::vector ::const_iterator _iter932; + for (_iter932 = (*(this->part_vals)).begin(); _iter932 != (*(this->part_vals)).end(); ++_iter932) { - xfer += oprot->writeString((*_iter902)); + xfer += oprot->writeString((*_iter932)); } xfer += oprot->writeListEnd(); } @@ -9950,14 +9950,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size903; - ::apache::thrift::protocol::TType _etype906; - xfer += iprot->readListBegin(_etype906, _size903); - this->part_vals.resize(_size903); - uint32_t _i907; - for (_i907 = 0; _i907 < _size903; ++_i907) + uint32_t _size933; + ::apache::thrift::protocol::TType _etype936; + xfer += iprot->readListBegin(_etype936, _size933); + this->part_vals.resize(_size933); + uint32_t _i937; + for (_i937 = 0; _i937 < _size933; ++_i937) { - xfer += iprot->readString(this->part_vals[_i907]); + xfer += iprot->readString(this->part_vals[_i937]); } xfer += iprot->readListEnd(); } @@ -10002,10 +10002,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter908; - for (_iter908 = this->part_vals.begin(); _iter908 != this->part_vals.end(); ++_iter908) + std::vector ::const_iterator _iter938; + for (_iter938 = this->part_vals.begin(); _iter938 != this->part_vals.end(); ++_iter938) { - xfer += oprot->writeString((*_iter908)); + xfer += oprot->writeString((*_iter938)); } xfer += oprot->writeListEnd(); } @@ -10041,10 +10041,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter909; - for (_iter909 = (*(this->part_vals)).begin(); _iter909 != (*(this->part_vals)).end(); ++_iter909) + std::vector ::const_iterator _iter939; + for (_iter939 = (*(this->part_vals)).begin(); _iter939 != (*(this->part_vals)).end(); ++_iter939) { - xfer += oprot->writeString((*_iter909)); + xfer += oprot->writeString((*_iter939)); } xfer += oprot->writeListEnd(); } @@ -10253,14 +10253,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size910; - ::apache::thrift::protocol::TType _etype913; - xfer += iprot->readListBegin(_etype913, _size910); 
- this->part_vals.resize(_size910); - uint32_t _i914; - for (_i914 = 0; _i914 < _size910; ++_i914) + uint32_t _size940; + ::apache::thrift::protocol::TType _etype943; + xfer += iprot->readListBegin(_etype943, _size940); + this->part_vals.resize(_size940); + uint32_t _i944; + for (_i944 = 0; _i944 < _size940; ++_i944) { - xfer += iprot->readString(this->part_vals[_i914]); + xfer += iprot->readString(this->part_vals[_i944]); } xfer += iprot->readListEnd(); } @@ -10313,10 +10313,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter915; - for (_iter915 = this->part_vals.begin(); _iter915 != this->part_vals.end(); ++_iter915) + std::vector ::const_iterator _iter945; + for (_iter945 = this->part_vals.begin(); _iter945 != this->part_vals.end(); ++_iter945) { - xfer += oprot->writeString((*_iter915)); + xfer += oprot->writeString((*_iter945)); } xfer += oprot->writeListEnd(); } @@ -10356,10 +10356,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter916; - for (_iter916 = (*(this->part_vals)).begin(); _iter916 != (*(this->part_vals)).end(); ++_iter916) + std::vector ::const_iterator _iter946; + for (_iter946 = (*(this->part_vals)).begin(); _iter946 != (*(this->part_vals)).end(); ++_iter946) { - xfer += oprot->writeString((*_iter916)); + xfer += oprot->writeString((*_iter946)); } xfer += oprot->writeListEnd(); } @@ -11365,14 +11365,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size917; - ::apache::thrift::protocol::TType _etype920; - xfer += iprot->readListBegin(_etype920, _size917); - this->part_vals.resize(_size917); - uint32_t _i921; - for (_i921 = 0; _i921 < _size917; ++_i921) + uint32_t _size947; + ::apache::thrift::protocol::TType _etype950; + xfer += iprot->readListBegin(_etype950, _size947); + this->part_vals.resize(_size947); + uint32_t _i951; + for (_i951 = 0; _i951 < _size947; ++_i951) { - xfer += iprot->readString(this->part_vals[_i921]); + xfer += iprot->readString(this->part_vals[_i951]); } xfer += iprot->readListEnd(); } @@ -11409,10 +11409,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter922; - for (_iter922 = this->part_vals.begin(); _iter922 != this->part_vals.end(); ++_iter922) + std::vector ::const_iterator _iter952; + for (_iter952 = this->part_vals.begin(); _iter952 != this->part_vals.end(); ++_iter952) { - xfer += oprot->writeString((*_iter922)); + xfer += oprot->writeString((*_iter952)); } xfer += oprot->writeListEnd(); } @@ -11444,10 +11444,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter923; - for (_iter923 = (*(this->part_vals)).begin(); _iter923 != (*(this->part_vals)).end(); ++_iter923) + std::vector ::const_iterator _iter953; + for (_iter953 = (*(this->part_vals)).begin(); _iter953 != (*(this->part_vals)).end(); ++_iter953) { - xfer += oprot->writeString((*_iter923)); + xfer += oprot->writeString((*_iter953)); } xfer += oprot->writeListEnd(); } @@ -11636,17 +11636,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size924; - ::apache::thrift::protocol::TType _ktype925; - ::apache::thrift::protocol::TType _vtype926; - xfer += iprot->readMapBegin(_ktype925, _vtype926, _size924); - uint32_t _i928; - for (_i928 = 0; _i928 < _size924; ++_i928) + uint32_t _size954; + ::apache::thrift::protocol::TType _ktype955; + ::apache::thrift::protocol::TType _vtype956; + xfer += iprot->readMapBegin(_ktype955, _vtype956, _size954); + uint32_t _i958; + for (_i958 = 0; _i958 < _size954; ++_i958) { - std::string _key929; - xfer += iprot->readString(_key929); - std::string& _val930 = this->partitionSpecs[_key929]; - xfer += iprot->readString(_val930); + std::string _key959; + xfer += iprot->readString(_key959); + std::string& _val960 = this->partitionSpecs[_key959]; + xfer += iprot->readString(_val960); } xfer += iprot->readMapEnd(); } @@ -11707,11 +11707,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter931; - for (_iter931 = this->partitionSpecs.begin(); _iter931 != this->partitionSpecs.end(); ++_iter931) + std::map ::const_iterator _iter961; + for (_iter961 = this->partitionSpecs.begin(); _iter961 != this->partitionSpecs.end(); ++_iter961) { - xfer += oprot->writeString(_iter931->first); - xfer += oprot->writeString(_iter931->second); + xfer += oprot->writeString(_iter961->first); + xfer += oprot->writeString(_iter961->second); } xfer += oprot->writeMapEnd(); } @@ -11751,11 +11751,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter932; - for (_iter932 = (*(this->partitionSpecs)).begin(); _iter932 != (*(this->partitionSpecs)).end(); ++_iter932) + std::map ::const_iterator _iter962; + for (_iter962 = (*(this->partitionSpecs)).begin(); _iter962 != (*(this->partitionSpecs)).end(); ++_iter962) { - xfer += oprot->writeString(_iter932->first); - xfer += oprot->writeString(_iter932->second); + xfer += oprot->writeString(_iter962->first); + xfer += oprot->writeString(_iter962->second); } xfer += oprot->writeMapEnd(); } @@ -12000,17 +12000,17 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size933; - ::apache::thrift::protocol::TType _ktype934; - ::apache::thrift::protocol::TType _vtype935; - xfer 
+= iprot->readMapBegin(_ktype934, _vtype935, _size933); - uint32_t _i937; - for (_i937 = 0; _i937 < _size933; ++_i937) + uint32_t _size963; + ::apache::thrift::protocol::TType _ktype964; + ::apache::thrift::protocol::TType _vtype965; + xfer += iprot->readMapBegin(_ktype964, _vtype965, _size963); + uint32_t _i967; + for (_i967 = 0; _i967 < _size963; ++_i967) { - std::string _key938; - xfer += iprot->readString(_key938); - std::string& _val939 = this->partitionSpecs[_key938]; - xfer += iprot->readString(_val939); + std::string _key968; + xfer += iprot->readString(_key968); + std::string& _val969 = this->partitionSpecs[_key968]; + xfer += iprot->readString(_val969); } xfer += iprot->readMapEnd(); } @@ -12071,11 +12071,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_args::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter940; - for (_iter940 = this->partitionSpecs.begin(); _iter940 != this->partitionSpecs.end(); ++_iter940) + std::map ::const_iterator _iter970; + for (_iter970 = this->partitionSpecs.begin(); _iter970 != this->partitionSpecs.end(); ++_iter970) { - xfer += oprot->writeString(_iter940->first); - xfer += oprot->writeString(_iter940->second); + xfer += oprot->writeString(_iter970->first); + xfer += oprot->writeString(_iter970->second); } xfer += oprot->writeMapEnd(); } @@ -12115,11 +12115,11 @@ uint32_t ThriftHiveMetastore_exchange_partitions_pargs::write(::apache::thrift:: xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter941; - for (_iter941 = (*(this->partitionSpecs)).begin(); _iter941 != (*(this->partitionSpecs)).end(); ++_iter941) + std::map ::const_iterator _iter971; + for (_iter971 = (*(this->partitionSpecs)).begin(); _iter971 != (*(this->partitionSpecs)).end(); ++_iter971) { - xfer += oprot->writeString(_iter941->first); - xfer += oprot->writeString(_iter941->second); + xfer += oprot->writeString(_iter971->first); + xfer += oprot->writeString(_iter971->second); } xfer += oprot->writeMapEnd(); } @@ -12176,14 +12176,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size942; - ::apache::thrift::protocol::TType _etype945; - xfer += iprot->readListBegin(_etype945, _size942); - this->success.resize(_size942); - uint32_t _i946; - for (_i946 = 0; _i946 < _size942; ++_i946) + uint32_t _size972; + ::apache::thrift::protocol::TType _etype975; + xfer += iprot->readListBegin(_etype975, _size972); + this->success.resize(_size972); + uint32_t _i976; + for (_i976 = 0; _i976 < _size972; ++_i976) { - xfer += this->success[_i946].read(iprot); + xfer += this->success[_i976].read(iprot); } xfer += iprot->readListEnd(); } @@ -12246,10 +12246,10 @@ uint32_t ThriftHiveMetastore_exchange_partitions_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter947; - for (_iter947 = this->success.begin(); _iter947 
!= this->success.end(); ++_iter947) + std::vector ::const_iterator _iter977; + for (_iter977 = this->success.begin(); _iter977 != this->success.end(); ++_iter977) { - xfer += (*_iter947).write(oprot); + xfer += (*_iter977).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12306,14 +12306,14 @@ uint32_t ThriftHiveMetastore_exchange_partitions_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size948; - ::apache::thrift::protocol::TType _etype951; - xfer += iprot->readListBegin(_etype951, _size948); - (*(this->success)).resize(_size948); - uint32_t _i952; - for (_i952 = 0; _i952 < _size948; ++_i952) + uint32_t _size978; + ::apache::thrift::protocol::TType _etype981; + xfer += iprot->readListBegin(_etype981, _size978); + (*(this->success)).resize(_size978); + uint32_t _i982; + for (_i982 = 0; _i982 < _size978; ++_i982) { - xfer += (*(this->success))[_i952].read(iprot); + xfer += (*(this->success))[_i982].read(iprot); } xfer += iprot->readListEnd(); } @@ -12412,14 +12412,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size953; - ::apache::thrift::protocol::TType _etype956; - xfer += iprot->readListBegin(_etype956, _size953); - this->part_vals.resize(_size953); - uint32_t _i957; - for (_i957 = 0; _i957 < _size953; ++_i957) + uint32_t _size983; + ::apache::thrift::protocol::TType _etype986; + xfer += iprot->readListBegin(_etype986, _size983); + this->part_vals.resize(_size983); + uint32_t _i987; + for (_i987 = 0; _i987 < _size983; ++_i987) { - xfer += iprot->readString(this->part_vals[_i957]); + xfer += iprot->readString(this->part_vals[_i987]); } xfer += iprot->readListEnd(); } @@ -12440,14 +12440,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size958; - ::apache::thrift::protocol::TType _etype961; - xfer += iprot->readListBegin(_etype961, _size958); - this->group_names.resize(_size958); - uint32_t _i962; - for (_i962 = 0; _i962 < _size958; ++_i962) + uint32_t _size988; + ::apache::thrift::protocol::TType _etype991; + xfer += iprot->readListBegin(_etype991, _size988); + this->group_names.resize(_size988); + uint32_t _i992; + for (_i992 = 0; _i992 < _size988; ++_i992) { - xfer += iprot->readString(this->group_names[_i962]); + xfer += iprot->readString(this->group_names[_i992]); } xfer += iprot->readListEnd(); } @@ -12484,10 +12484,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter963; - for (_iter963 = this->part_vals.begin(); _iter963 != this->part_vals.end(); ++_iter963) + std::vector ::const_iterator _iter993; + for (_iter993 = this->part_vals.begin(); _iter993 != this->part_vals.end(); ++_iter993) { - xfer += oprot->writeString((*_iter963)); + xfer += oprot->writeString((*_iter993)); } xfer += oprot->writeListEnd(); } @@ -12500,10 +12500,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->group_names.size())); - std::vector ::const_iterator _iter964; - for (_iter964 = this->group_names.begin(); _iter964 != this->group_names.end(); ++_iter964) + std::vector ::const_iterator _iter994; + for (_iter994 = this->group_names.begin(); _iter994 != this->group_names.end(); ++_iter994) { - xfer += oprot->writeString((*_iter964)); + xfer += oprot->writeString((*_iter994)); } xfer += oprot->writeListEnd(); } @@ -12535,10 +12535,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter965; - for (_iter965 = (*(this->part_vals)).begin(); _iter965 != (*(this->part_vals)).end(); ++_iter965) + std::vector ::const_iterator _iter995; + for (_iter995 = (*(this->part_vals)).begin(); _iter995 != (*(this->part_vals)).end(); ++_iter995) { - xfer += oprot->writeString((*_iter965)); + xfer += oprot->writeString((*_iter995)); } xfer += oprot->writeListEnd(); } @@ -12551,10 +12551,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter966; - for (_iter966 = (*(this->group_names)).begin(); _iter966 != (*(this->group_names)).end(); ++_iter966) + std::vector ::const_iterator _iter996; + for (_iter996 = (*(this->group_names)).begin(); _iter996 != (*(this->group_names)).end(); ++_iter996) { - xfer += oprot->writeString((*_iter966)); + xfer += oprot->writeString((*_iter996)); } xfer += oprot->writeListEnd(); } @@ -13113,14 +13113,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size967; - ::apache::thrift::protocol::TType _etype970; - xfer += iprot->readListBegin(_etype970, _size967); - this->success.resize(_size967); - uint32_t _i971; - for (_i971 = 0; _i971 < _size967; ++_i971) + uint32_t _size997; + ::apache::thrift::protocol::TType _etype1000; + xfer += iprot->readListBegin(_etype1000, _size997); + this->success.resize(_size997); + uint32_t _i1001; + for (_i1001 = 0; _i1001 < _size997; ++_i1001) { - xfer += this->success[_i971].read(iprot); + xfer += this->success[_i1001].read(iprot); } xfer += iprot->readListEnd(); } @@ -13167,10 +13167,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter972; - for (_iter972 = this->success.begin(); _iter972 != this->success.end(); ++_iter972) + std::vector ::const_iterator _iter1002; + for (_iter1002 = this->success.begin(); _iter1002 != this->success.end(); ++_iter1002) { - xfer += (*_iter972).write(oprot); + xfer += (*_iter1002).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13219,14 +13219,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size973; - ::apache::thrift::protocol::TType _etype976; - xfer += 
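The write side renumbers the same way (_iter963 becomes _iter993, and so on). A minimal hand-written sketch of the list<string> serializer these writers inline, under an assumed helper name:

#include <cstdint>
#include <string>
#include <vector>
#include <thrift/protocol/TProtocol.h>

// The generated code inlines this pattern with numbered iterators.
uint32_t writeStringList(::apache::thrift::protocol::TProtocol* oprot,
                         const std::vector<std::string>& vals) {
  uint32_t xfer = 0;
  xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING,
                                static_cast<uint32_t>(vals.size()));
  for (std::vector<std::string>::const_iterator it = vals.begin();
       it != vals.end(); ++it) {
    xfer += oprot->writeString(*it);  // one element per iteration
  }
  xfer += oprot->writeListEnd();
  return xfer;
}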
iprot->readListBegin(_etype976, _size973); - (*(this->success)).resize(_size973); - uint32_t _i977; - for (_i977 = 0; _i977 < _size973; ++_i977) + uint32_t _size1003; + ::apache::thrift::protocol::TType _etype1006; + xfer += iprot->readListBegin(_etype1006, _size1003); + (*(this->success)).resize(_size1003); + uint32_t _i1007; + for (_i1007 = 0; _i1007 < _size1003; ++_i1007) { - xfer += (*(this->success))[_i977].read(iprot); + xfer += (*(this->success))[_i1007].read(iprot); } xfer += iprot->readListEnd(); } @@ -13325,14 +13325,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size978; - ::apache::thrift::protocol::TType _etype981; - xfer += iprot->readListBegin(_etype981, _size978); - this->group_names.resize(_size978); - uint32_t _i982; - for (_i982 = 0; _i982 < _size978; ++_i982) + uint32_t _size1008; + ::apache::thrift::protocol::TType _etype1011; + xfer += iprot->readListBegin(_etype1011, _size1008); + this->group_names.resize(_size1008); + uint32_t _i1012; + for (_i1012 = 0; _i1012 < _size1008; ++_i1012) { - xfer += iprot->readString(this->group_names[_i982]); + xfer += iprot->readString(this->group_names[_i1012]); } xfer += iprot->readListEnd(); } @@ -13377,10 +13377,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter983; - for (_iter983 = this->group_names.begin(); _iter983 != this->group_names.end(); ++_iter983) + std::vector ::const_iterator _iter1013; + for (_iter1013 = this->group_names.begin(); _iter1013 != this->group_names.end(); ++_iter1013) { - xfer += oprot->writeString((*_iter983)); + xfer += oprot->writeString((*_iter1013)); } xfer += oprot->writeListEnd(); } @@ -13420,10 +13420,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter984; - for (_iter984 = (*(this->group_names)).begin(); _iter984 != (*(this->group_names)).end(); ++_iter984) + std::vector ::const_iterator _iter1014; + for (_iter1014 = (*(this->group_names)).begin(); _iter1014 != (*(this->group_names)).end(); ++_iter1014) { - xfer += oprot->writeString((*_iter984)); + xfer += oprot->writeString((*_iter1014)); } xfer += oprot->writeListEnd(); } @@ -13464,14 +13464,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size985; - ::apache::thrift::protocol::TType _etype988; - xfer += iprot->readListBegin(_etype988, _size985); - this->success.resize(_size985); - uint32_t _i989; - for (_i989 = 0; _i989 < _size985; ++_i989) + uint32_t _size1015; + ::apache::thrift::protocol::TType _etype1018; + xfer += iprot->readListBegin(_etype1018, _size1015); + this->success.resize(_size1015); + uint32_t _i1019; + for (_i1019 = 0; _i1019 < _size1015; ++_i1019) { - xfer += this->success[_i989].read(iprot); + xfer += this->success[_i1019].read(iprot); } xfer += iprot->readListEnd(); } @@ -13518,10 +13518,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter990; - for (_iter990 = this->success.begin(); _iter990 != this->success.end(); ++_iter990) + std::vector ::const_iterator _iter1020; + for (_iter1020 = this->success.begin(); _iter1020 != this->success.end(); ++_iter1020) { - xfer += (*_iter990).write(oprot); + xfer += (*_iter1020).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13570,14 +13570,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size991; - ::apache::thrift::protocol::TType _etype994; - xfer += iprot->readListBegin(_etype994, _size991); - (*(this->success)).resize(_size991); - uint32_t _i995; - for (_i995 = 0; _i995 < _size991; ++_i995) + uint32_t _size1021; + ::apache::thrift::protocol::TType _etype1024; + xfer += iprot->readListBegin(_etype1024, _size1021); + (*(this->success)).resize(_size1021); + uint32_t _i1025; + for (_i1025 = 0; _i1025 < _size1021; ++_i1025) { - xfer += (*(this->success))[_i995].read(iprot); + xfer += (*(this->success))[_i1025].read(iprot); } xfer += iprot->readListEnd(); } @@ -13755,14 +13755,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size996; - ::apache::thrift::protocol::TType _etype999; - xfer += iprot->readListBegin(_etype999, _size996); - this->success.resize(_size996); - uint32_t _i1000; - for (_i1000 = 0; _i1000 < _size996; ++_i1000) + uint32_t _size1026; + ::apache::thrift::protocol::TType _etype1029; + xfer += iprot->readListBegin(_etype1029, _size1026); + this->success.resize(_size1026); + uint32_t _i1030; + for (_i1030 = 0; _i1030 < _size1026; ++_i1030) { - xfer += this->success[_i1000].read(iprot); + xfer += this->success[_i1030].read(iprot); } xfer += iprot->readListEnd(); } @@ -13809,10 +13809,10 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_result::write(::apache::thrift xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1001; - for (_iter1001 = this->success.begin(); _iter1001 != this->success.end(); ++_iter1001) + std::vector ::const_iterator _iter1031; + for (_iter1031 = this->success.begin(); _iter1031 != this->success.end(); ++_iter1031) { - xfer += (*_iter1001).write(oprot); + xfer += (*_iter1031).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13861,14 +13861,14 @@ uint32_t ThriftHiveMetastore_get_partitions_pspec_presult::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1002; - ::apache::thrift::protocol::TType _etype1005; - xfer += iprot->readListBegin(_etype1005, _size1002); - (*(this->success)).resize(_size1002); - uint32_t _i1006; - for (_i1006 = 0; _i1006 < _size1002; ++_i1006) + uint32_t _size1032; + ::apache::thrift::protocol::TType _etype1035; + xfer += iprot->readListBegin(_etype1035, _size1032); + (*(this->success)).resize(_size1032); + uint32_t _i1036; + for (_i1036 = 0; _i1036 < _size1032; ++_i1036) { - xfer += (*(this->success))[_i1006].read(iprot); + xfer += 
(*(this->success))[_i1036].read(iprot); } xfer += iprot->readListEnd(); } @@ -14046,14 +14046,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1007; - ::apache::thrift::protocol::TType _etype1010; - xfer += iprot->readListBegin(_etype1010, _size1007); - this->success.resize(_size1007); - uint32_t _i1011; - for (_i1011 = 0; _i1011 < _size1007; ++_i1011) + uint32_t _size1037; + ::apache::thrift::protocol::TType _etype1040; + xfer += iprot->readListBegin(_etype1040, _size1037); + this->success.resize(_size1037); + uint32_t _i1041; + for (_i1041 = 0; _i1041 < _size1037; ++_i1041) { - xfer += iprot->readString(this->success[_i1011]); + xfer += iprot->readString(this->success[_i1041]); } xfer += iprot->readListEnd(); } @@ -14092,10 +14092,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1012; - for (_iter1012 = this->success.begin(); _iter1012 != this->success.end(); ++_iter1012) + std::vector ::const_iterator _iter1042; + for (_iter1042 = this->success.begin(); _iter1042 != this->success.end(); ++_iter1042) { - xfer += oprot->writeString((*_iter1012)); + xfer += oprot->writeString((*_iter1042)); } xfer += oprot->writeListEnd(); } @@ -14140,14 +14140,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1013; - ::apache::thrift::protocol::TType _etype1016; - xfer += iprot->readListBegin(_etype1016, _size1013); - (*(this->success)).resize(_size1013); - uint32_t _i1017; - for (_i1017 = 0; _i1017 < _size1013; ++_i1017) + uint32_t _size1043; + ::apache::thrift::protocol::TType _etype1046; + xfer += iprot->readListBegin(_etype1046, _size1043); + (*(this->success)).resize(_size1043); + uint32_t _i1047; + for (_i1047 = 0; _i1047 < _size1043; ++_i1047) { - xfer += iprot->readString((*(this->success))[_i1017]); + xfer += iprot->readString((*(this->success))[_i1047]); } xfer += iprot->readListEnd(); } @@ -14222,14 +14222,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1018; - ::apache::thrift::protocol::TType _etype1021; - xfer += iprot->readListBegin(_etype1021, _size1018); - this->part_vals.resize(_size1018); - uint32_t _i1022; - for (_i1022 = 0; _i1022 < _size1018; ++_i1022) + uint32_t _size1048; + ::apache::thrift::protocol::TType _etype1051; + xfer += iprot->readListBegin(_etype1051, _size1048); + this->part_vals.resize(_size1048); + uint32_t _i1052; + for (_i1052 = 0; _i1052 < _size1048; ++_i1052) { - xfer += iprot->readString(this->part_vals[_i1022]); + xfer += iprot->readString(this->part_vals[_i1052]); } xfer += iprot->readListEnd(); } @@ -14274,10 +14274,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1023; - for (_iter1023 = this->part_vals.begin(); _iter1023 != this->part_vals.end(); 
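On the read side, list-of-struct fields such as success (a vector of Partition results in the hunks above) follow a resize-then-read-in-place pattern. A sketch, assuming T is any Thrift-generated struct exposing read(TProtocol*); the template form is an illustration, the generated file spells each type out:

#include <cstdint>
#include <vector>
#include <thrift/protocol/TProtocol.h>

template <typename T>
uint32_t readStructList(::apache::thrift::protocol::TProtocol* iprot,
                        std::vector<T>& out) {
  uint32_t xfer = 0;
  uint32_t size;
  ::apache::thrift::protocol::TType etype;
  xfer += iprot->readListBegin(etype, size);
  out.clear();
  out.resize(size);              // default-construct slots, then fill in place
  for (uint32_t i = 0; i < size; ++i) {
    xfer += out[i].read(iprot);  // each struct reads its own fields
  }
  xfer += iprot->readListEnd();
  return xfer;
}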
++_iter1023) + std::vector ::const_iterator _iter1053; + for (_iter1053 = this->part_vals.begin(); _iter1053 != this->part_vals.end(); ++_iter1053) { - xfer += oprot->writeString((*_iter1023)); + xfer += oprot->writeString((*_iter1053)); } xfer += oprot->writeListEnd(); } @@ -14313,10 +14313,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1024; - for (_iter1024 = (*(this->part_vals)).begin(); _iter1024 != (*(this->part_vals)).end(); ++_iter1024) + std::vector ::const_iterator _iter1054; + for (_iter1054 = (*(this->part_vals)).begin(); _iter1054 != (*(this->part_vals)).end(); ++_iter1054) { - xfer += oprot->writeString((*_iter1024)); + xfer += oprot->writeString((*_iter1054)); } xfer += oprot->writeListEnd(); } @@ -14361,14 +14361,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1025; - ::apache::thrift::protocol::TType _etype1028; - xfer += iprot->readListBegin(_etype1028, _size1025); - this->success.resize(_size1025); - uint32_t _i1029; - for (_i1029 = 0; _i1029 < _size1025; ++_i1029) + uint32_t _size1055; + ::apache::thrift::protocol::TType _etype1058; + xfer += iprot->readListBegin(_etype1058, _size1055); + this->success.resize(_size1055); + uint32_t _i1059; + for (_i1059 = 0; _i1059 < _size1055; ++_i1059) { - xfer += this->success[_i1029].read(iprot); + xfer += this->success[_i1059].read(iprot); } xfer += iprot->readListEnd(); } @@ -14415,10 +14415,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1030; - for (_iter1030 = this->success.begin(); _iter1030 != this->success.end(); ++_iter1030) + std::vector ::const_iterator _iter1060; + for (_iter1060 = this->success.begin(); _iter1060 != this->success.end(); ++_iter1060) { - xfer += (*_iter1030).write(oprot); + xfer += (*_iter1060).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14467,14 +14467,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1031; - ::apache::thrift::protocol::TType _etype1034; - xfer += iprot->readListBegin(_etype1034, _size1031); - (*(this->success)).resize(_size1031); - uint32_t _i1035; - for (_i1035 = 0; _i1035 < _size1031; ++_i1035) + uint32_t _size1061; + ::apache::thrift::protocol::TType _etype1064; + xfer += iprot->readListBegin(_etype1064, _size1061); + (*(this->success)).resize(_size1061); + uint32_t _i1065; + for (_i1065 = 0; _i1065 < _size1061; ++_i1065) { - xfer += (*(this->success))[_i1035].read(iprot); + xfer += (*(this->success))[_i1065].read(iprot); } xfer += iprot->readListEnd(); } @@ -14557,14 +14557,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1036; - ::apache::thrift::protocol::TType _etype1039; - xfer += iprot->readListBegin(_etype1039, _size1036); - 
this->part_vals.resize(_size1036); - uint32_t _i1040; - for (_i1040 = 0; _i1040 < _size1036; ++_i1040) + uint32_t _size1066; + ::apache::thrift::protocol::TType _etype1069; + xfer += iprot->readListBegin(_etype1069, _size1066); + this->part_vals.resize(_size1066); + uint32_t _i1070; + for (_i1070 = 0; _i1070 < _size1066; ++_i1070) { - xfer += iprot->readString(this->part_vals[_i1040]); + xfer += iprot->readString(this->part_vals[_i1070]); } xfer += iprot->readListEnd(); } @@ -14593,14 +14593,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1041; - ::apache::thrift::protocol::TType _etype1044; - xfer += iprot->readListBegin(_etype1044, _size1041); - this->group_names.resize(_size1041); - uint32_t _i1045; - for (_i1045 = 0; _i1045 < _size1041; ++_i1045) + uint32_t _size1071; + ::apache::thrift::protocol::TType _etype1074; + xfer += iprot->readListBegin(_etype1074, _size1071); + this->group_names.resize(_size1071); + uint32_t _i1075; + for (_i1075 = 0; _i1075 < _size1071; ++_i1075) { - xfer += iprot->readString(this->group_names[_i1045]); + xfer += iprot->readString(this->group_names[_i1075]); } xfer += iprot->readListEnd(); } @@ -14637,10 +14637,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1046; - for (_iter1046 = this->part_vals.begin(); _iter1046 != this->part_vals.end(); ++_iter1046) + std::vector ::const_iterator _iter1076; + for (_iter1076 = this->part_vals.begin(); _iter1076 != this->part_vals.end(); ++_iter1076) { - xfer += oprot->writeString((*_iter1046)); + xfer += oprot->writeString((*_iter1076)); } xfer += oprot->writeListEnd(); } @@ -14657,10 +14657,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1047; - for (_iter1047 = this->group_names.begin(); _iter1047 != this->group_names.end(); ++_iter1047) + std::vector ::const_iterator _iter1077; + for (_iter1077 = this->group_names.begin(); _iter1077 != this->group_names.end(); ++_iter1077) { - xfer += oprot->writeString((*_iter1047)); + xfer += oprot->writeString((*_iter1077)); } xfer += oprot->writeListEnd(); } @@ -14692,10 +14692,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1048; - for (_iter1048 = (*(this->part_vals)).begin(); _iter1048 != (*(this->part_vals)).end(); ++_iter1048) + std::vector ::const_iterator _iter1078; + for (_iter1078 = (*(this->part_vals)).begin(); _iter1078 != (*(this->part_vals)).end(); ++_iter1078) { - xfer += oprot->writeString((*_iter1048)); + xfer += oprot->writeString((*_iter1078)); } xfer += oprot->writeListEnd(); } @@ -14712,10 +14712,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", 
::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1049; - for (_iter1049 = (*(this->group_names)).begin(); _iter1049 != (*(this->group_names)).end(); ++_iter1049) + std::vector ::const_iterator _iter1079; + for (_iter1079 = (*(this->group_names)).begin(); _iter1079 != (*(this->group_names)).end(); ++_iter1079) { - xfer += oprot->writeString((*_iter1049)); + xfer += oprot->writeString((*_iter1079)); } xfer += oprot->writeListEnd(); } @@ -14756,14 +14756,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1050; - ::apache::thrift::protocol::TType _etype1053; - xfer += iprot->readListBegin(_etype1053, _size1050); - this->success.resize(_size1050); - uint32_t _i1054; - for (_i1054 = 0; _i1054 < _size1050; ++_i1054) + uint32_t _size1080; + ::apache::thrift::protocol::TType _etype1083; + xfer += iprot->readListBegin(_etype1083, _size1080); + this->success.resize(_size1080); + uint32_t _i1084; + for (_i1084 = 0; _i1084 < _size1080; ++_i1084) { - xfer += this->success[_i1054].read(iprot); + xfer += this->success[_i1084].read(iprot); } xfer += iprot->readListEnd(); } @@ -14810,10 +14810,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1055; - for (_iter1055 = this->success.begin(); _iter1055 != this->success.end(); ++_iter1055) + std::vector ::const_iterator _iter1085; + for (_iter1085 = this->success.begin(); _iter1085 != this->success.end(); ++_iter1085) { - xfer += (*_iter1055).write(oprot); + xfer += (*_iter1085).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14862,14 +14862,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1056; - ::apache::thrift::protocol::TType _etype1059; - xfer += iprot->readListBegin(_etype1059, _size1056); - (*(this->success)).resize(_size1056); - uint32_t _i1060; - for (_i1060 = 0; _i1060 < _size1056; ++_i1060) + uint32_t _size1086; + ::apache::thrift::protocol::TType _etype1089; + xfer += iprot->readListBegin(_etype1089, _size1086); + (*(this->success)).resize(_size1086); + uint32_t _i1090; + for (_i1090 = 0; _i1090 < _size1086; ++_i1090) { - xfer += (*(this->success))[_i1060].read(iprot); + xfer += (*(this->success))[_i1090].read(iprot); } xfer += iprot->readListEnd(); } @@ -14952,14 +14952,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1061; - ::apache::thrift::protocol::TType _etype1064; - xfer += iprot->readListBegin(_etype1064, _size1061); - this->part_vals.resize(_size1061); - uint32_t _i1065; - for (_i1065 = 0; _i1065 < _size1061; ++_i1065) + uint32_t _size1091; + ::apache::thrift::protocol::TType _etype1094; + xfer += iprot->readListBegin(_etype1094, _size1091); + this->part_vals.resize(_size1091); + uint32_t _i1095; + for (_i1095 = 0; _i1095 < _size1091; ++_i1095) { - xfer += iprot->readString(this->part_vals[_i1065]); + xfer += 
iprot->readString(this->part_vals[_i1095]); } xfer += iprot->readListEnd(); } @@ -15004,10 +15004,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1066; - for (_iter1066 = this->part_vals.begin(); _iter1066 != this->part_vals.end(); ++_iter1066) + std::vector ::const_iterator _iter1096; + for (_iter1096 = this->part_vals.begin(); _iter1096 != this->part_vals.end(); ++_iter1096) { - xfer += oprot->writeString((*_iter1066)); + xfer += oprot->writeString((*_iter1096)); } xfer += oprot->writeListEnd(); } @@ -15043,10 +15043,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1067; - for (_iter1067 = (*(this->part_vals)).begin(); _iter1067 != (*(this->part_vals)).end(); ++_iter1067) + std::vector ::const_iterator _iter1097; + for (_iter1097 = (*(this->part_vals)).begin(); _iter1097 != (*(this->part_vals)).end(); ++_iter1097) { - xfer += oprot->writeString((*_iter1067)); + xfer += oprot->writeString((*_iter1097)); } xfer += oprot->writeListEnd(); } @@ -15091,14 +15091,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1068; - ::apache::thrift::protocol::TType _etype1071; - xfer += iprot->readListBegin(_etype1071, _size1068); - this->success.resize(_size1068); - uint32_t _i1072; - for (_i1072 = 0; _i1072 < _size1068; ++_i1072) + uint32_t _size1098; + ::apache::thrift::protocol::TType _etype1101; + xfer += iprot->readListBegin(_etype1101, _size1098); + this->success.resize(_size1098); + uint32_t _i1102; + for (_i1102 = 0; _i1102 < _size1098; ++_i1102) { - xfer += iprot->readString(this->success[_i1072]); + xfer += iprot->readString(this->success[_i1102]); } xfer += iprot->readListEnd(); } @@ -15145,10 +15145,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1073; - for (_iter1073 = this->success.begin(); _iter1073 != this->success.end(); ++_iter1073) + std::vector ::const_iterator _iter1103; + for (_iter1103 = this->success.begin(); _iter1103 != this->success.end(); ++_iter1103) { - xfer += oprot->writeString((*_iter1073)); + xfer += oprot->writeString((*_iter1103)); } xfer += oprot->writeListEnd(); } @@ -15197,14 +15197,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1074; - ::apache::thrift::protocol::TType _etype1077; - xfer += iprot->readListBegin(_etype1077, _size1074); - (*(this->success)).resize(_size1074); - uint32_t _i1078; - for (_i1078 = 0; _i1078 < _size1074; ++_i1078) + uint32_t _size1104; + ::apache::thrift::protocol::TType _etype1107; + xfer += iprot->readListBegin(_etype1107, _size1104); + (*(this->success)).resize(_size1104); + uint32_t 
_i1108; + for (_i1108 = 0; _i1108 < _size1104; ++_i1108) { - xfer += iprot->readString((*(this->success))[_i1078]); + xfer += iprot->readString((*(this->success))[_i1108]); } xfer += iprot->readListEnd(); } @@ -15398,14 +15398,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1079; - ::apache::thrift::protocol::TType _etype1082; - xfer += iprot->readListBegin(_etype1082, _size1079); - this->success.resize(_size1079); - uint32_t _i1083; - for (_i1083 = 0; _i1083 < _size1079; ++_i1083) + uint32_t _size1109; + ::apache::thrift::protocol::TType _etype1112; + xfer += iprot->readListBegin(_etype1112, _size1109); + this->success.resize(_size1109); + uint32_t _i1113; + for (_i1113 = 0; _i1113 < _size1109; ++_i1113) { - xfer += this->success[_i1083].read(iprot); + xfer += this->success[_i1113].read(iprot); } xfer += iprot->readListEnd(); } @@ -15452,10 +15452,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1084; - for (_iter1084 = this->success.begin(); _iter1084 != this->success.end(); ++_iter1084) + std::vector ::const_iterator _iter1114; + for (_iter1114 = this->success.begin(); _iter1114 != this->success.end(); ++_iter1114) { - xfer += (*_iter1084).write(oprot); + xfer += (*_iter1114).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15504,14 +15504,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1085; - ::apache::thrift::protocol::TType _etype1088; - xfer += iprot->readListBegin(_etype1088, _size1085); - (*(this->success)).resize(_size1085); - uint32_t _i1089; - for (_i1089 = 0; _i1089 < _size1085; ++_i1089) + uint32_t _size1115; + ::apache::thrift::protocol::TType _etype1118; + xfer += iprot->readListBegin(_etype1118, _size1115); + (*(this->success)).resize(_size1115); + uint32_t _i1119; + for (_i1119 = 0; _i1119 < _size1115; ++_i1119) { - xfer += (*(this->success))[_i1089].read(iprot); + xfer += (*(this->success))[_i1119].read(iprot); } xfer += iprot->readListEnd(); } @@ -15705,14 +15705,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1090; - ::apache::thrift::protocol::TType _etype1093; - xfer += iprot->readListBegin(_etype1093, _size1090); - this->success.resize(_size1090); - uint32_t _i1094; - for (_i1094 = 0; _i1094 < _size1090; ++_i1094) + uint32_t _size1120; + ::apache::thrift::protocol::TType _etype1123; + xfer += iprot->readListBegin(_etype1123, _size1120); + this->success.resize(_size1120); + uint32_t _i1124; + for (_i1124 = 0; _i1124 < _size1120; ++_i1124) { - xfer += this->success[_i1094].read(iprot); + xfer += this->success[_i1124].read(iprot); } xfer += iprot->readListEnd(); } @@ -15759,10 +15759,10 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1095; - for (_iter1095 = 
this->success.begin(); _iter1095 != this->success.end(); ++_iter1095) + std::vector ::const_iterator _iter1125; + for (_iter1125 = this->success.begin(); _iter1125 != this->success.end(); ++_iter1125) { - xfer += (*_iter1095).write(oprot); + xfer += (*_iter1125).write(oprot); } xfer += oprot->writeListEnd(); } @@ -15811,14 +15811,14 @@ uint32_t ThriftHiveMetastore_get_part_specs_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1096; - ::apache::thrift::protocol::TType _etype1099; - xfer += iprot->readListBegin(_etype1099, _size1096); - (*(this->success)).resize(_size1096); - uint32_t _i1100; - for (_i1100 = 0; _i1100 < _size1096; ++_i1100) + uint32_t _size1126; + ::apache::thrift::protocol::TType _etype1129; + xfer += iprot->readListBegin(_etype1129, _size1126); + (*(this->success)).resize(_size1126); + uint32_t _i1130; + for (_i1130 = 0; _i1130 < _size1126; ++_i1130) { - xfer += (*(this->success))[_i1100].read(iprot); + xfer += (*(this->success))[_i1130].read(iprot); } xfer += iprot->readListEnd(); } @@ -16387,14 +16387,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size1101; - ::apache::thrift::protocol::TType _etype1104; - xfer += iprot->readListBegin(_etype1104, _size1101); - this->names.resize(_size1101); - uint32_t _i1105; - for (_i1105 = 0; _i1105 < _size1101; ++_i1105) + uint32_t _size1131; + ::apache::thrift::protocol::TType _etype1134; + xfer += iprot->readListBegin(_etype1134, _size1131); + this->names.resize(_size1131); + uint32_t _i1135; + for (_i1135 = 0; _i1135 < _size1131; ++_i1135) { - xfer += iprot->readString(this->names[_i1105]); + xfer += iprot->readString(this->names[_i1135]); } xfer += iprot->readListEnd(); } @@ -16431,10 +16431,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter1106; - for (_iter1106 = this->names.begin(); _iter1106 != this->names.end(); ++_iter1106) + std::vector ::const_iterator _iter1136; + for (_iter1136 = this->names.begin(); _iter1136 != this->names.end(); ++_iter1136) { - xfer += oprot->writeString((*_iter1106)); + xfer += oprot->writeString((*_iter1136)); } xfer += oprot->writeListEnd(); } @@ -16466,10 +16466,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter1107; - for (_iter1107 = (*(this->names)).begin(); _iter1107 != (*(this->names)).end(); ++_iter1107) + std::vector ::const_iterator _iter1137; + for (_iter1137 = (*(this->names)).begin(); _iter1137 != (*(this->names)).end(); ++_iter1137) { - xfer += oprot->writeString((*_iter1107)); + xfer += oprot->writeString((*_iter1137)); } xfer += oprot->writeListEnd(); } @@ -16510,14 +16510,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1108; - ::apache::thrift::protocol::TType _etype1111; - xfer += iprot->readListBegin(_etype1111, _size1108); - 
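Note the (*(this->names)) dereferences in the *_pargs writers above: as far as these hunks show, the *_args structs own their fields while the *_pargs variants borrow the caller's values by const pointer, so the client stub can serialize arguments without copying them. An illustrative analogue (the struct name and field set here follow the get_partitions_by_names hunks but are assumptions, not generated code):

#include <string>
#include <vector>

struct GetPartitionsByNamesPargsSketch {
  const std::string* db_name;              // borrowed, not owned
  const std::string* tbl_name;
  const std::vector<std::string>* names;
};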
this->success.resize(_size1108); - uint32_t _i1112; - for (_i1112 = 0; _i1112 < _size1108; ++_i1112) + uint32_t _size1138; + ::apache::thrift::protocol::TType _etype1141; + xfer += iprot->readListBegin(_etype1141, _size1138); + this->success.resize(_size1138); + uint32_t _i1142; + for (_i1142 = 0; _i1142 < _size1138; ++_i1142) { - xfer += this->success[_i1112].read(iprot); + xfer += this->success[_i1142].read(iprot); } xfer += iprot->readListEnd(); } @@ -16564,10 +16564,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1113; - for (_iter1113 = this->success.begin(); _iter1113 != this->success.end(); ++_iter1113) + std::vector ::const_iterator _iter1143; + for (_iter1143 = this->success.begin(); _iter1143 != this->success.end(); ++_iter1143) { - xfer += (*_iter1113).write(oprot); + xfer += (*_iter1143).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16616,14 +16616,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1114; - ::apache::thrift::protocol::TType _etype1117; - xfer += iprot->readListBegin(_etype1117, _size1114); - (*(this->success)).resize(_size1114); - uint32_t _i1118; - for (_i1118 = 0; _i1118 < _size1114; ++_i1118) + uint32_t _size1144; + ::apache::thrift::protocol::TType _etype1147; + xfer += iprot->readListBegin(_etype1147, _size1144); + (*(this->success)).resize(_size1144); + uint32_t _i1148; + for (_i1148 = 0; _i1148 < _size1144; ++_i1148) { - xfer += (*(this->success))[_i1118].read(iprot); + xfer += (*(this->success))[_i1148].read(iprot); } xfer += iprot->readListEnd(); } @@ -16945,14 +16945,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1119; - ::apache::thrift::protocol::TType _etype1122; - xfer += iprot->readListBegin(_etype1122, _size1119); - this->new_parts.resize(_size1119); - uint32_t _i1123; - for (_i1123 = 0; _i1123 < _size1119; ++_i1123) + uint32_t _size1149; + ::apache::thrift::protocol::TType _etype1152; + xfer += iprot->readListBegin(_etype1152, _size1149); + this->new_parts.resize(_size1149); + uint32_t _i1153; + for (_i1153 = 0; _i1153 < _size1149; ++_i1153) { - xfer += this->new_parts[_i1123].read(iprot); + xfer += this->new_parts[_i1153].read(iprot); } xfer += iprot->readListEnd(); } @@ -16989,10 +16989,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1124; - for (_iter1124 = this->new_parts.begin(); _iter1124 != this->new_parts.end(); ++_iter1124) + std::vector ::const_iterator _iter1154; + for (_iter1154 = this->new_parts.begin(); _iter1154 != this->new_parts.end(); ++_iter1154) { - xfer += (*_iter1124).write(oprot); + xfer += (*_iter1154).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17024,10 +17024,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 
3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1125; - for (_iter1125 = (*(this->new_parts)).begin(); _iter1125 != (*(this->new_parts)).end(); ++_iter1125) + std::vector ::const_iterator _iter1155; + for (_iter1155 = (*(this->new_parts)).begin(); _iter1155 != (*(this->new_parts)).end(); ++_iter1155) { - xfer += (*_iter1125).write(oprot); + xfer += (*_iter1155).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17212,14 +17212,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size1126; - ::apache::thrift::protocol::TType _etype1129; - xfer += iprot->readListBegin(_etype1129, _size1126); - this->new_parts.resize(_size1126); - uint32_t _i1130; - for (_i1130 = 0; _i1130 < _size1126; ++_i1130) + uint32_t _size1156; + ::apache::thrift::protocol::TType _etype1159; + xfer += iprot->readListBegin(_etype1159, _size1156); + this->new_parts.resize(_size1156); + uint32_t _i1160; + for (_i1160 = 0; _i1160 < _size1156; ++_i1160) { - xfer += this->new_parts[_i1130].read(iprot); + xfer += this->new_parts[_i1160].read(iprot); } xfer += iprot->readListEnd(); } @@ -17264,10 +17264,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_args::wri xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter1131; - for (_iter1131 = this->new_parts.begin(); _iter1131 != this->new_parts.end(); ++_iter1131) + std::vector ::const_iterator _iter1161; + for (_iter1161 = this->new_parts.begin(); _iter1161 != this->new_parts.end(); ++_iter1161) { - xfer += (*_iter1131).write(oprot); + xfer += (*_iter1161).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17303,10 +17303,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter1132; - for (_iter1132 = (*(this->new_parts)).begin(); _iter1132 != (*(this->new_parts)).end(); ++_iter1132) + std::vector ::const_iterator _iter1162; + for (_iter1162 = (*(this->new_parts)).begin(); _iter1162 != (*(this->new_parts)).end(); ++_iter1162) { - xfer += (*_iter1132).write(oprot); + xfer += (*_iter1162).write(oprot); } xfer += oprot->writeListEnd(); } @@ -17750,14 +17750,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1133; - ::apache::thrift::protocol::TType _etype1136; - xfer += iprot->readListBegin(_etype1136, _size1133); - this->part_vals.resize(_size1133); - uint32_t _i1137; - for (_i1137 = 0; _i1137 < _size1133; ++_i1137) + uint32_t _size1163; + ::apache::thrift::protocol::TType _etype1166; + xfer += iprot->readListBegin(_etype1166, _size1163); + this->part_vals.resize(_size1163); + uint32_t _i1167; + for (_i1167 = 0; _i1167 < _size1163; ++_i1167) { - xfer += iprot->readString(this->part_vals[_i1137]); + xfer += iprot->readString(this->part_vals[_i1167]); } xfer += iprot->readListEnd(); } @@ -17802,10 +17802,10 @@ uint32_t 
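The struct-valued lists written in the alter_partitions hunks use the same shape as the string lists, delegating to each element's write(). A hand-written sketch, with T standing in for a generated struct such as Partition (the template is illustrative):

#include <cstdint>
#include <vector>
#include <thrift/protocol/TProtocol.h>

template <typename T>
uint32_t writeStructList(::apache::thrift::protocol::TProtocol* oprot,
                         const std::vector<T>& parts) {
  uint32_t xfer = 0;
  xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT,
                                static_cast<uint32_t>(parts.size()));
  for (typename std::vector<T>::const_iterator it = parts.begin();
       it != parts.end(); ++it) {
    xfer += (*it).write(oprot);  // each struct serializes its own fields
  }
  xfer += oprot->writeListEnd();
  return xfer;
}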
ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1138; - for (_iter1138 = this->part_vals.begin(); _iter1138 != this->part_vals.end(); ++_iter1138) + std::vector ::const_iterator _iter1168; + for (_iter1168 = this->part_vals.begin(); _iter1168 != this->part_vals.end(); ++_iter1168) { - xfer += oprot->writeString((*_iter1138)); + xfer += oprot->writeString((*_iter1168)); } xfer += oprot->writeListEnd(); } @@ -17841,10 +17841,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1139; - for (_iter1139 = (*(this->part_vals)).begin(); _iter1139 != (*(this->part_vals)).end(); ++_iter1139) + std::vector ::const_iterator _iter1169; + for (_iter1169 = (*(this->part_vals)).begin(); _iter1169 != (*(this->part_vals)).end(); ++_iter1169) { - xfer += oprot->writeString((*_iter1139)); + xfer += oprot->writeString((*_iter1169)); } xfer += oprot->writeListEnd(); } @@ -18017,14 +18017,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size1140; - ::apache::thrift::protocol::TType _etype1143; - xfer += iprot->readListBegin(_etype1143, _size1140); - this->part_vals.resize(_size1140); - uint32_t _i1144; - for (_i1144 = 0; _i1144 < _size1140; ++_i1144) + uint32_t _size1170; + ::apache::thrift::protocol::TType _etype1173; + xfer += iprot->readListBegin(_etype1173, _size1170); + this->part_vals.resize(_size1170); + uint32_t _i1174; + for (_i1174 = 0; _i1174 < _size1170; ++_i1174) { - xfer += iprot->readString(this->part_vals[_i1144]); + xfer += iprot->readString(this->part_vals[_i1174]); } xfer += iprot->readListEnd(); } @@ -18061,10 +18061,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter1145; - for (_iter1145 = this->part_vals.begin(); _iter1145 != this->part_vals.end(); ++_iter1145) + std::vector ::const_iterator _iter1175; + for (_iter1175 = this->part_vals.begin(); _iter1175 != this->part_vals.end(); ++_iter1175) { - xfer += oprot->writeString((*_iter1145)); + xfer += oprot->writeString((*_iter1175)); } xfer += oprot->writeListEnd(); } @@ -18092,10 +18092,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter1146; - for (_iter1146 = (*(this->part_vals)).begin(); _iter1146 != (*(this->part_vals)).end(); ++_iter1146) + std::vector ::const_iterator _iter1176; + for (_iter1176 = (*(this->part_vals)).begin(); _iter1176 != (*(this->part_vals)).end(); ++_iter1176) { - xfer += oprot->writeString((*_iter1146)); + xfer += oprot->writeString((*_iter1176)); } xfer += 
oprot->writeListEnd(); } @@ -18570,14 +18570,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1147; - ::apache::thrift::protocol::TType _etype1150; - xfer += iprot->readListBegin(_etype1150, _size1147); - this->success.resize(_size1147); - uint32_t _i1151; - for (_i1151 = 0; _i1151 < _size1147; ++_i1151) + uint32_t _size1177; + ::apache::thrift::protocol::TType _etype1180; + xfer += iprot->readListBegin(_etype1180, _size1177); + this->success.resize(_size1177); + uint32_t _i1181; + for (_i1181 = 0; _i1181 < _size1177; ++_i1181) { - xfer += iprot->readString(this->success[_i1151]); + xfer += iprot->readString(this->success[_i1181]); } xfer += iprot->readListEnd(); } @@ -18616,10 +18616,10 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1152; - for (_iter1152 = this->success.begin(); _iter1152 != this->success.end(); ++_iter1152) + std::vector ::const_iterator _iter1182; + for (_iter1182 = this->success.begin(); _iter1182 != this->success.end(); ++_iter1182) { - xfer += oprot->writeString((*_iter1152)); + xfer += oprot->writeString((*_iter1182)); } xfer += oprot->writeListEnd(); } @@ -18664,14 +18664,14 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1153; - ::apache::thrift::protocol::TType _etype1156; - xfer += iprot->readListBegin(_etype1156, _size1153); - (*(this->success)).resize(_size1153); - uint32_t _i1157; - for (_i1157 = 0; _i1157 < _size1153; ++_i1157) + uint32_t _size1183; + ::apache::thrift::protocol::TType _etype1186; + xfer += iprot->readListBegin(_etype1186, _size1183); + (*(this->success)).resize(_size1183); + uint32_t _i1187; + for (_i1187 = 0; _i1187 < _size1183; ++_i1187) { - xfer += iprot->readString((*(this->success))[_i1157]); + xfer += iprot->readString((*(this->success))[_i1187]); } xfer += iprot->readListEnd(); } @@ -18809,17 +18809,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size1158; - ::apache::thrift::protocol::TType _ktype1159; - ::apache::thrift::protocol::TType _vtype1160; - xfer += iprot->readMapBegin(_ktype1159, _vtype1160, _size1158); - uint32_t _i1162; - for (_i1162 = 0; _i1162 < _size1158; ++_i1162) + uint32_t _size1188; + ::apache::thrift::protocol::TType _ktype1189; + ::apache::thrift::protocol::TType _vtype1190; + xfer += iprot->readMapBegin(_ktype1189, _vtype1190, _size1188); + uint32_t _i1192; + for (_i1192 = 0; _i1192 < _size1188; ++_i1192) { - std::string _key1163; - xfer += iprot->readString(_key1163); - std::string& _val1164 = this->success[_key1163]; - xfer += iprot->readString(_val1164); + std::string _key1193; + xfer += iprot->readString(_key1193); + std::string& _val1194 = this->success[_key1193]; + xfer += iprot->readString(_val1194); } xfer += iprot->readMapEnd(); } @@ -18858,11 +18858,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += 
oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter1165; - for (_iter1165 = this->success.begin(); _iter1165 != this->success.end(); ++_iter1165) + std::map ::const_iterator _iter1195; + for (_iter1195 = this->success.begin(); _iter1195 != this->success.end(); ++_iter1195) { - xfer += oprot->writeString(_iter1165->first); - xfer += oprot->writeString(_iter1165->second); + xfer += oprot->writeString(_iter1195->first); + xfer += oprot->writeString(_iter1195->second); } xfer += oprot->writeMapEnd(); } @@ -18907,17 +18907,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size1166; - ::apache::thrift::protocol::TType _ktype1167; - ::apache::thrift::protocol::TType _vtype1168; - xfer += iprot->readMapBegin(_ktype1167, _vtype1168, _size1166); - uint32_t _i1170; - for (_i1170 = 0; _i1170 < _size1166; ++_i1170) + uint32_t _size1196; + ::apache::thrift::protocol::TType _ktype1197; + ::apache::thrift::protocol::TType _vtype1198; + xfer += iprot->readMapBegin(_ktype1197, _vtype1198, _size1196); + uint32_t _i1200; + for (_i1200 = 0; _i1200 < _size1196; ++_i1200) { - std::string _key1171; - xfer += iprot->readString(_key1171); - std::string& _val1172 = (*(this->success))[_key1171]; - xfer += iprot->readString(_val1172); + std::string _key1201; + xfer += iprot->readString(_key1201); + std::string& _val1202 = (*(this->success))[_key1201]; + xfer += iprot->readString(_val1202); } xfer += iprot->readMapEnd(); } @@ -18992,17 +18992,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1173; - ::apache::thrift::protocol::TType _ktype1174; - ::apache::thrift::protocol::TType _vtype1175; - xfer += iprot->readMapBegin(_ktype1174, _vtype1175, _size1173); - uint32_t _i1177; - for (_i1177 = 0; _i1177 < _size1173; ++_i1177) + uint32_t _size1203; + ::apache::thrift::protocol::TType _ktype1204; + ::apache::thrift::protocol::TType _vtype1205; + xfer += iprot->readMapBegin(_ktype1204, _vtype1205, _size1203); + uint32_t _i1207; + for (_i1207 = 0; _i1207 < _size1203; ++_i1207) { - std::string _key1178; - xfer += iprot->readString(_key1178); - std::string& _val1179 = this->part_vals[_key1178]; - xfer += iprot->readString(_val1179); + std::string _key1208; + xfer += iprot->readString(_key1208); + std::string& _val1209 = this->part_vals[_key1208]; + xfer += iprot->readString(_val1209); } xfer += iprot->readMapEnd(); } @@ -19013,9 +19013,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1180; - xfer += iprot->readI32(ecast1180); - this->eventType = (PartitionEventType::type)ecast1180; + int32_t ecast1210; + xfer += iprot->readI32(ecast1210); + this->eventType = (PartitionEventType::type)ecast1210; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -19049,11 +19049,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1181; - for (_iter1181 
= this->part_vals.begin(); _iter1181 != this->part_vals.end(); ++_iter1181) + std::map ::const_iterator _iter1211; + for (_iter1211 = this->part_vals.begin(); _iter1211 != this->part_vals.end(); ++_iter1211) { - xfer += oprot->writeString(_iter1181->first); - xfer += oprot->writeString(_iter1181->second); + xfer += oprot->writeString(_iter1211->first); + xfer += oprot->writeString(_iter1211->second); } xfer += oprot->writeMapEnd(); } @@ -19089,11 +19089,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1182; - for (_iter1182 = (*(this->part_vals)).begin(); _iter1182 != (*(this->part_vals)).end(); ++_iter1182) + std::map ::const_iterator _iter1212; + for (_iter1212 = (*(this->part_vals)).begin(); _iter1212 != (*(this->part_vals)).end(); ++_iter1212) { - xfer += oprot->writeString(_iter1182->first); - xfer += oprot->writeString(_iter1182->second); + xfer += oprot->writeString(_iter1212->first); + xfer += oprot->writeString(_iter1212->second); } xfer += oprot->writeMapEnd(); } @@ -19362,17 +19362,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size1183; - ::apache::thrift::protocol::TType _ktype1184; - ::apache::thrift::protocol::TType _vtype1185; - xfer += iprot->readMapBegin(_ktype1184, _vtype1185, _size1183); - uint32_t _i1187; - for (_i1187 = 0; _i1187 < _size1183; ++_i1187) + uint32_t _size1213; + ::apache::thrift::protocol::TType _ktype1214; + ::apache::thrift::protocol::TType _vtype1215; + xfer += iprot->readMapBegin(_ktype1214, _vtype1215, _size1213); + uint32_t _i1217; + for (_i1217 = 0; _i1217 < _size1213; ++_i1217) { - std::string _key1188; - xfer += iprot->readString(_key1188); - std::string& _val1189 = this->part_vals[_key1188]; - xfer += iprot->readString(_val1189); + std::string _key1218; + xfer += iprot->readString(_key1218); + std::string& _val1219 = this->part_vals[_key1218]; + xfer += iprot->readString(_val1219); } xfer += iprot->readMapEnd(); } @@ -19383,9 +19383,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1190; - xfer += iprot->readI32(ecast1190); - this->eventType = (PartitionEventType::type)ecast1190; + int32_t ecast1220; + xfer += iprot->readI32(ecast1220); + this->eventType = (PartitionEventType::type)ecast1220; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -19419,11 +19419,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter1191; - for (_iter1191 = this->part_vals.begin(); _iter1191 != this->part_vals.end(); ++_iter1191) + std::map ::const_iterator _iter1221; + for (_iter1221 = this->part_vals.begin(); _iter1221 != this->part_vals.end(); ++_iter1221) { - xfer += oprot->writeString(_iter1191->first); - xfer += oprot->writeString(_iter1191->second); + xfer += oprot->writeString(_iter1221->first); + xfer += 
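The ecast temporaries in the markPartitionForEvent and isPartitionMarkedForEvent hunks decode enum fields: Thrift carries enums as i32 on the wire, and the generated reader casts back to PartitionEventType. The same idiom as a free function (the wrapper is my illustration; the generated code inlines it):

#include <cstdint>
#include <thrift/protocol/TProtocol.h>
#include "hive_metastore_types.h"  // generated header defining PartitionEventType

uint32_t readEventType(::apache::thrift::protocol::TProtocol* iprot,
                       PartitionEventType::type& out) {
  uint32_t xfer = 0;
  int32_t ecast;
  xfer += iprot->readI32(ecast);          // enums travel as i32 on the wire
  out = (PartitionEventType::type)ecast;  // same C-style cast as the generated code
  return xfer;
}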
oprot->writeString(_iter1221->second); } xfer += oprot->writeMapEnd(); } @@ -19459,11 +19459,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter1192; - for (_iter1192 = (*(this->part_vals)).begin(); _iter1192 != (*(this->part_vals)).end(); ++_iter1192) + std::map ::const_iterator _iter1222; + for (_iter1222 = (*(this->part_vals)).begin(); _iter1222 != (*(this->part_vals)).end(); ++_iter1222) { - xfer += oprot->writeString(_iter1192->first); - xfer += oprot->writeString(_iter1192->second); + xfer += oprot->writeString(_iter1222->first); + xfer += oprot->writeString(_iter1222->second); } xfer += oprot->writeMapEnd(); } @@ -20899,14 +20899,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1193; - ::apache::thrift::protocol::TType _etype1196; - xfer += iprot->readListBegin(_etype1196, _size1193); - this->success.resize(_size1193); - uint32_t _i1197; - for (_i1197 = 0; _i1197 < _size1193; ++_i1197) + uint32_t _size1223; + ::apache::thrift::protocol::TType _etype1226; + xfer += iprot->readListBegin(_etype1226, _size1223); + this->success.resize(_size1223); + uint32_t _i1227; + for (_i1227 = 0; _i1227 < _size1223; ++_i1227) { - xfer += this->success[_i1197].read(iprot); + xfer += this->success[_i1227].read(iprot); } xfer += iprot->readListEnd(); } @@ -20953,10 +20953,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1198; - for (_iter1198 = this->success.begin(); _iter1198 != this->success.end(); ++_iter1198) + std::vector ::const_iterator _iter1228; + for (_iter1228 = this->success.begin(); _iter1228 != this->success.end(); ++_iter1228) { - xfer += (*_iter1198).write(oprot); + xfer += (*_iter1228).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21005,14 +21005,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1199; - ::apache::thrift::protocol::TType _etype1202; - xfer += iprot->readListBegin(_etype1202, _size1199); - (*(this->success)).resize(_size1199); - uint32_t _i1203; - for (_i1203 = 0; _i1203 < _size1199; ++_i1203) + uint32_t _size1229; + ::apache::thrift::protocol::TType _etype1232; + xfer += iprot->readListBegin(_etype1232, _size1229); + (*(this->success)).resize(_size1229); + uint32_t _i1233; + for (_i1233 = 0; _i1233 < _size1229; ++_i1233) { - xfer += (*(this->success))[_i1203].read(iprot); + xfer += (*(this->success))[_i1233].read(iprot); } xfer += iprot->readListEnd(); } @@ -21190,14 +21190,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1204; - ::apache::thrift::protocol::TType _etype1207; - xfer += iprot->readListBegin(_etype1207, _size1204); - this->success.resize(_size1204); - uint32_t _i1208; - for (_i1208 = 0; _i1208 < _size1204; 
++_i1208) + uint32_t _size1234; + ::apache::thrift::protocol::TType _etype1237; + xfer += iprot->readListBegin(_etype1237, _size1234); + this->success.resize(_size1234); + uint32_t _i1238; + for (_i1238 = 0; _i1238 < _size1234; ++_i1238) { - xfer += iprot->readString(this->success[_i1208]); + xfer += iprot->readString(this->success[_i1238]); } xfer += iprot->readListEnd(); } @@ -21236,10 +21236,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); - std::vector<std::string> ::const_iterator _iter1209; - for (_iter1209 = this->success.begin(); _iter1209 != this->success.end(); ++_iter1209) + std::vector<std::string> ::const_iterator _iter1239; + for (_iter1239 = this->success.begin(); _iter1239 != this->success.end(); ++_iter1239) { - xfer += oprot->writeString((*_iter1209)); + xfer += oprot->writeString((*_iter1239)); } xfer += oprot->writeListEnd(); } @@ -21284,14 +21284,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1210; - ::apache::thrift::protocol::TType _etype1213; - xfer += iprot->readListBegin(_etype1213, _size1210); - (*(this->success)).resize(_size1210); - uint32_t _i1214; - for (_i1214 = 0; _i1214 < _size1210; ++_i1214) + uint32_t _size1240; + ::apache::thrift::protocol::TType _etype1243; + xfer += iprot->readListBegin(_etype1243, _size1240); + (*(this->success)).resize(_size1240); + uint32_t _i1244; + for (_i1244 = 0; _i1244 < _size1240; ++_i1244) { - xfer += iprot->readString((*(this->success))[_i1214]); + xfer += iprot->readString((*(this->success))[_i1244]); } xfer += iprot->readListEnd(); } @@ -21321,11 +21321,11 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro } -ThriftHiveMetastore_update_table_column_statistics_args::~ThriftHiveMetastore_update_table_column_statistics_args() throw() { +ThriftHiveMetastore_get_primary_keys_args::~ThriftHiveMetastore_get_primary_keys_args() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_primary_keys_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21347,9 +21347,17 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->stats_obj.read(iprot); - this->__isset.stats_obj = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; } else { xfer += iprot->skip(ftype); } @@ -21366,13 +21374,17 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache: return xfer; } -uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_primary_keys_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0;
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_primary_keys_args"); - xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->stats_obj.write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -21381,17 +21393,21 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache } -ThriftHiveMetastore_update_table_column_statistics_pargs::~ThriftHiveMetastore_update_table_column_statistics_pargs() throw() { +ThriftHiveMetastore_get_primary_keys_pargs::~ThriftHiveMetastore_get_primary_keys_pargs() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_primary_keys_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_primary_keys_pargs"); - xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->stats_obj)).write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -21400,11 +21416,11 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apach } -ThriftHiveMetastore_update_table_column_statistics_result::~ThriftHiveMetastore_update_table_column_statistics_result() throw() { +ThriftHiveMetastore_get_primary_keys_result::~ThriftHiveMetastore_get_primary_keys_result() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_primary_keys_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21426,8 +21442,20 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apach switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1245; + ::apache::thrift::protocol::TType _etype1248; + xfer += iprot->readListBegin(_etype1248, _size1245); + this->success.resize(_size1245); + uint32_t _i1249; + for (_i1249 = 0; _i1249 < _size1245; ++_i1249) + { + xfer += this->success[_i1249].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -21449,22 +21477,6 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apach xfer += 
iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -21477,15 +21489,23 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apach return xfer; } -uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_primary_keys_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_primary_keys_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::vector<SQLPrimaryKey> ::const_iterator _iter1250; + for (_iter1250 = this->success.begin(); _iter1250 != this->success.end(); ++_iter1250) + { + xfer += (*_iter1250).write(oprot); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -21495,14 +21515,6 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apac xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o4) { - xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->o4.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -21510,11 +21522,11 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apac } -ThriftHiveMetastore_update_table_column_statistics_presult::~ThriftHiveMetastore_update_table_column_statistics_presult() throw() { +ThriftHiveMetastore_get_primary_keys_presult::~ThriftHiveMetastore_get_primary_keys_presult() throw() { } -uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_primary_keys_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21536,8 +21548,20 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apac switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size1251; + ::apache::thrift::protocol::TType _etype1254; + xfer += iprot->readListBegin(_etype1254, _size1251); + (*(this->success)).resize(_size1251); + uint32_t _i1255; + for (_i1255
= 0; _i1255 < _size1251; ++_i1255) + { + xfer += (*(this->success))[_i1255].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -21559,22 +21583,6 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apac xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -21588,11 +21596,11 @@ uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apac } -ThriftHiveMetastore_update_partition_column_statistics_args::~ThriftHiveMetastore_update_partition_column_statistics_args() throw() { +ThriftHiveMetastore_get_foreign_keys_args::~ThriftHiveMetastore_get_foreign_keys_args() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_foreign_keys_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21614,9 +21622,33 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apa switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->stats_obj.read(iprot); - this->__isset.stats_obj = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parent_db_name); + this->__isset.parent_db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parent_tbl_name); + this->__isset.parent_tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->foreign_db_name); + this->__isset.foreign_db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->foreign_tbl_name); + this->__isset.foreign_tbl_name = true; } else { xfer += iprot->skip(ftype); } @@ -21633,13 +21665,25 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apa return xfer; } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_foreign_keys_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_foreign_keys_args"); - xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->stats_obj.write(oprot); + xfer += oprot->writeFieldBegin("parent_db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->parent_db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("parent_tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += 
oprot->writeString(this->parent_tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("foreign_db_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->foreign_db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("foreign_tbl_name", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->foreign_tbl_name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -21648,17 +21692,29 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::ap } -ThriftHiveMetastore_update_partition_column_statistics_pargs::~ThriftHiveMetastore_update_partition_column_statistics_pargs() throw() { +ThriftHiveMetastore_get_foreign_keys_pargs::~ThriftHiveMetastore_get_foreign_keys_pargs() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_foreign_keys_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_foreign_keys_pargs"); - xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->stats_obj)).write(oprot); + xfer += oprot->writeFieldBegin("parent_db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->parent_db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("parent_tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->parent_tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("foreign_db_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->foreign_db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("foreign_tbl_name", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString((*(this->foreign_tbl_name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -21667,11 +21723,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::a } -ThriftHiveMetastore_update_partition_column_statistics_result::~ThriftHiveMetastore_update_partition_column_statistics_result() throw() { +ThriftHiveMetastore_get_foreign_keys_result::~ThriftHiveMetastore_get_foreign_keys_result() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_foreign_keys_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21693,8 +21749,20 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::a switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size1256; + ::apache::thrift::protocol::TType _etype1259; + xfer += iprot->readListBegin(_etype1259, _size1256); + this->success.resize(_size1256); + uint32_t _i1260; + for (_i1260 = 0; _i1260 < _size1256; ++_i1260) + { + xfer += this->success[_i1260].read(iprot); + } + xfer += iprot->readListEnd(); + } 
this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -21716,22 +21784,6 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::a xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -21744,15 +21796,23 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::a return xfer; } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_foreign_keys_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_foreign_keys_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->success.size())); + std::vector<SQLForeignKey> ::const_iterator _iter1261; + for (_iter1261 = this->success.begin(); _iter1261 != this->success.end(); ++_iter1261) + { + xfer += (*_iter1261).write(oprot); + } + xfer += oprot->writeListEnd(); + } xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -21762,14 +21822,6 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(:: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o4) { - xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->o4.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -21777,11 +21829,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(:: } -ThriftHiveMetastore_update_partition_column_statistics_presult::~ThriftHiveMetastore_update_partition_column_statistics_presult() throw() { +ThriftHiveMetastore_get_foreign_keys_presult::~ThriftHiveMetastore_get_foreign_keys_presult() throw() { } -uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_foreign_keys_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21803,8 +21855,20 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool((*(this->success))); + if (ftype == ::apache::thrift::protocol::T_LIST) { + { +
(*(this->success)).clear(); + uint32_t _size1262; + ::apache::thrift::protocol::TType _etype1265; + xfer += iprot->readListBegin(_etype1265, _size1262); + (*(this->success)).resize(_size1262); + uint32_t _i1266; + for (_i1266 = 0; _i1266 < _size1262; ++_i1266) + { + xfer += (*(this->success))[_i1266].read(iprot); + } + xfer += iprot->readListEnd(); + } this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -21826,22 +21890,6 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(:: xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -21855,11 +21903,11 @@ uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(:: } -ThriftHiveMetastore_get_table_column_statistics_args::~ThriftHiveMetastore_get_table_column_statistics_args() throw() { +ThriftHiveMetastore_update_table_column_statistics_args::~ThriftHiveMetastore_update_table_column_statistics_args() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21881,25 +21929,9 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::th switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->col_name); - this->__isset.col_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->stats_obj.read(iprot); + this->__isset.stats_obj = true; } else { xfer += iprot->skip(ftype); } @@ -21916,21 +21948,13 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::th return xfer; } -uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_args"); - xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); - xfer += 
oprot->writeString(this->col_name); + xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->stats_obj.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -21939,25 +21963,17 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::t } -ThriftHiveMetastore_get_table_column_statistics_pargs::~ThriftHiveMetastore_get_table_column_statistics_pargs() throw() { +ThriftHiveMetastore_update_table_column_statistics_pargs::~ThriftHiveMetastore_update_table_column_statistics_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_pargs"); - xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->col_name))); + xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->stats_obj)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -21966,11 +21982,11 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache:: } -ThriftHiveMetastore_get_table_column_statistics_result::~ThriftHiveMetastore_get_table_column_statistics_result() throw() { +ThriftHiveMetastore_update_table_column_statistics_result::~ThriftHiveMetastore_update_table_column_statistics_result() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -21992,8 +22008,8 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache:: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -22043,15 +22059,15 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache:: return xfer; } -uint32_t ThriftHiveMetastore_get_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_table_column_statistics_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -22076,11 +22092,11 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_result::write(::apache: } -ThriftHiveMetastore_get_table_column_statistics_presult::~ThriftHiveMetastore_get_table_column_statistics_presult() throw() { +ThriftHiveMetastore_update_table_column_statistics_presult::~ThriftHiveMetastore_update_table_column_statistics_presult() throw() { } -uint32_t ThriftHiveMetastore_get_table_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_table_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22102,8 +22118,8 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_presult::read(::apache: switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -22154,11 +22170,11 @@ uint32_t ThriftHiveMetastore_get_table_column_statistics_presult::read(::apache: } -ThriftHiveMetastore_get_partition_column_statistics_args::~ThriftHiveMetastore_get_partition_column_statistics_args() throw() { +ThriftHiveMetastore_update_partition_column_statistics_args::~ThriftHiveMetastore_update_partition_column_statistics_args() throw() { } -uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22180,33 +22196,9 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::read(::apache switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->db_name); - this->__isset.db_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 2: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->tbl_name); - this->__isset.tbl_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->part_name); - this->__isset.part_name = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRING) { - xfer += iprot->readString(this->col_name); - this->__isset.col_name = true; + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->stats_obj.read(iprot); + this->__isset.stats_obj = true; } else { xfer += iprot->skip(ftype); } @@ -22223,25 +22215,13 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::read(::apache return xfer; } -uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t 
xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_args"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString(this->db_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString(this->tbl_name); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString(this->part_name); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_args"); - xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString(this->col_name); + xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->stats_obj.write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22250,29 +22230,17 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::write(::apach } -ThriftHiveMetastore_get_partition_column_statistics_pargs::~ThriftHiveMetastore_get_partition_column_statistics_pargs() throw() { +ThriftHiveMetastore_update_partition_column_statistics_pargs::~ThriftHiveMetastore_update_partition_column_statistics_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_pargs"); - - xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); - xfer += oprot->writeString((*(this->db_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); - xfer += oprot->writeString((*(this->tbl_name))); - xfer += oprot->writeFieldEnd(); - - xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); - xfer += oprot->writeString((*(this->part_name))); - xfer += oprot->writeFieldEnd(); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_pargs"); - xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); - xfer += oprot->writeString((*(this->col_name))); + xfer += oprot->writeFieldBegin("stats_obj", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->stats_obj)).write(oprot); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22281,11 +22249,11 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_pargs::write(::apac } -ThriftHiveMetastore_get_partition_column_statistics_result::~ThriftHiveMetastore_get_partition_column_statistics_result() throw() { +ThriftHiveMetastore_update_partition_column_statistics_result::~ThriftHiveMetastore_update_partition_column_statistics_result() throw() { } -uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22307,8 +22275,8 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::read(::apac switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->success.read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -22358,15 +22326,15 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::read(::apac return xfer; } -uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_update_partition_column_statistics_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); - xfer += this->success.write(oprot); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -22391,11 +22359,11 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::write(::apa } -ThriftHiveMetastore_get_partition_column_statistics_presult::~ThriftHiveMetastore_get_partition_column_statistics_presult() throw() { +ThriftHiveMetastore_update_partition_column_statistics_presult::~ThriftHiveMetastore_update_partition_column_statistics_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partition_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_update_partition_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22417,8 +22385,8 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_presult::read(::apa switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += (*(this->success)).read(iprot); + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -22469,11 +22437,11 @@ uint32_t ThriftHiveMetastore_get_partition_column_statistics_presult::read(::apa } -ThriftHiveMetastore_get_table_statistics_req_args::~ThriftHiveMetastore_get_table_statistics_req_args() throw() { +ThriftHiveMetastore_get_table_column_statistics_args::~ThriftHiveMetastore_get_table_column_statistics_args() throw() { } -uint32_t ThriftHiveMetastore_get_table_statistics_req_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_table_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22495,9 +22463,25 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_args::read(::apache::thrif switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; + if (ftype == 
::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->col_name); + this->__isset.col_name = true; } else { xfer += iprot->skip(ftype); } @@ -22514,13 +22498,21 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_args::read(::apache::thrif return xfer; } -uint32_t ThriftHiveMetastore_get_table_statistics_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_table_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_args"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->col_name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22529,17 +22521,25 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_args::write(::apache::thri } -ThriftHiveMetastore_get_table_statistics_req_pargs::~ThriftHiveMetastore_get_table_statistics_req_pargs() throw() { +ThriftHiveMetastore_get_table_column_statistics_pargs::~ThriftHiveMetastore_get_table_column_statistics_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_table_statistics_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_table_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_pargs"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->col_name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22548,11 +22548,11 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_pargs::write(::apache::thr } 
-ThriftHiveMetastore_get_table_statistics_req_result::~ThriftHiveMetastore_get_table_statistics_req_result() throw() { +ThriftHiveMetastore_get_table_column_statistics_result::~ThriftHiveMetastore_get_table_column_statistics_result() throw() { } -uint32_t ThriftHiveMetastore_get_table_statistics_req_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_table_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22597,6 +22597,22 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_result::read(::apache::thr xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22609,11 +22625,11 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_result::read(::apache::thr return xfer; } -uint32_t ThriftHiveMetastore_get_table_statistics_req_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_table_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_column_statistics_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -22627,6 +22643,14 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_result::write(::apache::th xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -22634,11 +22658,11 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_result::write(::apache::th } -ThriftHiveMetastore_get_table_statistics_req_presult::~ThriftHiveMetastore_get_table_statistics_req_presult() throw() { +ThriftHiveMetastore_get_table_column_statistics_presult::~ThriftHiveMetastore_get_table_column_statistics_presult() throw() { } -uint32_t ThriftHiveMetastore_get_table_statistics_req_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_table_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22683,6 +22707,22 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_presult::read(::apache::th xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += 
this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22696,11 +22736,11 @@ uint32_t ThriftHiveMetastore_get_table_statistics_req_presult::read(::apache::th } -ThriftHiveMetastore_get_partitions_statistics_req_args::~ThriftHiveMetastore_get_partitions_statistics_req_args() throw() { +ThriftHiveMetastore_get_partition_column_statistics_args::~ThriftHiveMetastore_get_partition_column_statistics_args() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22722,9 +22762,33 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::read(::apache:: switch (fid) { case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->request.read(iprot); - this->__isset.request = true; + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->db_name); + this->__isset.db_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tbl_name); + this->__isset.tbl_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->part_name); + this->__isset.part_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->col_name); + this->__isset.col_name = true; } else { xfer += iprot->skip(ftype); } @@ -22741,13 +22805,25 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::read(::apache:: return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_column_statistics_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_args"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->request.write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->db_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->tbl_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->part_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->col_name); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22756,17 +22832,29 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::write(::apache: } 
-ThriftHiveMetastore_get_partitions_statistics_req_pargs::~ThriftHiveMetastore_get_partitions_statistics_req_pargs() throw() { +ThriftHiveMetastore_get_partition_column_statistics_pargs::~ThriftHiveMetastore_get_partition_column_statistics_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_statistics_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_column_statistics_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_pargs"); - xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldBegin("db_name", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->db_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("tbl_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString((*(this->tbl_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("part_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString((*(this->part_name))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("col_name", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString((*(this->col_name))); xfer += oprot->writeFieldEnd(); xfer += oprot->writeFieldStop(); @@ -22775,11 +22863,11 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_pargs::write(::apache } -ThriftHiveMetastore_get_partitions_statistics_req_result::~ThriftHiveMetastore_get_partitions_statistics_req_result() throw() { +ThriftHiveMetastore_get_partition_column_statistics_result::~ThriftHiveMetastore_get_partition_column_statistics_result() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22824,6 +22912,22 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::read(::apache xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22836,11 +22940,11 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::read(::apache return xfer; } -uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partition_column_statistics_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partition_column_statistics_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", 
::apache::thrift::protocol::T_STRUCT, 0); @@ -22854,6 +22958,14 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::write(::apach xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -22861,11 +22973,11 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::write(::apach } -ThriftHiveMetastore_get_partitions_statistics_req_presult::~ThriftHiveMetastore_get_partitions_statistics_req_presult() throw() { +ThriftHiveMetastore_get_partition_column_statistics_presult::~ThriftHiveMetastore_get_partition_column_statistics_presult() throw() { } -uint32_t ThriftHiveMetastore_get_partitions_statistics_req_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partition_column_statistics_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22910,6 +23022,22 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_presult::read(::apach xfer += iprot->skip(ftype); } break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -22923,11 +23051,11 @@ uint32_t ThriftHiveMetastore_get_partitions_statistics_req_presult::read(::apach } -ThriftHiveMetastore_get_aggr_stats_for_args::~ThriftHiveMetastore_get_aggr_stats_for_args() throw() { +ThriftHiveMetastore_get_table_statistics_req_args::~ThriftHiveMetastore_get_table_statistics_req_args() throw() { } -uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_table_statistics_req_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -22968,10 +23096,10 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::read(::apache::thrift::pro return xfer; } -uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_table_statistics_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_args"); xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); @@ -22983,14 +23111,14 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::write(::apache::thrift::pr } -ThriftHiveMetastore_get_aggr_stats_for_pargs::~ThriftHiveMetastore_get_aggr_stats_for_pargs() throw() { 
+ThriftHiveMetastore_get_table_statistics_req_pargs::~ThriftHiveMetastore_get_table_statistics_req_pargs() throw() { } -uint32_t ThriftHiveMetastore_get_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_table_statistics_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_pargs"); xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); @@ -23002,11 +23130,11 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_pargs::write(::apache::thrift::p } -ThriftHiveMetastore_get_aggr_stats_for_result::~ThriftHiveMetastore_get_aggr_stats_for_result() throw() { +ThriftHiveMetastore_get_table_statistics_req_result::~ThriftHiveMetastore_get_table_statistics_req_result() throw() { } -uint32_t ThriftHiveMetastore_get_aggr_stats_for_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_table_statistics_req_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23063,11 +23191,11 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_result::read(::apache::thrift::p return xfer; } -uint32_t ThriftHiveMetastore_get_aggr_stats_for_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_table_statistics_req_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_table_statistics_req_result"); if (this->__isset.success) { xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); @@ -23088,11 +23216,11 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_result::write(::apache::thrift:: } -ThriftHiveMetastore_get_aggr_stats_for_presult::~ThriftHiveMetastore_get_aggr_stats_for_presult() throw() { +ThriftHiveMetastore_get_table_statistics_req_presult::~ThriftHiveMetastore_get_table_statistics_req_presult() throw() { } -uint32_t ThriftHiveMetastore_get_aggr_stats_for_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_table_statistics_req_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23150,11 +23278,11 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_presult::read(::apache::thrift:: } -ThriftHiveMetastore_set_aggr_stats_for_args::~ThriftHiveMetastore_set_aggr_stats_for_args() throw() { +ThriftHiveMetastore_get_partitions_statistics_req_args::~ThriftHiveMetastore_get_partitions_statistics_req_args() throw() { } -uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23195,10 +23323,10 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::read(::apache::thrift::pro return xfer; } -uint32_t 
ThriftHiveMetastore_set_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_statistics_req_args::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_args"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_args"); xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += this->request.write(oprot); @@ -23210,14 +23338,14 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::write(::apache::thrift::pr } -ThriftHiveMetastore_set_aggr_stats_for_pargs::~ThriftHiveMetastore_set_aggr_stats_for_pargs() throw() { +ThriftHiveMetastore_get_partitions_statistics_req_pargs::~ThriftHiveMetastore_get_partitions_statistics_req_pargs() throw() { } -uint32_t ThriftHiveMetastore_set_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_statistics_req_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer = 0; apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); - xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_pargs"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_pargs"); xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); xfer += (*(this->request)).write(oprot); @@ -23229,11 +23357,11 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_pargs::write(::apache::thrift::p } -ThriftHiveMetastore_set_aggr_stats_for_result::~ThriftHiveMetastore_set_aggr_stats_for_result() throw() { +ThriftHiveMetastore_get_partitions_statistics_req_result::~ThriftHiveMetastore_get_partitions_statistics_req_result() throw() { } -uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); uint32_t xfer = 0; @@ -23255,8 +23383,8 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::read(::apache::thrift::p switch (fid) { case 0: - if (ftype == ::apache::thrift::protocol::T_BOOL) { - xfer += iprot->readBool(this->success); + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); this->__isset.success = true; } else { xfer += iprot->skip(ftype); @@ -23278,22 +23406,6 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::read(::apache::thrift::p xfer += iprot->skip(ftype); } break; - case 3: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o3.read(iprot); - this->__isset.o3 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 4: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o4.read(iprot); - this->__isset.o4 = true; - } else { - xfer += iprot->skip(ftype); - } - break; default: xfer += iprot->skip(ftype); break; @@ -23306,15 +23418,15 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::read(::apache::thrift::p return xfer; } -uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::write(::apache::thrift::protocol::TProtocol* oprot) const { +uint32_t ThriftHiveMetastore_get_partitions_statistics_req_result::write(::apache::thrift::protocol::TProtocol* oprot) const { uint32_t xfer 
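In the read() hunk just above, field id 0 switches from readBool to a struct read under the rename; note the else branch in both versions: when the incoming wire type does not match the expected one, the generated code skips the value instead of failing. That skip rule, together with the default case for unknown field ids, is what gives Thrift its forward and backward compatibility. A self-contained model of the rule, with the wire input reduced to (id, type) pairs; the names are illustrative only:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    // Minimal model of Thrift's generated read loop: for each incoming field
    // we know its id and wire type; mismatches and unknown ids are skipped,
    // never fatal. Values match Thrift's TType constants.
    enum TType { T_STOP = 0, T_BOOL = 2, T_STRUCT = 12 };
    struct Field { int16_t id; TType type; };

    void readResult(const std::vector<Field>& wire) {
      for (const Field& f : wire) {
        if (f.type == T_STOP) break;              // end of struct
        switch (f.id) {
          case 0:
            if (f.type == T_STRUCT) std::puts("read success as struct");
            else                    std::puts("type mismatch on id 0: skip");
            break;
          default:                  std::puts("unknown field id: skip");
        }
      }
    }

    int main() {
      // An old peer still sends success as T_BOOL, plus a field id we don't know.
      readResult({{0, T_BOOL}, {5, T_STRUCT}, {0, T_STOP}});
    }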
= 0; - xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_result"); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_partitions_statistics_req_result"); if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); - xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); xfer += oprot->writeFieldEnd(); } else if (this->__isset.o1) { xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); @@ -23324,14 +23436,6 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::write(::apache::thrift:: xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); xfer += this->o2.write(oprot); xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o3) { - xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); - xfer += this->o3.write(oprot); - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o4) { - xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); - xfer += this->o4.write(oprot); - xfer += oprot->writeFieldEnd(); } xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); @@ -23339,11 +23443,489 @@ uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::write(::apache::thrift:: } -ThriftHiveMetastore_set_aggr_stats_for_presult::~ThriftHiveMetastore_set_aggr_stats_for_presult() throw() { +ThriftHiveMetastore_get_partitions_statistics_req_presult::~ThriftHiveMetastore_get_partitions_statistics_req_presult() throw() { } -uint32_t ThriftHiveMetastore_set_aggr_stats_for_presult::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t ThriftHiveMetastore_get_partitions_statistics_req_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_get_aggr_stats_for_args::~ThriftHiveMetastore_get_aggr_stats_for_args() throw() { +} + + +uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, 
ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_args"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_aggr_stats_for_pargs::~ThriftHiveMetastore_get_aggr_stats_for_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_get_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_pargs"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_aggr_stats_for_result::~ThriftHiveMetastore_get_aggr_stats_for_result() throw() { +} + + +uint32_t ThriftHiveMetastore_get_aggr_stats_for_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_get_aggr_stats_for_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_get_aggr_stats_for_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer 
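The *_args/*_pargs pairs above differ only in ownership: args holds std::string and struct values because the server-side read() needs storage to decode into, while pargs holds const pointers so the client send path can serialize the caller's arguments without copying them. A tiny illustration of the two shapes; writeField is a hypothetical stand-in for the TProtocol write call:

    #include <cstddef>
    #include <iostream>
    #include <string>

    // Value-owning args (server side: the read target must own storage) versus
    // pointer-holding pargs (client side: borrow the caller's values to write).
    struct GetStatsArgs  { std::string db_name; };         // like *_args
    struct GetStatsPargs { const std::string* db_name; };  // like *_pargs

    std::size_t writeField(const std::string& s) { return s.size(); }

    int main() {
      std::string db = "default";
      GetStatsPargs pargs;
      pargs.db_name = &db;                         // no copy on the send path
      std::cout << writeField(*pargs.db_name) << " bytes written\n";
    }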
+= oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_get_aggr_stats_for_presult::~ThriftHiveMetastore_get_aggr_stats_for_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_get_aggr_stats_for_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHiveMetastore_set_aggr_stats_for_args::~ThriftHiveMetastore_set_aggr_stats_for_args() throw() { +} + + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_args"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_set_aggr_stats_for_pargs::~ThriftHiveMetastore_set_aggr_stats_for_pargs() throw() { +} + + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += 
oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_pargs"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_set_aggr_stats_for_result::~ThriftHiveMetastore_set_aggr_stats_for_result() throw() { +} + + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHiveMetastore_set_aggr_stats_for_presult::~ThriftHiveMetastore_set_aggr_stats_for_presult() throw() { +} + + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_presult::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker 
tracker(*iprot); uint32_t xfer = 0; @@ -24864,14 +25446,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1215; - ::apache::thrift::protocol::TType _etype1218; - xfer += iprot->readListBegin(_etype1218, _size1215); - this->success.resize(_size1215); - uint32_t _i1219; - for (_i1219 = 0; _i1219 < _size1215; ++_i1219) + uint32_t _size1267; + ::apache::thrift::protocol::TType _etype1270; + xfer += iprot->readListBegin(_etype1270, _size1267); + this->success.resize(_size1267); + uint32_t _i1271; + for (_i1271 = 0; _i1271 < _size1267; ++_i1271) { - xfer += iprot->readString(this->success[_i1219]); + xfer += iprot->readString(this->success[_i1271]); } xfer += iprot->readListEnd(); } @@ -24910,10 +25492,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1220; - for (_iter1220 = this->success.begin(); _iter1220 != this->success.end(); ++_iter1220) + std::vector ::const_iterator _iter1272; + for (_iter1272 = this->success.begin(); _iter1272 != this->success.end(); ++_iter1272) { - xfer += oprot->writeString((*_iter1220)); + xfer += oprot->writeString((*_iter1272)); } xfer += oprot->writeListEnd(); } @@ -24958,14 +25540,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1221; - ::apache::thrift::protocol::TType _etype1224; - xfer += iprot->readListBegin(_etype1224, _size1221); - (*(this->success)).resize(_size1221); - uint32_t _i1225; - for (_i1225 = 0; _i1225 < _size1221; ++_i1225) + uint32_t _size1273; + ::apache::thrift::protocol::TType _etype1276; + xfer += iprot->readListBegin(_etype1276, _size1273); + (*(this->success)).resize(_size1273); + uint32_t _i1277; + for (_i1277 = 0; _i1277 < _size1273; ++_i1277) { - xfer += iprot->readString((*(this->success))[_i1225]); + xfer += iprot->readString((*(this->success))[_i1277]); } xfer += iprot->readListEnd(); } @@ -25925,14 +26507,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1226; - ::apache::thrift::protocol::TType _etype1229; - xfer += iprot->readListBegin(_etype1229, _size1226); - this->success.resize(_size1226); - uint32_t _i1230; - for (_i1230 = 0; _i1230 < _size1226; ++_i1230) + uint32_t _size1278; + ::apache::thrift::protocol::TType _etype1281; + xfer += iprot->readListBegin(_etype1281, _size1278); + this->success.resize(_size1278); + uint32_t _i1282; + for (_i1282 = 0; _i1282 < _size1278; ++_i1282) { - xfer += iprot->readString(this->success[_i1230]); + xfer += iprot->readString(this->success[_i1282]); } xfer += iprot->readListEnd(); } @@ -25971,10 +26553,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1231; - for (_iter1231 = this->success.begin(); _iter1231 != this->success.end(); ++_iter1231) + std::vector ::const_iterator _iter1283; + for 
(_iter1283 = this->success.begin(); _iter1283 != this->success.end(); ++_iter1283) { - xfer += oprot->writeString((*_iter1231)); + xfer += oprot->writeString((*_iter1283)); } xfer += oprot->writeListEnd(); } @@ -26019,14 +26601,14 @@ uint32_t ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1232; - ::apache::thrift::protocol::TType _etype1235; - xfer += iprot->readListBegin(_etype1235, _size1232); - (*(this->success)).resize(_size1232); - uint32_t _i1236; - for (_i1236 = 0; _i1236 < _size1232; ++_i1236) + uint32_t _size1284; + ::apache::thrift::protocol::TType _etype1287; + xfer += iprot->readListBegin(_etype1287, _size1284); + (*(this->success)).resize(_size1284); + uint32_t _i1288; + for (_i1288 = 0; _i1288 < _size1284; ++_i1288) { - xfer += iprot->readString((*(this->success))[_i1236]); + xfer += iprot->readString((*(this->success))[_i1288]); } xfer += iprot->readListEnd(); } @@ -26099,9 +26681,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1237; - xfer += iprot->readI32(ecast1237); - this->principal_type = (PrincipalType::type)ecast1237; + int32_t ecast1289; + xfer += iprot->readI32(ecast1289); + this->principal_type = (PrincipalType::type)ecast1289; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -26117,9 +26699,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1238; - xfer += iprot->readI32(ecast1238); - this->grantorType = (PrincipalType::type)ecast1238; + int32_t ecast1290; + xfer += iprot->readI32(ecast1290); + this->grantorType = (PrincipalType::type)ecast1290; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -26390,9 +26972,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1239; - xfer += iprot->readI32(ecast1239); - this->principal_type = (PrincipalType::type)ecast1239; + int32_t ecast1291; + xfer += iprot->readI32(ecast1291); + this->principal_type = (PrincipalType::type)ecast1291; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -26623,9 +27205,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1240; - xfer += iprot->readI32(ecast1240); - this->principal_type = (PrincipalType::type)ecast1240; + int32_t ecast1292; + xfer += iprot->readI32(ecast1292); + this->principal_type = (PrincipalType::type)ecast1292; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -26714,14 +27296,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1241; - ::apache::thrift::protocol::TType _etype1244; - xfer += iprot->readListBegin(_etype1244, _size1241); - this->success.resize(_size1241); - uint32_t _i1245; - for (_i1245 = 0; _i1245 < _size1241; ++_i1245) + uint32_t _size1293; + ::apache::thrift::protocol::TType _etype1296; + xfer += iprot->readListBegin(_etype1296, _size1293); + this->success.resize(_size1293); + uint32_t _i1297; + for (_i1297 = 0; _i1297 < _size1293; ++_i1297) { - xfer += 
this->success[_i1245].read(iprot); + xfer += this->success[_i1297].read(iprot); } xfer += iprot->readListEnd(); } @@ -26760,10 +27342,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1246; - for (_iter1246 = this->success.begin(); _iter1246 != this->success.end(); ++_iter1246) + std::vector ::const_iterator _iter1298; + for (_iter1298 = this->success.begin(); _iter1298 != this->success.end(); ++_iter1298) { - xfer += (*_iter1246).write(oprot); + xfer += (*_iter1298).write(oprot); } xfer += oprot->writeListEnd(); } @@ -26808,14 +27390,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1247; - ::apache::thrift::protocol::TType _etype1250; - xfer += iprot->readListBegin(_etype1250, _size1247); - (*(this->success)).resize(_size1247); - uint32_t _i1251; - for (_i1251 = 0; _i1251 < _size1247; ++_i1251) + uint32_t _size1299; + ::apache::thrift::protocol::TType _etype1302; + xfer += iprot->readListBegin(_etype1302, _size1299); + (*(this->success)).resize(_size1299); + uint32_t _i1303; + for (_i1303 = 0; _i1303 < _size1299; ++_i1303) { - xfer += (*(this->success))[_i1251].read(iprot); + xfer += (*(this->success))[_i1303].read(iprot); } xfer += iprot->readListEnd(); } @@ -27511,14 +28093,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1252; - ::apache::thrift::protocol::TType _etype1255; - xfer += iprot->readListBegin(_etype1255, _size1252); - this->group_names.resize(_size1252); - uint32_t _i1256; - for (_i1256 = 0; _i1256 < _size1252; ++_i1256) + uint32_t _size1304; + ::apache::thrift::protocol::TType _etype1307; + xfer += iprot->readListBegin(_etype1307, _size1304); + this->group_names.resize(_size1304); + uint32_t _i1308; + for (_i1308 = 0; _i1308 < _size1304; ++_i1308) { - xfer += iprot->readString(this->group_names[_i1256]); + xfer += iprot->readString(this->group_names[_i1308]); } xfer += iprot->readListEnd(); } @@ -27555,10 +28137,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1257; - for (_iter1257 = this->group_names.begin(); _iter1257 != this->group_names.end(); ++_iter1257) + std::vector ::const_iterator _iter1309; + for (_iter1309 = this->group_names.begin(); _iter1309 != this->group_names.end(); ++_iter1309) { - xfer += oprot->writeString((*_iter1257)); + xfer += oprot->writeString((*_iter1309)); } xfer += oprot->writeListEnd(); } @@ -27590,10 +28172,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1258; - for (_iter1258 = (*(this->group_names)).begin(); _iter1258 != (*(this->group_names)).end(); ++_iter1258) + std::vector 
::const_iterator _iter1310; + for (_iter1310 = (*(this->group_names)).begin(); _iter1310 != (*(this->group_names)).end(); ++_iter1310) { - xfer += oprot->writeString((*_iter1258)); + xfer += oprot->writeString((*_iter1310)); } xfer += oprot->writeListEnd(); } @@ -27768,9 +28350,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast1259; - xfer += iprot->readI32(ecast1259); - this->principal_type = (PrincipalType::type)ecast1259; + int32_t ecast1311; + xfer += iprot->readI32(ecast1311); + this->principal_type = (PrincipalType::type)ecast1311; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -27875,14 +28457,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1260; - ::apache::thrift::protocol::TType _etype1263; - xfer += iprot->readListBegin(_etype1263, _size1260); - this->success.resize(_size1260); - uint32_t _i1264; - for (_i1264 = 0; _i1264 < _size1260; ++_i1264) + uint32_t _size1312; + ::apache::thrift::protocol::TType _etype1315; + xfer += iprot->readListBegin(_etype1315, _size1312); + this->success.resize(_size1312); + uint32_t _i1316; + for (_i1316 = 0; _i1316 < _size1312; ++_i1316) { - xfer += this->success[_i1264].read(iprot); + xfer += this->success[_i1316].read(iprot); } xfer += iprot->readListEnd(); } @@ -27921,10 +28503,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter1265; - for (_iter1265 = this->success.begin(); _iter1265 != this->success.end(); ++_iter1265) + std::vector ::const_iterator _iter1317; + for (_iter1317 = this->success.begin(); _iter1317 != this->success.end(); ++_iter1317) { - xfer += (*_iter1265).write(oprot); + xfer += (*_iter1317).write(oprot); } xfer += oprot->writeListEnd(); } @@ -27969,14 +28551,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1266; - ::apache::thrift::protocol::TType _etype1269; - xfer += iprot->readListBegin(_etype1269, _size1266); - (*(this->success)).resize(_size1266); - uint32_t _i1270; - for (_i1270 = 0; _i1270 < _size1266; ++_i1270) + uint32_t _size1318; + ::apache::thrift::protocol::TType _etype1321; + xfer += iprot->readListBegin(_etype1321, _size1318); + (*(this->success)).resize(_size1318); + uint32_t _i1322; + for (_i1322 = 0; _i1322 < _size1318; ++_i1322) { - xfer += (*(this->success))[_i1270].read(iprot); + xfer += (*(this->success))[_i1322].read(iprot); } xfer += iprot->readListEnd(); } @@ -28664,14 +29246,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size1271; - ::apache::thrift::protocol::TType _etype1274; - xfer += iprot->readListBegin(_etype1274, _size1271); - this->group_names.resize(_size1271); - uint32_t _i1275; - for (_i1275 = 0; _i1275 < _size1271; ++_i1275) + uint32_t _size1323; + ::apache::thrift::protocol::TType _etype1326; + xfer += iprot->readListBegin(_etype1326, _size1323); + this->group_names.resize(_size1323); + uint32_t 
_i1327; + for (_i1327 = 0; _i1327 < _size1323; ++_i1327) { - xfer += iprot->readString(this->group_names[_i1275]); + xfer += iprot->readString(this->group_names[_i1327]); } xfer += iprot->readListEnd(); } @@ -28704,10 +29286,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter1276; - for (_iter1276 = this->group_names.begin(); _iter1276 != this->group_names.end(); ++_iter1276) + std::vector ::const_iterator _iter1328; + for (_iter1328 = this->group_names.begin(); _iter1328 != this->group_names.end(); ++_iter1328) { - xfer += oprot->writeString((*_iter1276)); + xfer += oprot->writeString((*_iter1328)); } xfer += oprot->writeListEnd(); } @@ -28735,10 +29317,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter1277; - for (_iter1277 = (*(this->group_names)).begin(); _iter1277 != (*(this->group_names)).end(); ++_iter1277) + std::vector ::const_iterator _iter1329; + for (_iter1329 = (*(this->group_names)).begin(); _iter1329 != (*(this->group_names)).end(); ++_iter1329) { - xfer += oprot->writeString((*_iter1277)); + xfer += oprot->writeString((*_iter1329)); } xfer += oprot->writeListEnd(); } @@ -28779,14 +29361,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1278; - ::apache::thrift::protocol::TType _etype1281; - xfer += iprot->readListBegin(_etype1281, _size1278); - this->success.resize(_size1278); - uint32_t _i1282; - for (_i1282 = 0; _i1282 < _size1278; ++_i1282) + uint32_t _size1330; + ::apache::thrift::protocol::TType _etype1333; + xfer += iprot->readListBegin(_etype1333, _size1330); + this->success.resize(_size1330); + uint32_t _i1334; + for (_i1334 = 0; _i1334 < _size1330; ++_i1334) { - xfer += iprot->readString(this->success[_i1282]); + xfer += iprot->readString(this->success[_i1334]); } xfer += iprot->readListEnd(); } @@ -28825,10 +29407,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1283; - for (_iter1283 = this->success.begin(); _iter1283 != this->success.end(); ++_iter1283) + std::vector ::const_iterator _iter1335; + for (_iter1335 = this->success.begin(); _iter1335 != this->success.end(); ++_iter1335) { - xfer += oprot->writeString((*_iter1283)); + xfer += oprot->writeString((*_iter1335)); } xfer += oprot->writeListEnd(); } @@ -28873,14 +29455,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1284; - ::apache::thrift::protocol::TType _etype1287; - xfer += iprot->readListBegin(_etype1287, _size1284); - (*(this->success)).resize(_size1284); - uint32_t _i1288; - for (_i1288 = 0; _i1288 < _size1284; ++_i1288) + uint32_t _size1336; + 
::apache::thrift::protocol::TType _etype1339; + xfer += iprot->readListBegin(_etype1339, _size1336); + (*(this->success)).resize(_size1336); + uint32_t _i1340; + for (_i1340 = 0; _i1340 < _size1336; ++_i1340) { - xfer += iprot->readString((*(this->success))[_i1288]); + xfer += iprot->readString((*(this->success))[_i1340]); } xfer += iprot->readListEnd(); } @@ -30191,14 +30773,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1289; - ::apache::thrift::protocol::TType _etype1292; - xfer += iprot->readListBegin(_etype1292, _size1289); - this->success.resize(_size1289); - uint32_t _i1293; - for (_i1293 = 0; _i1293 < _size1289; ++_i1293) + uint32_t _size1341; + ::apache::thrift::protocol::TType _etype1344; + xfer += iprot->readListBegin(_etype1344, _size1341); + this->success.resize(_size1341); + uint32_t _i1345; + for (_i1345 = 0; _i1345 < _size1341; ++_i1345) { - xfer += iprot->readString(this->success[_i1293]); + xfer += iprot->readString(this->success[_i1345]); } xfer += iprot->readListEnd(); } @@ -30229,10 +30811,10 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1294; - for (_iter1294 = this->success.begin(); _iter1294 != this->success.end(); ++_iter1294) + std::vector ::const_iterator _iter1346; + for (_iter1346 = this->success.begin(); _iter1346 != this->success.end(); ++_iter1346) { - xfer += oprot->writeString((*_iter1294)); + xfer += oprot->writeString((*_iter1346)); } xfer += oprot->writeListEnd(); } @@ -30273,14 +30855,14 @@ uint32_t ThriftHiveMetastore_get_all_token_identifiers_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1295; - ::apache::thrift::protocol::TType _etype1298; - xfer += iprot->readListBegin(_etype1298, _size1295); - (*(this->success)).resize(_size1295); - uint32_t _i1299; - for (_i1299 = 0; _i1299 < _size1295; ++_i1299) + uint32_t _size1347; + ::apache::thrift::protocol::TType _etype1350; + xfer += iprot->readListBegin(_etype1350, _size1347); + (*(this->success)).resize(_size1347); + uint32_t _i1351; + for (_i1351 = 0; _i1351 < _size1347; ++_i1351) { - xfer += iprot->readString((*(this->success))[_i1299]); + xfer += iprot->readString((*(this->success))[_i1351]); } xfer += iprot->readListEnd(); } @@ -31006,14 +31588,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size1300; - ::apache::thrift::protocol::TType _etype1303; - xfer += iprot->readListBegin(_etype1303, _size1300); - this->success.resize(_size1300); - uint32_t _i1304; - for (_i1304 = 0; _i1304 < _size1300; ++_i1304) + uint32_t _size1352; + ::apache::thrift::protocol::TType _etype1355; + xfer += iprot->readListBegin(_etype1355, _size1352); + this->success.resize(_size1352); + uint32_t _i1356; + for (_i1356 = 0; _i1356 < _size1352; ++_i1356) { - xfer += iprot->readString(this->success[_i1304]); + xfer += iprot->readString(this->success[_i1356]); } xfer += iprot->readListEnd(); } @@ -31044,10 +31626,10 @@ uint32_t ThriftHiveMetastore_get_master_keys_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", 
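Everything from get_functions down to get_master_keys above is pure renumbering: the Thrift compiler names its loop temporaries from a single global counter, so the methods added by this patch shift every later suffix by 52 (_size1215 becomes _size1267, _iter1220 becomes _iter1272, and so on) with no behavioral change. The loop those temporaries serve is the generated list decode — size first, then exactly that many elements. A sketch of the shape; nextString is a hypothetical stand-in for iprot->readString:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <vector>

    static const char* kWire[] = {"alice", "bob"};
    std::string nextString(uint32_t i) { return kWire[i]; }

    int main() {
      std::vector<std::string> success;
      uint32_t _size = 2;                  // iprot->readListBegin(etype, _size)
      success.clear();
      success.resize(_size);               // preallocate, then fill in place
      for (uint32_t _i = 0; _i < _size; ++_i)
        success[_i] = nextString(_i);      // iprot->readString(success[_i])
      std::cout << success.size() << " names decoded\n";   // iprot->readListEnd()
    }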
::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter1305; - for (_iter1305 = this->success.begin(); _iter1305 != this->success.end(); ++_iter1305) + std::vector ::const_iterator _iter1357; + for (_iter1357 = this->success.begin(); _iter1357 != this->success.end(); ++_iter1357) { - xfer += oprot->writeString((*_iter1305)); + xfer += oprot->writeString((*_iter1357)); } xfer += oprot->writeListEnd(); } @@ -31088,14 +31670,14 @@ uint32_t ThriftHiveMetastore_get_master_keys_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size1306; - ::apache::thrift::protocol::TType _etype1309; - xfer += iprot->readListBegin(_etype1309, _size1306); - (*(this->success)).resize(_size1306); - uint32_t _i1310; - for (_i1310 = 0; _i1310 < _size1306; ++_i1310) + uint32_t _size1358; + ::apache::thrift::protocol::TType _etype1361; + xfer += iprot->readListBegin(_etype1361, _size1358); + (*(this->success)).resize(_size1358); + uint32_t _i1362; + for (_i1362 = 0; _i1362 < _size1358; ++_i1362) { - xfer += iprot->readString((*(this->success))[_i1310]); + xfer += iprot->readString((*(this->success))[_i1362]); } xfer += iprot->readListEnd(); } @@ -40695,6 +41277,138 @@ void ThriftHiveMetastoreClient::recv_get_index_names(std::vector & throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_index_names failed: unknown result"); } +void ThriftHiveMetastoreClient::get_primary_keys(std::vector & _return, const std::string& db_name, const std::string& tbl_name) +{ + send_get_primary_keys(db_name, tbl_name); + recv_get_primary_keys(_return); +} + +void ThriftHiveMetastoreClient::send_get_primary_keys(const std::string& db_name, const std::string& tbl_name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_primary_keys_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_primary_keys(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_primary_keys") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_primary_keys_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); +} 
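recv_get_primary_keys above completes the new synchronous client call: send the args, then read back either success, one of the declared exceptions, or a TApplicationException. A sketch of invoking it from application code, assuming Thrift's usual C++ client plumbing; the Apache::Hadoop::Hive namespace, port 9083, and the SQLPrimaryKey element type (the std::vector's template argument is truncated in the hunk above) are taken from hive_metastore.thrift conventions rather than from this patch:

    #include <vector>
    #include <boost/shared_ptr.hpp>
    #include <thrift/protocol/TBinaryProtocol.h>
    #include <thrift/transport/TBufferTransports.h>
    #include <thrift/transport/TSocket.h>
    #include "ThriftHiveMetastore.h"   // the generated header this patch extends

    using namespace apache::thrift::protocol;
    using namespace apache::thrift::transport;
    using namespace Apache::Hadoop::Hive;      // assumed cpp namespace from the IDL

    int main() {
      boost::shared_ptr<TTransport> sock(new TSocket("localhost", 9083)); // typical metastore port
      boost::shared_ptr<TTransport> transport(new TBufferedTransport(sock));
      boost::shared_ptr<TProtocol> proto(new TBinaryProtocol(transport));
      ThriftHiveMetastoreClient client(proto);

      transport->open();
      std::vector<SQLPrimaryKey> keys;         // element type assumed from the IDL
      client.get_primary_keys(keys, "default", "orders");  // send_* + recv_* under the hood
      transport->close();
      return 0;
    }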
+ +void ThriftHiveMetastoreClient::get_foreign_keys(std::vector & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) +{ + send_get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); + recv_get_foreign_keys(_return); +} + +void ThriftHiveMetastoreClient::send_get_foreign_keys(const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_foreign_keys_pargs args; + args.parent_db_name = &parent_db_name; + args.parent_tbl_name = &parent_tbl_name; + args.foreign_db_name = &foreign_db_name; + args.foreign_tbl_name = &foreign_tbl_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveMetastoreClient::recv_get_foreign_keys(std::vector & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_foreign_keys") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHiveMetastore_get_foreign_keys_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); +} + bool ThriftHiveMetastoreClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { send_update_table_column_statistics(stats_obj); @@ -49407,6 +50121,126 @@ void ThriftHiveMetastoreProcessor::process_get_index_names(int32_t seqid, ::apac } } +void ThriftHiveMetastoreProcessor::process_get_primary_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_primary_keys", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_primary_keys"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_primary_keys"); + } + + ThriftHiveMetastore_get_primary_keys_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_primary_keys", bytes); + } + + ThriftHiveMetastore_get_primary_keys_result result; + try { + 
iface_->get_primary_keys(result.success, args.db_name, args.tbl_name); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_primary_keys"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_primary_keys"); + } + + oprot->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_primary_keys", bytes); + } +} + +void ThriftHiveMetastoreProcessor::process_get_foreign_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.get_foreign_keys", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.get_foreign_keys"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.get_foreign_keys"); + } + + ThriftHiveMetastore_get_foreign_keys_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.get_foreign_keys", bytes); + } + + ThriftHiveMetastore_get_foreign_keys_result result; + try { + iface_->get_foreign_keys(result.success, args.parent_db_name, args.parent_tbl_name, args.foreign_db_name, args.foreign_tbl_name); + result.__isset.success = true; + } catch (MetaException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (NoSuchObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.get_foreign_keys"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.get_foreign_keys"); + } + + oprot->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.get_foreign_keys", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_update_table_column_statistics(int32_t seqid, 
::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -60408,6 +61242,194 @@ void ThriftHiveMetastoreConcurrentClient::recv_get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name) +{ + int32_t seqid = send_get_primary_keys(db_name, tbl_name); + recv_get_primary_keys(_return, seqid); +} + +int32_t ThriftHiveMetastoreConcurrentClient::send_get_primary_keys(const std::string& db_name, const std::string& tbl_name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_primary_keys", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_primary_keys_pargs args; + args.db_name = &db_name; + args.tbl_name = &tbl_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_primary_keys(std::vector & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_primary_keys") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_primary_keys_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_primary_keys failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveMetastoreConcurrentClient::get_foreign_keys(std::vector & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) +{ + int32_t seqid = send_get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); + recv_get_foreign_keys(_return, seqid); +} + +int32_t 
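The ConcurrentClient variant beginning above differs from the plain client in one way: every call carries a real sequence id, and recv_* loops until the reply tagged with its own seqid arrives, parking other threads' replies via updatePending/waitForWork so several threads can share one connection. A toy single-threaded model of that matching logic (the deques stand in for the wire and the sync object's pending map):

    #include <cstdint>
    #include <deque>
    #include <iostream>
    #include <utility>

    int main() {
      // Replies can arrive out of order relative to this thread's call.
      std::deque<std::pair<int32_t, const char*> > wire = {
          {8, "reply for seq 8"}, {7, "reply for seq 7"}};
      std::deque<std::pair<int32_t, const char*> > pendingForOthers;

      const int32_t mySeq = 7;
      while (!wire.empty()) {
        std::pair<int32_t, const char*> msg = wire.front();
        wire.pop_front();                   // iprot_->readMessageBegin(...)
        if (msg.first == mySeq) {           // seqid == rseqid: decode our presult
          std::cout << msg.second << "\n";
          break;
        }
        pendingForOthers.push_back(msg);    // sync_.updatePending: another thread
                                            // picks this up after waitForWork
      }
    }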
ThriftHiveMetastoreConcurrentClient::send_get_foreign_keys(const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("get_foreign_keys", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_get_foreign_keys_pargs args; + args.parent_db_name = &parent_db_name; + args.parent_tbl_name = &parent_tbl_name; + args.foreign_db_name = &foreign_db_name; + args.foreign_tbl_name = &foreign_tbl_name; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveMetastoreConcurrentClient::recv_get_foreign_keys(std::vector & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("get_foreign_keys") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHiveMetastore_get_foreign_keys_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.o1) { + sentry.commit(); + throw result.o1; + } + if (result.__isset.o2) { + sentry.commit(); + throw result.o2; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_foreign_keys failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + bool ThriftHiveMetastoreConcurrentClient::update_table_column_statistics(const ColumnStatistics& stats_obj) { int32_t seqid = send_update_table_column_statistics(stats_obj); diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 8a8f8b1..6c35a34 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -99,6 +99,8 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_index_by_name(Index& _return, const std::string& db_name, 
const std::string& tbl_name, const std::string& index_name) = 0; virtual void get_indexes(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) = 0; virtual void get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes) = 0; + virtual void get_primary_keys(std::vector & _return, const std::string& db_name, const std::string& tbl_name) = 0; + virtual void get_foreign_keys(std::vector & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) = 0; virtual bool update_table_column_statistics(const ColumnStatistics& stats_obj) = 0; virtual bool update_partition_column_statistics(const ColumnStatistics& stats_obj) = 0; virtual void get_table_column_statistics(ColumnStatistics& _return, const std::string& db_name, const std::string& tbl_name, const std::string& col_name) = 0; @@ -437,6 +439,12 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_index_names(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */, const int16_t /* max_indexes */) { return; } + void get_primary_keys(std::vector & /* _return */, const std::string& /* db_name */, const std::string& /* tbl_name */) { + return; + } + void get_foreign_keys(std::vector & /* _return */, const std::string& /* parent_db_name */, const std::string& /* parent_tbl_name */, const std::string& /* foreign_db_name */, const std::string& /* foreign_tbl_name */) { + return; + } bool update_table_column_statistics(const ColumnStatistics& /* stats_obj */) { bool _return = false; return _return; @@ -10864,6 +10872,274 @@ class ThriftHiveMetastore_get_index_names_presult { }; +typedef struct _ThriftHiveMetastore_get_primary_keys_args__isset { + _ThriftHiveMetastore_get_primary_keys_args__isset() : db_name(false), tbl_name(false) {} + bool db_name :1; + bool tbl_name :1; +} _ThriftHiveMetastore_get_primary_keys_args__isset; + +class ThriftHiveMetastore_get_primary_keys_args { + public: + + ThriftHiveMetastore_get_primary_keys_args(const ThriftHiveMetastore_get_primary_keys_args&); + ThriftHiveMetastore_get_primary_keys_args& operator=(const ThriftHiveMetastore_get_primary_keys_args&); + ThriftHiveMetastore_get_primary_keys_args() : db_name(), tbl_name() { + } + + virtual ~ThriftHiveMetastore_get_primary_keys_args() throw(); + std::string db_name; + std::string tbl_name; + + _ThriftHiveMetastore_get_primary_keys_args__isset __isset; + + void __set_db_name(const std::string& val); + + void __set_tbl_name(const std::string& val); + + bool operator == (const ThriftHiveMetastore_get_primary_keys_args & rhs) const + { + if (!(db_name == rhs.db_name)) + return false; + if (!(tbl_name == rhs.tbl_name)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_primary_keys_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_primary_keys_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_primary_keys_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_primary_keys_pargs() throw(); + const std::string* db_name; + const std::string* tbl_name; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct 
_ThriftHiveMetastore_get_primary_keys_result__isset { + _ThriftHiveMetastore_get_primary_keys_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_primary_keys_result__isset; + +class ThriftHiveMetastore_get_primary_keys_result { + public: + + ThriftHiveMetastore_get_primary_keys_result(const ThriftHiveMetastore_get_primary_keys_result&); + ThriftHiveMetastore_get_primary_keys_result& operator=(const ThriftHiveMetastore_get_primary_keys_result&); + ThriftHiveMetastore_get_primary_keys_result() { + } + + virtual ~ThriftHiveMetastore_get_primary_keys_result() throw(); + std::vector success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_primary_keys_result__isset __isset; + + void __set_success(const std::vector & val); + + void __set_o1(const MetaException& val); + + void __set_o2(const NoSuchObjectException& val); + + bool operator == (const ThriftHiveMetastore_get_primary_keys_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_primary_keys_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_primary_keys_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_primary_keys_presult__isset { + _ThriftHiveMetastore_get_primary_keys_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_primary_keys_presult__isset; + +class ThriftHiveMetastore_get_primary_keys_presult { + public: + + + virtual ~ThriftHiveMetastore_get_primary_keys_presult() throw(); + std::vector * success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_primary_keys_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHiveMetastore_get_foreign_keys_args__isset { + _ThriftHiveMetastore_get_foreign_keys_args__isset() : parent_db_name(false), parent_tbl_name(false), foreign_db_name(false), foreign_tbl_name(false) {} + bool parent_db_name :1; + bool parent_tbl_name :1; + bool foreign_db_name :1; + bool foreign_tbl_name :1; +} _ThriftHiveMetastore_get_foreign_keys_args__isset; + +class ThriftHiveMetastore_get_foreign_keys_args { + public: + + ThriftHiveMetastore_get_foreign_keys_args(const ThriftHiveMetastore_get_foreign_keys_args&); + ThriftHiveMetastore_get_foreign_keys_args& operator=(const ThriftHiveMetastore_get_foreign_keys_args&); + ThriftHiveMetastore_get_foreign_keys_args() : parent_db_name(), parent_tbl_name(), foreign_db_name(), foreign_tbl_name() { + } + + virtual ~ThriftHiveMetastore_get_foreign_keys_args() throw(); + std::string parent_db_name; + std::string parent_tbl_name; + std::string foreign_db_name; + std::string foreign_tbl_name; + + _ThriftHiveMetastore_get_foreign_keys_args__isset __isset; + + void __set_parent_db_name(const std::string& val); + + void __set_parent_tbl_name(const std::string& val); + + void __set_foreign_db_name(const std::string& val); + + void __set_foreign_tbl_name(const std::string& val); + + bool operator == (const ThriftHiveMetastore_get_foreign_keys_args & rhs) const + { + if (!(parent_db_name == rhs.parent_db_name)) + return false; + if 
(!(parent_tbl_name == rhs.parent_tbl_name)) + return false; + if (!(foreign_db_name == rhs.foreign_db_name)) + return false; + if (!(foreign_tbl_name == rhs.foreign_tbl_name)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_foreign_keys_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_foreign_keys_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_get_foreign_keys_pargs { + public: + + + virtual ~ThriftHiveMetastore_get_foreign_keys_pargs() throw(); + const std::string* parent_db_name; + const std::string* parent_tbl_name; + const std::string* foreign_db_name; + const std::string* foreign_tbl_name; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_foreign_keys_result__isset { + _ThriftHiveMetastore_get_foreign_keys_result__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_foreign_keys_result__isset; + +class ThriftHiveMetastore_get_foreign_keys_result { + public: + + ThriftHiveMetastore_get_foreign_keys_result(const ThriftHiveMetastore_get_foreign_keys_result&); + ThriftHiveMetastore_get_foreign_keys_result& operator=(const ThriftHiveMetastore_get_foreign_keys_result&); + ThriftHiveMetastore_get_foreign_keys_result() { + } + + virtual ~ThriftHiveMetastore_get_foreign_keys_result() throw(); + std::vector success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_foreign_keys_result__isset __isset; + + void __set_success(const std::vector & val); + + void __set_o1(const MetaException& val); + + void __set_o2(const NoSuchObjectException& val); + + bool operator == (const ThriftHiveMetastore_get_foreign_keys_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_get_foreign_keys_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_get_foreign_keys_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_get_foreign_keys_presult__isset { + _ThriftHiveMetastore_get_foreign_keys_presult__isset() : success(false), o1(false), o2(false) {} + bool success :1; + bool o1 :1; + bool o2 :1; +} _ThriftHiveMetastore_get_foreign_keys_presult__isset; + +class ThriftHiveMetastore_get_foreign_keys_presult { + public: + + + virtual ~ThriftHiveMetastore_get_foreign_keys_presult() throw(); + std::vector * success; + MetaException o1; + NoSuchObjectException o2; + + _ThriftHiveMetastore_get_foreign_keys_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _ThriftHiveMetastore_update_table_column_statistics_args__isset { _ThriftHiveMetastore_update_table_column_statistics_args__isset() : stats_obj(false) {} bool stats_obj :1; @@ -18631,6 +18907,12 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void get_index_names(std::vector & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes); void send_get_index_names(const std::string& db_name, const std::string& 
tbl_name, const int16_t max_indexes); void recv_get_index_names(std::vector & _return); + void get_primary_keys(std::vector & _return, const std::string& db_name, const std::string& tbl_name); + void send_get_primary_keys(const std::string& db_name, const std::string& tbl_name); + void recv_get_primary_keys(std::vector & _return); + void get_foreign_keys(std::vector & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name); + void send_get_foreign_keys(const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name); + void recv_get_foreign_keys(std::vector & _return); bool update_table_column_statistics(const ColumnStatistics& stats_obj); void send_update_table_column_statistics(const ColumnStatistics& stats_obj); bool recv_update_table_column_statistics(); @@ -18916,6 +19198,8 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_index_by_name(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_indexes(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_index_names(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_primary_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_get_foreign_keys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_update_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_update_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -19063,6 +19347,8 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["get_index_by_name"] = &ThriftHiveMetastoreProcessor::process_get_index_by_name; processMap_["get_indexes"] = &ThriftHiveMetastoreProcessor::process_get_indexes; processMap_["get_index_names"] = &ThriftHiveMetastoreProcessor::process_get_index_names; + processMap_["get_primary_keys"] = &ThriftHiveMetastoreProcessor::process_get_primary_keys; + processMap_["get_foreign_keys"] = &ThriftHiveMetastoreProcessor::process_get_foreign_keys; processMap_["update_table_column_statistics"] = &ThriftHiveMetastoreProcessor::process_update_table_column_statistics; processMap_["update_partition_column_statistics"] = &ThriftHiveMetastoreProcessor::process_update_partition_column_statistics; processMap_["get_table_column_statistics"] = &ThriftHiveMetastoreProcessor::process_get_table_column_statistics; @@ -19902,6 +20188,26 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + void get_primary_keys(std::vector & _return, const std::string& db_name, const std::string& tbl_name) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) 
{
+      ifaces_[i]->get_primary_keys(_return, db_name, tbl_name);
+    }
+    ifaces_[i]->get_primary_keys(_return, db_name, tbl_name);
+    return;
+  }
+
+  void get_foreign_keys(std::vector<SQLForeignKey> & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) {
+    size_t sz = ifaces_.size();
+    size_t i = 0;
+    for (; i < (sz - 1); ++i) {
+      ifaces_[i]->get_foreign_keys(_return, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name);
+    }
+    ifaces_[i]->get_foreign_keys(_return, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name);
+    return;
+  }
+
   bool update_table_column_statistics(const ColumnStatistics& stats_obj) {
     size_t sz = ifaces_.size();
     size_t i = 0;
@@ -20781,6 +21087,12 @@ class ThriftHiveMetastoreConcurrentClient : virtual public ThriftHiveMetastoreIf
   void get_index_names(std::vector<std::string> & _return, const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes);
   int32_t send_get_index_names(const std::string& db_name, const std::string& tbl_name, const int16_t max_indexes);
   void recv_get_index_names(std::vector<std::string> & _return, const int32_t seqid);
+  void get_primary_keys(std::vector<SQLPrimaryKey> & _return, const std::string& db_name, const std::string& tbl_name);
+  int32_t send_get_primary_keys(const std::string& db_name, const std::string& tbl_name);
+  void recv_get_primary_keys(std::vector<SQLPrimaryKey> & _return, const int32_t seqid);
+  void get_foreign_keys(std::vector<SQLForeignKey> & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name);
+  int32_t send_get_foreign_keys(const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name);
+  void recv_get_foreign_keys(std::vector<SQLForeignKey> & _return, const int32_t seqid);
   bool update_table_column_statistics(const ColumnStatistics& stats_obj);
   int32_t send_update_table_column_statistics(const ColumnStatistics& stats_obj);
   bool recv_update_table_column_statistics(const int32_t seqid);
diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
index 3e7c6e7..7d941b5 100644
--- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp
@@ -407,6 +407,16 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf {
     printf("get_index_names\n");
   }
 
+  void get_primary_keys(std::vector<SQLPrimaryKey> & _return, const std::string& db_name, const std::string& tbl_name) {
+    // Your implementation goes here
+    printf("get_primary_keys\n");
+  }
+
+  void get_foreign_keys(std::vector<SQLForeignKey> & _return, const std::string& parent_db_name, const std::string& parent_tbl_name, const std::string& foreign_db_name, const std::string& foreign_tbl_name) {
+    // Your implementation goes here
+    printf("get_foreign_keys\n");
+  }
+
   bool update_table_column_statistics(const ColumnStatistics& stats_obj) {
     // Your implementation goes here
     printf("update_table_column_statistics\n");
diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
index 2695ffa..eec704a 100644
--- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
+++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp
@@ -392,6 +392,664 @@ void FieldSchema::printTo(std::ostream& out) const {
 }
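The server skeleton above is only a stub; callers exercise the new RPCs through the generated client. The following is a minimal usage sketch, not part of the patch: it assumes a metastore Thrift endpoint on localhost:9083, the Thrift 0.9.x C++ runtime (boost::shared_ptr era), and placeholder database/table names. Linking additionally requires the fb303 and generated metastore libraries.

  #include <iostream>
  #include <vector>
  #include <boost/shared_ptr.hpp>
  #include <thrift/protocol/TBinaryProtocol.h>
  #include <thrift/transport/TSocket.h>
  #include <thrift/transport/TBufferTransports.h>
  #include "ThriftHiveMetastore.h"

  using namespace apache::thrift::protocol;
  using namespace apache::thrift::transport;
  using namespace Apache::Hadoop::Hive;

  int main() {
    // Host and port are illustrative assumptions (9083 is the usual metastore port).
    boost::shared_ptr<TSocket> socket(new TSocket("localhost", 9083));
    boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
    boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));
    ThriftHiveMetastoreClient client(protocol);

    transport->open();
    std::vector<SQLPrimaryKey> pks;
    client.get_primary_keys(pks, "default", "web_sales");  // db/table names are examples
    for (size_t i = 0; i < pks.size(); ++i) {
      pks[i].printTo(std::cout);   // generated pretty-printer, see below
      std::cout << std::endl;
    }
    transport->close();
    return 0;
  }

The blocking client pairs each call as send_X() followed by recv_X(); the ThriftHiveMetastoreConcurrentClient shown earlier exposes the same pair but returns the sequence id from send_X() so several outstanding calls can share one connection, with recv_X(seqid) demultiplexing replies via the sentry/waitForWork machinery.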
+ForeignKeyRel::~ForeignKeyRel() throw() { +} + + +void ForeignKeyRel::__set_foreignCols(const std::vector & val) { + this->foreignCols = val; +} + +void ForeignKeyRel::__set_parentCols(const std::vector & val) { + this->parentCols = val; +} + +void ForeignKeyRel::__set_foreignKeyName(const std::string& val) { + this->foreignKeyName = val; +} + +void ForeignKeyRel::__set_parentTableName(const std::string& val) { + this->parentTableName = val; +} + +void ForeignKeyRel::__set_parentTableSchema(const std::string& val) { + this->parentTableSchema = val; +} + +uint32_t ForeignKeyRel::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->foreignCols.clear(); + uint32_t _size4; + ::apache::thrift::protocol::TType _etype7; + xfer += iprot->readListBegin(_etype7, _size4); + this->foreignCols.resize(_size4); + uint32_t _i8; + for (_i8 = 0; _i8 < _size4; ++_i8) + { + xfer += this->foreignCols[_i8].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.foreignCols = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->parentCols.clear(); + uint32_t _size9; + ::apache::thrift::protocol::TType _etype12; + xfer += iprot->readListBegin(_etype12, _size9); + this->parentCols.resize(_size9); + uint32_t _i13; + for (_i13 = 0; _i13 < _size9; ++_i13) + { + xfer += this->parentCols[_i13].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.parentCols = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->foreignKeyName); + this->__isset.foreignKeyName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parentTableName); + this->__isset.parentTableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parentTableSchema); + this->__isset.parentTableSchema = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ForeignKeyRel::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ForeignKeyRel"); + + xfer += oprot->writeFieldBegin("foreignCols", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignCols.size())); + std::vector ::const_iterator _iter14; + for (_iter14 = this->foreignCols.begin(); _iter14 != this->foreignCols.end(); ++_iter14) + { + xfer += (*_iter14).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("parentCols", 
::apache::thrift::protocol::T_LIST, 2); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parentCols.size())); + std::vector ::const_iterator _iter15; + for (_iter15 = this->parentCols.begin(); _iter15 != this->parentCols.end(); ++_iter15) + { + xfer += (*_iter15).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("foreignKeyName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->foreignKeyName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("parentTableName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->parentTableName); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("parentTableSchema", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->parentTableSchema); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(ForeignKeyRel &a, ForeignKeyRel &b) { + using ::std::swap; + swap(a.foreignCols, b.foreignCols); + swap(a.parentCols, b.parentCols); + swap(a.foreignKeyName, b.foreignKeyName); + swap(a.parentTableName, b.parentTableName); + swap(a.parentTableSchema, b.parentTableSchema); + swap(a.__isset, b.__isset); +} + +ForeignKeyRel::ForeignKeyRel(const ForeignKeyRel& other16) { + foreignCols = other16.foreignCols; + parentCols = other16.parentCols; + foreignKeyName = other16.foreignKeyName; + parentTableName = other16.parentTableName; + parentTableSchema = other16.parentTableSchema; + __isset = other16.__isset; +} +ForeignKeyRel& ForeignKeyRel::operator=(const ForeignKeyRel& other17) { + foreignCols = other17.foreignCols; + parentCols = other17.parentCols; + foreignKeyName = other17.foreignKeyName; + parentTableName = other17.parentTableName; + parentTableSchema = other17.parentTableSchema; + __isset = other17.__isset; + return *this; +} +void ForeignKeyRel::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "ForeignKeyRel("; + out << "foreignCols=" << to_string(foreignCols); + out << ", " << "parentCols=" << to_string(parentCols); + out << ", " << "foreignKeyName=" << to_string(foreignKeyName); + out << ", " << "parentTableName=" << to_string(parentTableName); + out << ", " << "parentTableSchema=" << to_string(parentTableSchema); + out << ")"; +} + + +SQLPrimaryKey::~SQLPrimaryKey() throw() { +} + + +void SQLPrimaryKey::__set_table_schem(const std::string& val) { + this->table_schem = val; +} + +void SQLPrimaryKey::__set_table_name(const std::string& val) { + this->table_name = val; +} + +void SQLPrimaryKey::__set_column_name(const std::string& val) { + this->column_name = val; +} + +void SQLPrimaryKey::__set_key_seq(const int32_t val) { + this->key_seq = val; +} + +void SQLPrimaryKey::__set_pk_name(const std::string& val) { + this->pk_name = val; +} + +uint32_t SQLPrimaryKey::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += 
iprot->readString(this->table_schem); + this->__isset.table_schem = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->table_name); + this->__isset.table_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->column_name); + this->__isset.column_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->key_seq); + this->__isset.key_seq = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pk_name); + this->__isset.pk_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t SQLPrimaryKey::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("SQLPrimaryKey"); + + xfer += oprot->writeFieldBegin("table_schem", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->table_schem); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("table_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->table_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("column_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->column_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32(this->key_seq); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("pk_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->pk_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(SQLPrimaryKey &a, SQLPrimaryKey &b) { + using ::std::swap; + swap(a.table_schem, b.table_schem); + swap(a.table_name, b.table_name); + swap(a.column_name, b.column_name); + swap(a.key_seq, b.key_seq); + swap(a.pk_name, b.pk_name); + swap(a.__isset, b.__isset); +} + +SQLPrimaryKey::SQLPrimaryKey(const SQLPrimaryKey& other18) { + table_schem = other18.table_schem; + table_name = other18.table_name; + column_name = other18.column_name; + key_seq = other18.key_seq; + pk_name = other18.pk_name; + __isset = other18.__isset; +} +SQLPrimaryKey& SQLPrimaryKey::operator=(const SQLPrimaryKey& other19) { + table_schem = other19.table_schem; + table_name = other19.table_name; + column_name = other19.column_name; + key_seq = other19.key_seq; + pk_name = other19.pk_name; + __isset = other19.__isset; + return *this; +} +void SQLPrimaryKey::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "SQLPrimaryKey("; + out << "table_schem=" << to_string(table_schem); + out << ", " << "table_name=" << to_string(table_name); + out << ", " << "column_name=" << to_string(column_name); + out << ", " << "key_seq=" << to_string(key_seq); + out << ", " << "pk_name=" << to_string(pk_name); + out << ")"; +} + + +SQLForeignKey::~SQLForeignKey() throw() { +} + + +void 
SQLForeignKey::__set_pktable_schem(const std::string& val) { + this->pktable_schem = val; +} + +void SQLForeignKey::__set_pktable_name(const std::string& val) { + this->pktable_name = val; +} + +void SQLForeignKey::__set_pkcolumn_name(const std::string& val) { + this->pkcolumn_name = val; +} + +void SQLForeignKey::__set_fktable_schem(const std::string& val) { + this->fktable_schem = val; +} + +void SQLForeignKey::__set_fktable_name(const std::string& val) { + this->fktable_name = val; +} + +void SQLForeignKey::__set_fkcolumn_name(const std::string& val) { + this->fkcolumn_name = val; +} + +void SQLForeignKey::__set_key_seq(const int32_t val) { + this->key_seq = val; +} + +void SQLForeignKey::__set_update_rule(const int32_t val) { + this->update_rule = val; +} + +void SQLForeignKey::__set_delete_rule(const int32_t val) { + this->delete_rule = val; +} + +void SQLForeignKey::__set_fk_name(const std::string& val) { + this->fk_name = val; +} + +void SQLForeignKey::__set_pk_name(const std::string& val) { + this->pk_name = val; +} + +uint32_t SQLForeignKey::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pktable_schem); + this->__isset.pktable_schem = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pktable_name); + this->__isset.pktable_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pkcolumn_name); + this->__isset.pkcolumn_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->fktable_schem); + this->__isset.fktable_schem = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->fktable_name); + this->__isset.fktable_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->fkcolumn_name); + this->__isset.fkcolumn_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->key_seq); + this->__isset.key_seq = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 8: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->update_rule); + this->__isset.update_rule = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 9: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->delete_rule); + this->__isset.delete_rule = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 10: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->fk_name); + this->__isset.fk_name = true; + } else { + xfer += iprot->skip(ftype); 
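+          // (editorial annotation, not generator output) Any field whose id or
+          // wire type does not match the schema falls through to iprot->skip(ftype),
+          // which consumes the value without storing it. This skip-unknown-fields
+          // rule is what keeps the generated structs forward-compatible with
+          // writers that use a newer version of the Thrift IDL.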
+ } + break; + case 11: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->pk_name); + this->__isset.pk_name = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t SQLForeignKey::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("SQLForeignKey"); + + xfer += oprot->writeFieldBegin("pktable_schem", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->pktable_schem); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("pktable_name", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->pktable_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("pkcolumn_name", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->pkcolumn_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("fktable_schem", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->fktable_schem); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("fktable_name", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->fktable_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("fkcolumn_name", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->fkcolumn_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("key_seq", ::apache::thrift::protocol::T_I32, 7); + xfer += oprot->writeI32(this->key_seq); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("update_rule", ::apache::thrift::protocol::T_I32, 8); + xfer += oprot->writeI32(this->update_rule); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("delete_rule", ::apache::thrift::protocol::T_I32, 9); + xfer += oprot->writeI32(this->delete_rule); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("fk_name", ::apache::thrift::protocol::T_STRING, 10); + xfer += oprot->writeString(this->fk_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("pk_name", ::apache::thrift::protocol::T_STRING, 11); + xfer += oprot->writeString(this->pk_name); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(SQLForeignKey &a, SQLForeignKey &b) { + using ::std::swap; + swap(a.pktable_schem, b.pktable_schem); + swap(a.pktable_name, b.pktable_name); + swap(a.pkcolumn_name, b.pkcolumn_name); + swap(a.fktable_schem, b.fktable_schem); + swap(a.fktable_name, b.fktable_name); + swap(a.fkcolumn_name, b.fkcolumn_name); + swap(a.key_seq, b.key_seq); + swap(a.update_rule, b.update_rule); + swap(a.delete_rule, b.delete_rule); + swap(a.fk_name, b.fk_name); + swap(a.pk_name, b.pk_name); + swap(a.__isset, b.__isset); +} + +SQLForeignKey::SQLForeignKey(const SQLForeignKey& other20) { + pktable_schem = other20.pktable_schem; + pktable_name = other20.pktable_name; + pkcolumn_name = other20.pkcolumn_name; + fktable_schem = other20.fktable_schem; + fktable_name = other20.fktable_name; + fkcolumn_name = other20.fkcolumn_name; + key_seq = other20.key_seq; + update_rule = other20.update_rule; + delete_rule = other20.delete_rule; + fk_name = other20.fk_name; + pk_name = 
other20.pk_name; + __isset = other20.__isset; +} +SQLForeignKey& SQLForeignKey::operator=(const SQLForeignKey& other21) { + pktable_schem = other21.pktable_schem; + pktable_name = other21.pktable_name; + pkcolumn_name = other21.pkcolumn_name; + fktable_schem = other21.fktable_schem; + fktable_name = other21.fktable_name; + fkcolumn_name = other21.fkcolumn_name; + key_seq = other21.key_seq; + update_rule = other21.update_rule; + delete_rule = other21.delete_rule; + fk_name = other21.fk_name; + pk_name = other21.pk_name; + __isset = other21.__isset; + return *this; +} +void SQLForeignKey::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "SQLForeignKey("; + out << "pktable_schem=" << to_string(pktable_schem); + out << ", " << "pktable_name=" << to_string(pktable_name); + out << ", " << "pkcolumn_name=" << to_string(pkcolumn_name); + out << ", " << "fktable_schem=" << to_string(fktable_schem); + out << ", " << "fktable_name=" << to_string(fktable_name); + out << ", " << "fkcolumn_name=" << to_string(fkcolumn_name); + out << ", " << "key_seq=" << to_string(key_seq); + out << ", " << "update_rule=" << to_string(update_rule); + out << ", " << "delete_rule=" << to_string(delete_rule); + out << ", " << "fk_name=" << to_string(fk_name); + out << ", " << "pk_name=" << to_string(pk_name); + out << ")"; +} + + Type::~Type() throw() { } @@ -464,14 +1122,14 @@ uint32_t Type::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fields.clear(); - uint32_t _size4; - ::apache::thrift::protocol::TType _etype7; - xfer += iprot->readListBegin(_etype7, _size4); - this->fields.resize(_size4); - uint32_t _i8; - for (_i8 = 0; _i8 < _size4; ++_i8) + uint32_t _size22; + ::apache::thrift::protocol::TType _etype25; + xfer += iprot->readListBegin(_etype25, _size22); + this->fields.resize(_size22); + uint32_t _i26; + for (_i26 = 0; _i26 < _size22; ++_i26) { - xfer += this->fields[_i8].read(iprot); + xfer += this->fields[_i26].read(iprot); } xfer += iprot->readListEnd(); } @@ -515,10 +1173,10 @@ uint32_t Type::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fields", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fields.size())); - std::vector ::const_iterator _iter9; - for (_iter9 = this->fields.begin(); _iter9 != this->fields.end(); ++_iter9) + std::vector ::const_iterator _iter27; + for (_iter27 = this->fields.begin(); _iter27 != this->fields.end(); ++_iter27) { - xfer += (*_iter9).write(oprot); + xfer += (*_iter27).write(oprot); } xfer += oprot->writeListEnd(); } @@ -538,19 +1196,19 @@ void swap(Type &a, Type &b) { swap(a.__isset, b.__isset); } -Type::Type(const Type& other10) { - name = other10.name; - type1 = other10.type1; - type2 = other10.type2; - fields = other10.fields; - __isset = other10.__isset; +Type::Type(const Type& other28) { + name = other28.name; + type1 = other28.type1; + type2 = other28.type2; + fields = other28.fields; + __isset = other28.__isset; } -Type& Type::operator=(const Type& other11) { - name = other11.name; - type1 = other11.type1; - type2 = other11.type2; - fields = other11.fields; - __isset = other11.__isset; +Type& Type::operator=(const Type& other29) { + name = other29.name; + type1 = other29.type1; + type2 = other29.type2; + fields = other29.fields; + __isset = other29.__isset; return *this; } void Type::printTo(std::ostream& out) const { @@ -611,9 +1269,9 @@ uint32_t 
HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast12; - xfer += iprot->readI32(ecast12); - this->objectType = (HiveObjectType::type)ecast12; + int32_t ecast30; + xfer += iprot->readI32(ecast30); + this->objectType = (HiveObjectType::type)ecast30; this->__isset.objectType = true; } else { xfer += iprot->skip(ftype); @@ -639,14 +1297,14 @@ uint32_t HiveObjectRef::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partValues.clear(); - uint32_t _size13; - ::apache::thrift::protocol::TType _etype16; - xfer += iprot->readListBegin(_etype16, _size13); - this->partValues.resize(_size13); - uint32_t _i17; - for (_i17 = 0; _i17 < _size13; ++_i17) + uint32_t _size31; + ::apache::thrift::protocol::TType _etype34; + xfer += iprot->readListBegin(_etype34, _size31); + this->partValues.resize(_size31); + uint32_t _i35; + for (_i35 = 0; _i35 < _size31; ++_i35) { - xfer += iprot->readString(this->partValues[_i17]); + xfer += iprot->readString(this->partValues[_i35]); } xfer += iprot->readListEnd(); } @@ -695,10 +1353,10 @@ uint32_t HiveObjectRef::write(::apache::thrift::protocol::TProtocol* oprot) cons xfer += oprot->writeFieldBegin("partValues", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partValues.size())); - std::vector ::const_iterator _iter18; - for (_iter18 = this->partValues.begin(); _iter18 != this->partValues.end(); ++_iter18) + std::vector ::const_iterator _iter36; + for (_iter36 = this->partValues.begin(); _iter36 != this->partValues.end(); ++_iter36) { - xfer += oprot->writeString((*_iter18)); + xfer += oprot->writeString((*_iter36)); } xfer += oprot->writeListEnd(); } @@ -723,21 +1381,21 @@ void swap(HiveObjectRef &a, HiveObjectRef &b) { swap(a.__isset, b.__isset); } -HiveObjectRef::HiveObjectRef(const HiveObjectRef& other19) { - objectType = other19.objectType; - dbName = other19.dbName; - objectName = other19.objectName; - partValues = other19.partValues; - columnName = other19.columnName; - __isset = other19.__isset; -} -HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other20) { - objectType = other20.objectType; - dbName = other20.dbName; - objectName = other20.objectName; - partValues = other20.partValues; - columnName = other20.columnName; - __isset = other20.__isset; +HiveObjectRef::HiveObjectRef(const HiveObjectRef& other37) { + objectType = other37.objectType; + dbName = other37.dbName; + objectName = other37.objectName; + partValues = other37.partValues; + columnName = other37.columnName; + __isset = other37.__isset; +} +HiveObjectRef& HiveObjectRef::operator=(const HiveObjectRef& other38) { + objectType = other38.objectType; + dbName = other38.dbName; + objectName = other38.objectName; + partValues = other38.partValues; + columnName = other38.columnName; + __isset = other38.__isset; return *this; } void HiveObjectRef::printTo(std::ostream& out) const { @@ -823,9 +1481,9 @@ uint32_t PrivilegeGrantInfo::read(::apache::thrift::protocol::TProtocol* iprot) break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast21; - xfer += iprot->readI32(ecast21); - this->grantorType = (PrincipalType::type)ecast21; + int32_t ecast39; + xfer += iprot->readI32(ecast39); + this->grantorType = (PrincipalType::type)ecast39; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -891,21 +1549,21 @@ void 
swap(PrivilegeGrantInfo &a, PrivilegeGrantInfo &b) { swap(a.__isset, b.__isset); } -PrivilegeGrantInfo::PrivilegeGrantInfo(const PrivilegeGrantInfo& other22) { - privilege = other22.privilege; - createTime = other22.createTime; - grantor = other22.grantor; - grantorType = other22.grantorType; - grantOption = other22.grantOption; - __isset = other22.__isset; -} -PrivilegeGrantInfo& PrivilegeGrantInfo::operator=(const PrivilegeGrantInfo& other23) { - privilege = other23.privilege; - createTime = other23.createTime; - grantor = other23.grantor; - grantorType = other23.grantorType; - grantOption = other23.grantOption; - __isset = other23.__isset; +PrivilegeGrantInfo::PrivilegeGrantInfo(const PrivilegeGrantInfo& other40) { + privilege = other40.privilege; + createTime = other40.createTime; + grantor = other40.grantor; + grantorType = other40.grantorType; + grantOption = other40.grantOption; + __isset = other40.__isset; +} +PrivilegeGrantInfo& PrivilegeGrantInfo::operator=(const PrivilegeGrantInfo& other41) { + privilege = other41.privilege; + createTime = other41.createTime; + grantor = other41.grantor; + grantorType = other41.grantorType; + grantOption = other41.grantOption; + __isset = other41.__isset; return *this; } void PrivilegeGrantInfo::printTo(std::ostream& out) const { @@ -979,9 +1637,9 @@ uint32_t HiveObjectPrivilege::read(::apache::thrift::protocol::TProtocol* iprot) break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast24; - xfer += iprot->readI32(ecast24); - this->principalType = (PrincipalType::type)ecast24; + int32_t ecast42; + xfer += iprot->readI32(ecast42); + this->principalType = (PrincipalType::type)ecast42; this->__isset.principalType = true; } else { xfer += iprot->skip(ftype); @@ -1042,19 +1700,19 @@ void swap(HiveObjectPrivilege &a, HiveObjectPrivilege &b) { swap(a.__isset, b.__isset); } -HiveObjectPrivilege::HiveObjectPrivilege(const HiveObjectPrivilege& other25) { - hiveObject = other25.hiveObject; - principalName = other25.principalName; - principalType = other25.principalType; - grantInfo = other25.grantInfo; - __isset = other25.__isset; +HiveObjectPrivilege::HiveObjectPrivilege(const HiveObjectPrivilege& other43) { + hiveObject = other43.hiveObject; + principalName = other43.principalName; + principalType = other43.principalType; + grantInfo = other43.grantInfo; + __isset = other43.__isset; } -HiveObjectPrivilege& HiveObjectPrivilege::operator=(const HiveObjectPrivilege& other26) { - hiveObject = other26.hiveObject; - principalName = other26.principalName; - principalType = other26.principalType; - grantInfo = other26.grantInfo; - __isset = other26.__isset; +HiveObjectPrivilege& HiveObjectPrivilege::operator=(const HiveObjectPrivilege& other44) { + hiveObject = other44.hiveObject; + principalName = other44.principalName; + principalType = other44.principalType; + grantInfo = other44.grantInfo; + __isset = other44.__isset; return *this; } void HiveObjectPrivilege::printTo(std::ostream& out) const { @@ -1101,14 +1759,14 @@ uint32_t PrivilegeBag::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->privileges.clear(); - uint32_t _size27; - ::apache::thrift::protocol::TType _etype30; - xfer += iprot->readListBegin(_etype30, _size27); - this->privileges.resize(_size27); - uint32_t _i31; - for (_i31 = 0; _i31 < _size27; ++_i31) + uint32_t _size45; + ::apache::thrift::protocol::TType _etype48; + xfer += iprot->readListBegin(_etype48, _size45); + this->privileges.resize(_size45); + 
uint32_t _i49; + for (_i49 = 0; _i49 < _size45; ++_i49) { - xfer += this->privileges[_i31].read(iprot); + xfer += this->privileges[_i49].read(iprot); } xfer += iprot->readListEnd(); } @@ -1137,10 +1795,10 @@ uint32_t PrivilegeBag::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("privileges", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->privileges.size())); - std::vector ::const_iterator _iter32; - for (_iter32 = this->privileges.begin(); _iter32 != this->privileges.end(); ++_iter32) + std::vector ::const_iterator _iter50; + for (_iter50 = this->privileges.begin(); _iter50 != this->privileges.end(); ++_iter50) { - xfer += (*_iter32).write(oprot); + xfer += (*_iter50).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1157,13 +1815,13 @@ void swap(PrivilegeBag &a, PrivilegeBag &b) { swap(a.__isset, b.__isset); } -PrivilegeBag::PrivilegeBag(const PrivilegeBag& other33) { - privileges = other33.privileges; - __isset = other33.__isset; +PrivilegeBag::PrivilegeBag(const PrivilegeBag& other51) { + privileges = other51.privileges; + __isset = other51.__isset; } -PrivilegeBag& PrivilegeBag::operator=(const PrivilegeBag& other34) { - privileges = other34.privileges; - __isset = other34.__isset; +PrivilegeBag& PrivilegeBag::operator=(const PrivilegeBag& other52) { + privileges = other52.privileges; + __isset = other52.__isset; return *this; } void PrivilegeBag::printTo(std::ostream& out) const { @@ -1215,26 +1873,26 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->userPrivileges.clear(); - uint32_t _size35; - ::apache::thrift::protocol::TType _ktype36; - ::apache::thrift::protocol::TType _vtype37; - xfer += iprot->readMapBegin(_ktype36, _vtype37, _size35); - uint32_t _i39; - for (_i39 = 0; _i39 < _size35; ++_i39) + uint32_t _size53; + ::apache::thrift::protocol::TType _ktype54; + ::apache::thrift::protocol::TType _vtype55; + xfer += iprot->readMapBegin(_ktype54, _vtype55, _size53); + uint32_t _i57; + for (_i57 = 0; _i57 < _size53; ++_i57) { - std::string _key40; - xfer += iprot->readString(_key40); - std::vector & _val41 = this->userPrivileges[_key40]; + std::string _key58; + xfer += iprot->readString(_key58); + std::vector & _val59 = this->userPrivileges[_key58]; { - _val41.clear(); - uint32_t _size42; - ::apache::thrift::protocol::TType _etype45; - xfer += iprot->readListBegin(_etype45, _size42); - _val41.resize(_size42); - uint32_t _i46; - for (_i46 = 0; _i46 < _size42; ++_i46) + _val59.clear(); + uint32_t _size60; + ::apache::thrift::protocol::TType _etype63; + xfer += iprot->readListBegin(_etype63, _size60); + _val59.resize(_size60); + uint32_t _i64; + for (_i64 = 0; _i64 < _size60; ++_i64) { - xfer += _val41[_i46].read(iprot); + xfer += _val59[_i64].read(iprot); } xfer += iprot->readListEnd(); } @@ -1250,26 +1908,26 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->groupPrivileges.clear(); - uint32_t _size47; - ::apache::thrift::protocol::TType _ktype48; - ::apache::thrift::protocol::TType _vtype49; - xfer += iprot->readMapBegin(_ktype48, _vtype49, _size47); - uint32_t _i51; - for (_i51 = 0; _i51 < _size47; ++_i51) + uint32_t _size65; + ::apache::thrift::protocol::TType _ktype66; + ::apache::thrift::protocol::TType _vtype67; + xfer += iprot->readMapBegin(_ktype66, _vtype67, _size65); + 
uint32_t _i69; + for (_i69 = 0; _i69 < _size65; ++_i69) { - std::string _key52; - xfer += iprot->readString(_key52); - std::vector & _val53 = this->groupPrivileges[_key52]; + std::string _key70; + xfer += iprot->readString(_key70); + std::vector & _val71 = this->groupPrivileges[_key70]; { - _val53.clear(); - uint32_t _size54; - ::apache::thrift::protocol::TType _etype57; - xfer += iprot->readListBegin(_etype57, _size54); - _val53.resize(_size54); - uint32_t _i58; - for (_i58 = 0; _i58 < _size54; ++_i58) + _val71.clear(); + uint32_t _size72; + ::apache::thrift::protocol::TType _etype75; + xfer += iprot->readListBegin(_etype75, _size72); + _val71.resize(_size72); + uint32_t _i76; + for (_i76 = 0; _i76 < _size72; ++_i76) { - xfer += _val53[_i58].read(iprot); + xfer += _val71[_i76].read(iprot); } xfer += iprot->readListEnd(); } @@ -1285,26 +1943,26 @@ uint32_t PrincipalPrivilegeSet::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->rolePrivileges.clear(); - uint32_t _size59; - ::apache::thrift::protocol::TType _ktype60; - ::apache::thrift::protocol::TType _vtype61; - xfer += iprot->readMapBegin(_ktype60, _vtype61, _size59); - uint32_t _i63; - for (_i63 = 0; _i63 < _size59; ++_i63) + uint32_t _size77; + ::apache::thrift::protocol::TType _ktype78; + ::apache::thrift::protocol::TType _vtype79; + xfer += iprot->readMapBegin(_ktype78, _vtype79, _size77); + uint32_t _i81; + for (_i81 = 0; _i81 < _size77; ++_i81) { - std::string _key64; - xfer += iprot->readString(_key64); - std::vector & _val65 = this->rolePrivileges[_key64]; + std::string _key82; + xfer += iprot->readString(_key82); + std::vector & _val83 = this->rolePrivileges[_key82]; { - _val65.clear(); - uint32_t _size66; - ::apache::thrift::protocol::TType _etype69; - xfer += iprot->readListBegin(_etype69, _size66); - _val65.resize(_size66); - uint32_t _i70; - for (_i70 = 0; _i70 < _size66; ++_i70) + _val83.clear(); + uint32_t _size84; + ::apache::thrift::protocol::TType _etype87; + xfer += iprot->readListBegin(_etype87, _size84); + _val83.resize(_size84); + uint32_t _i88; + for (_i88 = 0; _i88 < _size84; ++_i88) { - xfer += _val65[_i70].read(iprot); + xfer += _val83[_i88].read(iprot); } xfer += iprot->readListEnd(); } @@ -1336,16 +1994,16 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("userPrivileges", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->userPrivileges.size())); - std::map > ::const_iterator _iter71; - for (_iter71 = this->userPrivileges.begin(); _iter71 != this->userPrivileges.end(); ++_iter71) + std::map > ::const_iterator _iter89; + for (_iter89 = this->userPrivileges.begin(); _iter89 != this->userPrivileges.end(); ++_iter89) { - xfer += oprot->writeString(_iter71->first); + xfer += oprot->writeString(_iter89->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter71->second.size())); - std::vector ::const_iterator _iter72; - for (_iter72 = _iter71->second.begin(); _iter72 != _iter71->second.end(); ++_iter72) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter89->second.size())); + std::vector ::const_iterator _iter90; + for (_iter90 = _iter89->second.begin(); _iter90 != _iter89->second.end(); ++_iter90) { - xfer += (*_iter72).write(oprot); + xfer += (*_iter90).write(oprot); } xfer += oprot->writeListEnd(); } @@ 
-1357,16 +2015,16 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("groupPrivileges", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->groupPrivileges.size())); - std::map > ::const_iterator _iter73; - for (_iter73 = this->groupPrivileges.begin(); _iter73 != this->groupPrivileges.end(); ++_iter73) + std::map > ::const_iterator _iter91; + for (_iter91 = this->groupPrivileges.begin(); _iter91 != this->groupPrivileges.end(); ++_iter91) { - xfer += oprot->writeString(_iter73->first); + xfer += oprot->writeString(_iter91->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter73->second.size())); - std::vector ::const_iterator _iter74; - for (_iter74 = _iter73->second.begin(); _iter74 != _iter73->second.end(); ++_iter74) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter91->second.size())); + std::vector ::const_iterator _iter92; + for (_iter92 = _iter91->second.begin(); _iter92 != _iter91->second.end(); ++_iter92) { - xfer += (*_iter74).write(oprot); + xfer += (*_iter92).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1378,16 +2036,16 @@ uint32_t PrincipalPrivilegeSet::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("rolePrivileges", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->rolePrivileges.size())); - std::map > ::const_iterator _iter75; - for (_iter75 = this->rolePrivileges.begin(); _iter75 != this->rolePrivileges.end(); ++_iter75) + std::map > ::const_iterator _iter93; + for (_iter93 = this->rolePrivileges.begin(); _iter93 != this->rolePrivileges.end(); ++_iter93) { - xfer += oprot->writeString(_iter75->first); + xfer += oprot->writeString(_iter93->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter75->second.size())); - std::vector ::const_iterator _iter76; - for (_iter76 = _iter75->second.begin(); _iter76 != _iter75->second.end(); ++_iter76) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter93->second.size())); + std::vector ::const_iterator _iter94; + for (_iter94 = _iter93->second.begin(); _iter94 != _iter93->second.end(); ++_iter94) { - xfer += (*_iter76).write(oprot); + xfer += (*_iter94).write(oprot); } xfer += oprot->writeListEnd(); } @@ -1409,17 +2067,17 @@ void swap(PrincipalPrivilegeSet &a, PrincipalPrivilegeSet &b) { swap(a.__isset, b.__isset); } -PrincipalPrivilegeSet::PrincipalPrivilegeSet(const PrincipalPrivilegeSet& other77) { - userPrivileges = other77.userPrivileges; - groupPrivileges = other77.groupPrivileges; - rolePrivileges = other77.rolePrivileges; - __isset = other77.__isset; +PrincipalPrivilegeSet::PrincipalPrivilegeSet(const PrincipalPrivilegeSet& other95) { + userPrivileges = other95.userPrivileges; + groupPrivileges = other95.groupPrivileges; + rolePrivileges = other95.rolePrivileges; + __isset = other95.__isset; } -PrincipalPrivilegeSet& PrincipalPrivilegeSet::operator=(const PrincipalPrivilegeSet& other78) { - userPrivileges = other78.userPrivileges; - groupPrivileges = other78.groupPrivileges; - rolePrivileges = other78.rolePrivileges; - __isset = other78.__isset; +PrincipalPrivilegeSet& PrincipalPrivilegeSet::operator=(const PrincipalPrivilegeSet& other96) { + 
userPrivileges = other96.userPrivileges; + groupPrivileges = other96.groupPrivileges; + rolePrivileges = other96.rolePrivileges; + __isset = other96.__isset; return *this; } void PrincipalPrivilegeSet::printTo(std::ostream& out) const { @@ -1472,9 +2130,9 @@ uint32_t GrantRevokePrivilegeRequest::read(::apache::thrift::protocol::TProtocol { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast79; - xfer += iprot->readI32(ecast79); - this->requestType = (GrantRevokeType::type)ecast79; + int32_t ecast97; + xfer += iprot->readI32(ecast97); + this->requestType = (GrantRevokeType::type)ecast97; this->__isset.requestType = true; } else { xfer += iprot->skip(ftype); @@ -1539,17 +2197,17 @@ void swap(GrantRevokePrivilegeRequest &a, GrantRevokePrivilegeRequest &b) { swap(a.__isset, b.__isset); } -GrantRevokePrivilegeRequest::GrantRevokePrivilegeRequest(const GrantRevokePrivilegeRequest& other80) { - requestType = other80.requestType; - privileges = other80.privileges; - revokeGrantOption = other80.revokeGrantOption; - __isset = other80.__isset; +GrantRevokePrivilegeRequest::GrantRevokePrivilegeRequest(const GrantRevokePrivilegeRequest& other98) { + requestType = other98.requestType; + privileges = other98.privileges; + revokeGrantOption = other98.revokeGrantOption; + __isset = other98.__isset; } -GrantRevokePrivilegeRequest& GrantRevokePrivilegeRequest::operator=(const GrantRevokePrivilegeRequest& other81) { - requestType = other81.requestType; - privileges = other81.privileges; - revokeGrantOption = other81.revokeGrantOption; - __isset = other81.__isset; +GrantRevokePrivilegeRequest& GrantRevokePrivilegeRequest::operator=(const GrantRevokePrivilegeRequest& other99) { + requestType = other99.requestType; + privileges = other99.privileges; + revokeGrantOption = other99.revokeGrantOption; + __isset = other99.__isset; return *this; } void GrantRevokePrivilegeRequest::printTo(std::ostream& out) const { @@ -1633,13 +2291,13 @@ void swap(GrantRevokePrivilegeResponse &a, GrantRevokePrivilegeResponse &b) { swap(a.__isset, b.__isset); } -GrantRevokePrivilegeResponse::GrantRevokePrivilegeResponse(const GrantRevokePrivilegeResponse& other82) { - success = other82.success; - __isset = other82.__isset; +GrantRevokePrivilegeResponse::GrantRevokePrivilegeResponse(const GrantRevokePrivilegeResponse& other100) { + success = other100.success; + __isset = other100.__isset; } -GrantRevokePrivilegeResponse& GrantRevokePrivilegeResponse::operator=(const GrantRevokePrivilegeResponse& other83) { - success = other83.success; - __isset = other83.__isset; +GrantRevokePrivilegeResponse& GrantRevokePrivilegeResponse::operator=(const GrantRevokePrivilegeResponse& other101) { + success = other101.success; + __isset = other101.__isset; return *this; } void GrantRevokePrivilegeResponse::printTo(std::ostream& out) const { @@ -1753,17 +2411,17 @@ void swap(Role &a, Role &b) { swap(a.__isset, b.__isset); } -Role::Role(const Role& other84) { - roleName = other84.roleName; - createTime = other84.createTime; - ownerName = other84.ownerName; - __isset = other84.__isset; -} -Role& Role::operator=(const Role& other85) { - roleName = other85.roleName; - createTime = other85.createTime; - ownerName = other85.ownerName; - __isset = other85.__isset; +Role::Role(const Role& other102) { + roleName = other102.roleName; + createTime = other102.createTime; + ownerName = other102.ownerName; + __isset = other102.__isset; +} +Role& Role::operator=(const Role& other103) { + roleName = other103.roleName; + createTime = 
other103.createTime; + ownerName = other103.ownerName; + __isset = other103.__isset; return *this; } void Role::printTo(std::ostream& out) const { @@ -1847,9 +2505,9 @@ uint32_t RolePrincipalGrant::read(::apache::thrift::protocol::TProtocol* iprot) break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast86; - xfer += iprot->readI32(ecast86); - this->principalType = (PrincipalType::type)ecast86; + int32_t ecast104; + xfer += iprot->readI32(ecast104); + this->principalType = (PrincipalType::type)ecast104; this->__isset.principalType = true; } else { xfer += iprot->skip(ftype); @@ -1881,9 +2539,9 @@ uint32_t RolePrincipalGrant::read(::apache::thrift::protocol::TProtocol* iprot) break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast87; - xfer += iprot->readI32(ecast87); - this->grantorPrincipalType = (PrincipalType::type)ecast87; + int32_t ecast105; + xfer += iprot->readI32(ecast105); + this->grantorPrincipalType = (PrincipalType::type)ecast105; this->__isset.grantorPrincipalType = true; } else { xfer += iprot->skip(ftype); @@ -1951,25 +2609,25 @@ void swap(RolePrincipalGrant &a, RolePrincipalGrant &b) { swap(a.__isset, b.__isset); } -RolePrincipalGrant::RolePrincipalGrant(const RolePrincipalGrant& other88) { - roleName = other88.roleName; - principalName = other88.principalName; - principalType = other88.principalType; - grantOption = other88.grantOption; - grantTime = other88.grantTime; - grantorName = other88.grantorName; - grantorPrincipalType = other88.grantorPrincipalType; - __isset = other88.__isset; -} -RolePrincipalGrant& RolePrincipalGrant::operator=(const RolePrincipalGrant& other89) { - roleName = other89.roleName; - principalName = other89.principalName; - principalType = other89.principalType; - grantOption = other89.grantOption; - grantTime = other89.grantTime; - grantorName = other89.grantorName; - grantorPrincipalType = other89.grantorPrincipalType; - __isset = other89.__isset; +RolePrincipalGrant::RolePrincipalGrant(const RolePrincipalGrant& other106) { + roleName = other106.roleName; + principalName = other106.principalName; + principalType = other106.principalType; + grantOption = other106.grantOption; + grantTime = other106.grantTime; + grantorName = other106.grantorName; + grantorPrincipalType = other106.grantorPrincipalType; + __isset = other106.__isset; +} +RolePrincipalGrant& RolePrincipalGrant::operator=(const RolePrincipalGrant& other107) { + roleName = other107.roleName; + principalName = other107.principalName; + principalType = other107.principalType; + grantOption = other107.grantOption; + grantTime = other107.grantTime; + grantorName = other107.grantorName; + grantorPrincipalType = other107.grantorPrincipalType; + __isset = other107.__isset; return *this; } void RolePrincipalGrant::printTo(std::ostream& out) const { @@ -2031,9 +2689,9 @@ uint32_t GetRoleGrantsForPrincipalRequest::read(::apache::thrift::protocol::TPro break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast90; - xfer += iprot->readI32(ecast90); - this->principal_type = (PrincipalType::type)ecast90; + int32_t ecast108; + xfer += iprot->readI32(ecast108); + this->principal_type = (PrincipalType::type)ecast108; isset_principal_type = true; } else { xfer += iprot->skip(ftype); @@ -2079,13 +2737,13 @@ void swap(GetRoleGrantsForPrincipalRequest &a, GetRoleGrantsForPrincipalRequest swap(a.principal_type, b.principal_type); } -GetRoleGrantsForPrincipalRequest::GetRoleGrantsForPrincipalRequest(const GetRoleGrantsForPrincipalRequest& 
other91) { - principal_name = other91.principal_name; - principal_type = other91.principal_type; +GetRoleGrantsForPrincipalRequest::GetRoleGrantsForPrincipalRequest(const GetRoleGrantsForPrincipalRequest& other109) { + principal_name = other109.principal_name; + principal_type = other109.principal_type; } -GetRoleGrantsForPrincipalRequest& GetRoleGrantsForPrincipalRequest::operator=(const GetRoleGrantsForPrincipalRequest& other92) { - principal_name = other92.principal_name; - principal_type = other92.principal_type; +GetRoleGrantsForPrincipalRequest& GetRoleGrantsForPrincipalRequest::operator=(const GetRoleGrantsForPrincipalRequest& other110) { + principal_name = other110.principal_name; + principal_type = other110.principal_type; return *this; } void GetRoleGrantsForPrincipalRequest::printTo(std::ostream& out) const { @@ -2131,14 +2789,14 @@ uint32_t GetRoleGrantsForPrincipalResponse::read(::apache::thrift::protocol::TPr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->principalGrants.clear(); - uint32_t _size93; - ::apache::thrift::protocol::TType _etype96; - xfer += iprot->readListBegin(_etype96, _size93); - this->principalGrants.resize(_size93); - uint32_t _i97; - for (_i97 = 0; _i97 < _size93; ++_i97) + uint32_t _size111; + ::apache::thrift::protocol::TType _etype114; + xfer += iprot->readListBegin(_etype114, _size111); + this->principalGrants.resize(_size111); + uint32_t _i115; + for (_i115 = 0; _i115 < _size111; ++_i115) { - xfer += this->principalGrants[_i97].read(iprot); + xfer += this->principalGrants[_i115].read(iprot); } xfer += iprot->readListEnd(); } @@ -2169,10 +2827,10 @@ uint32_t GetRoleGrantsForPrincipalResponse::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->principalGrants.size())); - std::vector ::const_iterator _iter98; - for (_iter98 = this->principalGrants.begin(); _iter98 != this->principalGrants.end(); ++_iter98) + std::vector ::const_iterator _iter116; + for (_iter116 = this->principalGrants.begin(); _iter116 != this->principalGrants.end(); ++_iter116) { - xfer += (*_iter98).write(oprot); + xfer += (*_iter116).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2188,11 +2846,11 @@ void swap(GetRoleGrantsForPrincipalResponse &a, GetRoleGrantsForPrincipalRespons swap(a.principalGrants, b.principalGrants); } -GetRoleGrantsForPrincipalResponse::GetRoleGrantsForPrincipalResponse(const GetRoleGrantsForPrincipalResponse& other99) { - principalGrants = other99.principalGrants; +GetRoleGrantsForPrincipalResponse::GetRoleGrantsForPrincipalResponse(const GetRoleGrantsForPrincipalResponse& other117) { + principalGrants = other117.principalGrants; } -GetRoleGrantsForPrincipalResponse& GetRoleGrantsForPrincipalResponse::operator=(const GetRoleGrantsForPrincipalResponse& other100) { - principalGrants = other100.principalGrants; +GetRoleGrantsForPrincipalResponse& GetRoleGrantsForPrincipalResponse::operator=(const GetRoleGrantsForPrincipalResponse& other118) { + principalGrants = other118.principalGrants; return *this; } void GetRoleGrantsForPrincipalResponse::printTo(std::ostream& out) const { @@ -2274,11 +2932,11 @@ void swap(GetPrincipalsInRoleRequest &a, GetPrincipalsInRoleRequest &b) { swap(a.roleName, b.roleName); } -GetPrincipalsInRoleRequest::GetPrincipalsInRoleRequest(const GetPrincipalsInRoleRequest& other101) { - roleName = other101.roleName; 
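
The churn in the hunks above is mechanical: every ecastNN, otherNN, _sizeNN, and _iterNN temporary shares a single file-wide counter in the Thrift C++ generator, so adding new structs earlier in the IDL shifts every suffix that follows (ecast86 becomes ecast104, other91 becomes other109, and so on) without changing behavior. All of the enum hunks instantiate one read shape; the following is a minimal compilable sketch of it, where FakeProtocol is an assumed stand-in for thrift's TProtocol and the enum values are illustrative only.

#include <cstdint>

namespace PrincipalType {
  enum type { USER = 1, ROLE = 2, GROUP = 3 };  // illustrative values, not authoritative
}

struct FakeProtocol {                  // assumed stand-in for ::apache::thrift::protocol::TProtocol
  uint32_t readI32(int32_t& v) { v = 2; return 4; }  // pretend 4 bytes were consumed
};

static uint32_t readPrincipalType(FakeProtocol* iprot, PrincipalType::type& out) {
  uint32_t xfer = 0;
  int32_t ecast = 0;                   // thrift wires enums as i32
  xfer += iprot->readI32(ecast);
  out = (PrincipalType::type)ecast;    // the cast every ecastNNN hunk performs
  return xfer;                         // byte count accumulated, as in the generated read()
}
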
+GetPrincipalsInRoleRequest::GetPrincipalsInRoleRequest(const GetPrincipalsInRoleRequest& other119) { + roleName = other119.roleName; } -GetPrincipalsInRoleRequest& GetPrincipalsInRoleRequest::operator=(const GetPrincipalsInRoleRequest& other102) { - roleName = other102.roleName; +GetPrincipalsInRoleRequest& GetPrincipalsInRoleRequest::operator=(const GetPrincipalsInRoleRequest& other120) { + roleName = other120.roleName; return *this; } void GetPrincipalsInRoleRequest::printTo(std::ostream& out) const { @@ -2323,14 +2981,14 @@ uint32_t GetPrincipalsInRoleResponse::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->principalGrants.clear(); - uint32_t _size103; - ::apache::thrift::protocol::TType _etype106; - xfer += iprot->readListBegin(_etype106, _size103); - this->principalGrants.resize(_size103); - uint32_t _i107; - for (_i107 = 0; _i107 < _size103; ++_i107) + uint32_t _size121; + ::apache::thrift::protocol::TType _etype124; + xfer += iprot->readListBegin(_etype124, _size121); + this->principalGrants.resize(_size121); + uint32_t _i125; + for (_i125 = 0; _i125 < _size121; ++_i125) { - xfer += this->principalGrants[_i107].read(iprot); + xfer += this->principalGrants[_i125].read(iprot); } xfer += iprot->readListEnd(); } @@ -2361,10 +3019,10 @@ uint32_t GetPrincipalsInRoleResponse::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("principalGrants", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->principalGrants.size())); - std::vector ::const_iterator _iter108; - for (_iter108 = this->principalGrants.begin(); _iter108 != this->principalGrants.end(); ++_iter108) + std::vector ::const_iterator _iter126; + for (_iter126 = this->principalGrants.begin(); _iter126 != this->principalGrants.end(); ++_iter126) { - xfer += (*_iter108).write(oprot); + xfer += (*_iter126).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2380,11 +3038,11 @@ void swap(GetPrincipalsInRoleResponse &a, GetPrincipalsInRoleResponse &b) { swap(a.principalGrants, b.principalGrants); } -GetPrincipalsInRoleResponse::GetPrincipalsInRoleResponse(const GetPrincipalsInRoleResponse& other109) { - principalGrants = other109.principalGrants; +GetPrincipalsInRoleResponse::GetPrincipalsInRoleResponse(const GetPrincipalsInRoleResponse& other127) { + principalGrants = other127.principalGrants; } -GetPrincipalsInRoleResponse& GetPrincipalsInRoleResponse::operator=(const GetPrincipalsInRoleResponse& other110) { - principalGrants = other110.principalGrants; +GetPrincipalsInRoleResponse& GetPrincipalsInRoleResponse::operator=(const GetPrincipalsInRoleResponse& other128) { + principalGrants = other128.principalGrants; return *this; } void GetPrincipalsInRoleResponse::printTo(std::ostream& out) const { @@ -2453,9 +3111,9 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast111; - xfer += iprot->readI32(ecast111); - this->requestType = (GrantRevokeType::type)ecast111; + int32_t ecast129; + xfer += iprot->readI32(ecast129); + this->requestType = (GrantRevokeType::type)ecast129; this->__isset.requestType = true; } else { xfer += iprot->skip(ftype); @@ -2479,9 +3137,9 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast112; - xfer += iprot->readI32(ecast112); - 
this->principalType = (PrincipalType::type)ecast112; + int32_t ecast130; + xfer += iprot->readI32(ecast130); + this->principalType = (PrincipalType::type)ecast130; this->__isset.principalType = true; } else { xfer += iprot->skip(ftype); @@ -2497,9 +3155,9 @@ uint32_t GrantRevokeRoleRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast113; - xfer += iprot->readI32(ecast113); - this->grantorType = (PrincipalType::type)ecast113; + int32_t ecast131; + xfer += iprot->readI32(ecast131); + this->grantorType = (PrincipalType::type)ecast131; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -2578,25 +3236,25 @@ void swap(GrantRevokeRoleRequest &a, GrantRevokeRoleRequest &b) { swap(a.__isset, b.__isset); } -GrantRevokeRoleRequest::GrantRevokeRoleRequest(const GrantRevokeRoleRequest& other114) { - requestType = other114.requestType; - roleName = other114.roleName; - principalName = other114.principalName; - principalType = other114.principalType; - grantor = other114.grantor; - grantorType = other114.grantorType; - grantOption = other114.grantOption; - __isset = other114.__isset; -} -GrantRevokeRoleRequest& GrantRevokeRoleRequest::operator=(const GrantRevokeRoleRequest& other115) { - requestType = other115.requestType; - roleName = other115.roleName; - principalName = other115.principalName; - principalType = other115.principalType; - grantor = other115.grantor; - grantorType = other115.grantorType; - grantOption = other115.grantOption; - __isset = other115.__isset; +GrantRevokeRoleRequest::GrantRevokeRoleRequest(const GrantRevokeRoleRequest& other132) { + requestType = other132.requestType; + roleName = other132.roleName; + principalName = other132.principalName; + principalType = other132.principalType; + grantor = other132.grantor; + grantorType = other132.grantorType; + grantOption = other132.grantOption; + __isset = other132.__isset; +} +GrantRevokeRoleRequest& GrantRevokeRoleRequest::operator=(const GrantRevokeRoleRequest& other133) { + requestType = other133.requestType; + roleName = other133.roleName; + principalName = other133.principalName; + principalType = other133.principalType; + grantor = other133.grantor; + grantorType = other133.grantorType; + grantOption = other133.grantOption; + __isset = other133.__isset; return *this; } void GrantRevokeRoleRequest::printTo(std::ostream& out) const { @@ -2684,13 +3342,13 @@ void swap(GrantRevokeRoleResponse &a, GrantRevokeRoleResponse &b) { swap(a.__isset, b.__isset); } -GrantRevokeRoleResponse::GrantRevokeRoleResponse(const GrantRevokeRoleResponse& other116) { - success = other116.success; - __isset = other116.__isset; +GrantRevokeRoleResponse::GrantRevokeRoleResponse(const GrantRevokeRoleResponse& other134) { + success = other134.success; + __isset = other134.__isset; } -GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRoleResponse& other117) { - success = other117.success; - __isset = other117.__isset; +GrantRevokeRoleResponse& GrantRevokeRoleResponse::operator=(const GrantRevokeRoleResponse& other135) { + success = other135.success; + __isset = other135.__isset; return *this; } void GrantRevokeRoleResponse::printTo(std::ostream& out) const { @@ -2785,17 +3443,17 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size118; - ::apache::thrift::protocol::TType _ktype119; - 
::apache::thrift::protocol::TType _vtype120; - xfer += iprot->readMapBegin(_ktype119, _vtype120, _size118); - uint32_t _i122; - for (_i122 = 0; _i122 < _size118; ++_i122) + uint32_t _size136; + ::apache::thrift::protocol::TType _ktype137; + ::apache::thrift::protocol::TType _vtype138; + xfer += iprot->readMapBegin(_ktype137, _vtype138, _size136); + uint32_t _i140; + for (_i140 = 0; _i140 < _size136; ++_i140) { - std::string _key123; - xfer += iprot->readString(_key123); - std::string& _val124 = this->parameters[_key123]; - xfer += iprot->readString(_val124); + std::string _key141; + xfer += iprot->readString(_key141); + std::string& _val142 = this->parameters[_key141]; + xfer += iprot->readString(_val142); } xfer += iprot->readMapEnd(); } @@ -2822,9 +3480,9 @@ uint32_t Database::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast125; - xfer += iprot->readI32(ecast125); - this->ownerType = (PrincipalType::type)ecast125; + int32_t ecast143; + xfer += iprot->readI32(ecast143); + this->ownerType = (PrincipalType::type)ecast143; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -2862,11 +3520,11 @@ uint32_t Database::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 4); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter126; - for (_iter126 = this->parameters.begin(); _iter126 != this->parameters.end(); ++_iter126) + std::map ::const_iterator _iter144; + for (_iter144 = this->parameters.begin(); _iter144 != this->parameters.end(); ++_iter144) { - xfer += oprot->writeString(_iter126->first); - xfer += oprot->writeString(_iter126->second); + xfer += oprot->writeString(_iter144->first); + xfer += oprot->writeString(_iter144->second); } xfer += oprot->writeMapEnd(); } @@ -2904,25 +3562,25 @@ void swap(Database &a, Database &b) { swap(a.__isset, b.__isset); } -Database::Database(const Database& other127) { - name = other127.name; - description = other127.description; - locationUri = other127.locationUri; - parameters = other127.parameters; - privileges = other127.privileges; - ownerName = other127.ownerName; - ownerType = other127.ownerType; - __isset = other127.__isset; -} -Database& Database::operator=(const Database& other128) { - name = other128.name; - description = other128.description; - locationUri = other128.locationUri; - parameters = other128.parameters; - privileges = other128.privileges; - ownerName = other128.ownerName; - ownerType = other128.ownerType; - __isset = other128.__isset; +Database::Database(const Database& other145) { + name = other145.name; + description = other145.description; + locationUri = other145.locationUri; + parameters = other145.parameters; + privileges = other145.privileges; + ownerName = other145.ownerName; + ownerType = other145.ownerType; + __isset = other145.__isset; +} +Database& Database::operator=(const Database& other146) { + name = other146.name; + description = other146.description; + locationUri = other146.locationUri; + parameters = other146.parameters; + privileges = other146.privileges; + ownerName = other146.ownerName; + ownerType = other146.ownerType; + __isset = other146.__isset; return *this; } void Database::printTo(std::ostream& out) const { @@ -2996,17 +3654,17 @@ uint32_t 
SerDeInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size129; - ::apache::thrift::protocol::TType _ktype130; - ::apache::thrift::protocol::TType _vtype131; - xfer += iprot->readMapBegin(_ktype130, _vtype131, _size129); - uint32_t _i133; - for (_i133 = 0; _i133 < _size129; ++_i133) + uint32_t _size147; + ::apache::thrift::protocol::TType _ktype148; + ::apache::thrift::protocol::TType _vtype149; + xfer += iprot->readMapBegin(_ktype148, _vtype149, _size147); + uint32_t _i151; + for (_i151 = 0; _i151 < _size147; ++_i151) { - std::string _key134; - xfer += iprot->readString(_key134); - std::string& _val135 = this->parameters[_key134]; - xfer += iprot->readString(_val135); + std::string _key152; + xfer += iprot->readString(_key152); + std::string& _val153 = this->parameters[_key152]; + xfer += iprot->readString(_val153); } xfer += iprot->readMapEnd(); } @@ -3043,11 +3701,11 @@ uint32_t SerDeInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter136; - for (_iter136 = this->parameters.begin(); _iter136 != this->parameters.end(); ++_iter136) + std::map ::const_iterator _iter154; + for (_iter154 = this->parameters.begin(); _iter154 != this->parameters.end(); ++_iter154) { - xfer += oprot->writeString(_iter136->first); - xfer += oprot->writeString(_iter136->second); + xfer += oprot->writeString(_iter154->first); + xfer += oprot->writeString(_iter154->second); } xfer += oprot->writeMapEnd(); } @@ -3066,17 +3724,17 @@ void swap(SerDeInfo &a, SerDeInfo &b) { swap(a.__isset, b.__isset); } -SerDeInfo::SerDeInfo(const SerDeInfo& other137) { - name = other137.name; - serializationLib = other137.serializationLib; - parameters = other137.parameters; - __isset = other137.__isset; +SerDeInfo::SerDeInfo(const SerDeInfo& other155) { + name = other155.name; + serializationLib = other155.serializationLib; + parameters = other155.parameters; + __isset = other155.__isset; } -SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other138) { - name = other138.name; - serializationLib = other138.serializationLib; - parameters = other138.parameters; - __isset = other138.__isset; +SerDeInfo& SerDeInfo::operator=(const SerDeInfo& other156) { + name = other156.name; + serializationLib = other156.serializationLib; + parameters = other156.parameters; + __isset = other156.__isset; return *this; } void SerDeInfo::printTo(std::ostream& out) const { @@ -3175,15 +3833,15 @@ void swap(Order &a, Order &b) { swap(a.__isset, b.__isset); } -Order::Order(const Order& other139) { - col = other139.col; - order = other139.order; - __isset = other139.__isset; +Order::Order(const Order& other157) { + col = other157.col; + order = other157.order; + __isset = other157.__isset; } -Order& Order::operator=(const Order& other140) { - col = other140.col; - order = other140.order; - __isset = other140.__isset; +Order& Order::operator=(const Order& other158) { + col = other158.col; + order = other158.order; + __isset = other158.__isset; return *this; } void Order::printTo(std::ostream& out) const { @@ -3236,14 +3894,14 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColNames.clear(); - uint32_t 
_size141; - ::apache::thrift::protocol::TType _etype144; - xfer += iprot->readListBegin(_etype144, _size141); - this->skewedColNames.resize(_size141); - uint32_t _i145; - for (_i145 = 0; _i145 < _size141; ++_i145) + uint32_t _size159; + ::apache::thrift::protocol::TType _etype162; + xfer += iprot->readListBegin(_etype162, _size159); + this->skewedColNames.resize(_size159); + uint32_t _i163; + for (_i163 = 0; _i163 < _size159; ++_i163) { - xfer += iprot->readString(this->skewedColNames[_i145]); + xfer += iprot->readString(this->skewedColNames[_i163]); } xfer += iprot->readListEnd(); } @@ -3256,23 +3914,23 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->skewedColValues.clear(); - uint32_t _size146; - ::apache::thrift::protocol::TType _etype149; - xfer += iprot->readListBegin(_etype149, _size146); - this->skewedColValues.resize(_size146); - uint32_t _i150; - for (_i150 = 0; _i150 < _size146; ++_i150) + uint32_t _size164; + ::apache::thrift::protocol::TType _etype167; + xfer += iprot->readListBegin(_etype167, _size164); + this->skewedColValues.resize(_size164); + uint32_t _i168; + for (_i168 = 0; _i168 < _size164; ++_i168) { { - this->skewedColValues[_i150].clear(); - uint32_t _size151; - ::apache::thrift::protocol::TType _etype154; - xfer += iprot->readListBegin(_etype154, _size151); - this->skewedColValues[_i150].resize(_size151); - uint32_t _i155; - for (_i155 = 0; _i155 < _size151; ++_i155) + this->skewedColValues[_i168].clear(); + uint32_t _size169; + ::apache::thrift::protocol::TType _etype172; + xfer += iprot->readListBegin(_etype172, _size169); + this->skewedColValues[_i168].resize(_size169); + uint32_t _i173; + for (_i173 = 0; _i173 < _size169; ++_i173) { - xfer += iprot->readString(this->skewedColValues[_i150][_i155]); + xfer += iprot->readString(this->skewedColValues[_i168][_i173]); } xfer += iprot->readListEnd(); } @@ -3288,29 +3946,29 @@ uint32_t SkewedInfo::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->skewedColValueLocationMaps.clear(); - uint32_t _size156; - ::apache::thrift::protocol::TType _ktype157; - ::apache::thrift::protocol::TType _vtype158; - xfer += iprot->readMapBegin(_ktype157, _vtype158, _size156); - uint32_t _i160; - for (_i160 = 0; _i160 < _size156; ++_i160) + uint32_t _size174; + ::apache::thrift::protocol::TType _ktype175; + ::apache::thrift::protocol::TType _vtype176; + xfer += iprot->readMapBegin(_ktype175, _vtype176, _size174); + uint32_t _i178; + for (_i178 = 0; _i178 < _size174; ++_i178) { - std::vector _key161; + std::vector _key179; { - _key161.clear(); - uint32_t _size163; - ::apache::thrift::protocol::TType _etype166; - xfer += iprot->readListBegin(_etype166, _size163); - _key161.resize(_size163); - uint32_t _i167; - for (_i167 = 0; _i167 < _size163; ++_i167) + _key179.clear(); + uint32_t _size181; + ::apache::thrift::protocol::TType _etype184; + xfer += iprot->readListBegin(_etype184, _size181); + _key179.resize(_size181); + uint32_t _i185; + for (_i185 = 0; _i185 < _size181; ++_i185) { - xfer += iprot->readString(_key161[_i167]); + xfer += iprot->readString(_key179[_i185]); } xfer += iprot->readListEnd(); } - std::string& _val162 = this->skewedColValueLocationMaps[_key161]; - xfer += iprot->readString(_val162); + std::string& _val180 = this->skewedColValueLocationMaps[_key179]; + xfer += iprot->readString(_val180); } xfer += iprot->readMapEnd(); } @@ -3339,10 +3997,10 @@ uint32_t 
SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColNames", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->skewedColNames.size())); - std::vector ::const_iterator _iter168; - for (_iter168 = this->skewedColNames.begin(); _iter168 != this->skewedColNames.end(); ++_iter168) + std::vector ::const_iterator _iter186; + for (_iter186 = this->skewedColNames.begin(); _iter186 != this->skewedColNames.end(); ++_iter186) { - xfer += oprot->writeString((*_iter168)); + xfer += oprot->writeString((*_iter186)); } xfer += oprot->writeListEnd(); } @@ -3351,15 +4009,15 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColValues", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_LIST, static_cast(this->skewedColValues.size())); - std::vector > ::const_iterator _iter169; - for (_iter169 = this->skewedColValues.begin(); _iter169 != this->skewedColValues.end(); ++_iter169) + std::vector > ::const_iterator _iter187; + for (_iter187 = this->skewedColValues.begin(); _iter187 != this->skewedColValues.end(); ++_iter187) { { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*_iter169).size())); - std::vector ::const_iterator _iter170; - for (_iter170 = (*_iter169).begin(); _iter170 != (*_iter169).end(); ++_iter170) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*_iter187).size())); + std::vector ::const_iterator _iter188; + for (_iter188 = (*_iter187).begin(); _iter188 != (*_iter187).end(); ++_iter188) { - xfer += oprot->writeString((*_iter170)); + xfer += oprot->writeString((*_iter188)); } xfer += oprot->writeListEnd(); } @@ -3371,19 +4029,19 @@ uint32_t SkewedInfo::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("skewedColValueLocationMaps", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_LIST, ::apache::thrift::protocol::T_STRING, static_cast(this->skewedColValueLocationMaps.size())); - std::map , std::string> ::const_iterator _iter171; - for (_iter171 = this->skewedColValueLocationMaps.begin(); _iter171 != this->skewedColValueLocationMaps.end(); ++_iter171) + std::map , std::string> ::const_iterator _iter189; + for (_iter189 = this->skewedColValueLocationMaps.begin(); _iter189 != this->skewedColValueLocationMaps.end(); ++_iter189) { { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(_iter171->first.size())); - std::vector ::const_iterator _iter172; - for (_iter172 = _iter171->first.begin(); _iter172 != _iter171->first.end(); ++_iter172) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(_iter189->first.size())); + std::vector ::const_iterator _iter190; + for (_iter190 = _iter189->first.begin(); _iter190 != _iter189->first.end(); ++_iter190) { - xfer += oprot->writeString((*_iter172)); + xfer += oprot->writeString((*_iter190)); } xfer += oprot->writeListEnd(); } - xfer += oprot->writeString(_iter171->second); + xfer += oprot->writeString(_iter189->second); } xfer += oprot->writeMapEnd(); } @@ -3402,17 +4060,17 @@ void swap(SkewedInfo &a, SkewedInfo &b) { swap(a.__isset, b.__isset); } -SkewedInfo::SkewedInfo(const SkewedInfo& other173) { - skewedColNames = other173.skewedColNames; - skewedColValues = other173.skewedColValues; - 
skewedColValueLocationMaps = other173.skewedColValueLocationMaps; - __isset = other173.__isset; +SkewedInfo::SkewedInfo(const SkewedInfo& other191) { + skewedColNames = other191.skewedColNames; + skewedColValues = other191.skewedColValues; + skewedColValueLocationMaps = other191.skewedColValueLocationMaps; + __isset = other191.__isset; } -SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other174) { - skewedColNames = other174.skewedColNames; - skewedColValues = other174.skewedColValues; - skewedColValueLocationMaps = other174.skewedColValueLocationMaps; - __isset = other174.__isset; +SkewedInfo& SkewedInfo::operator=(const SkewedInfo& other192) { + skewedColNames = other192.skewedColNames; + skewedColValues = other192.skewedColValues; + skewedColValueLocationMaps = other192.skewedColValueLocationMaps; + __isset = other192.__isset; return *this; } void SkewedInfo::printTo(std::ostream& out) const { @@ -3504,14 +4162,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->cols.clear(); - uint32_t _size175; - ::apache::thrift::protocol::TType _etype178; - xfer += iprot->readListBegin(_etype178, _size175); - this->cols.resize(_size175); - uint32_t _i179; - for (_i179 = 0; _i179 < _size175; ++_i179) + uint32_t _size193; + ::apache::thrift::protocol::TType _etype196; + xfer += iprot->readListBegin(_etype196, _size193); + this->cols.resize(_size193); + uint32_t _i197; + for (_i197 = 0; _i197 < _size193; ++_i197) { - xfer += this->cols[_i179].read(iprot); + xfer += this->cols[_i197].read(iprot); } xfer += iprot->readListEnd(); } @@ -3572,14 +4230,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->bucketCols.clear(); - uint32_t _size180; - ::apache::thrift::protocol::TType _etype183; - xfer += iprot->readListBegin(_etype183, _size180); - this->bucketCols.resize(_size180); - uint32_t _i184; - for (_i184 = 0; _i184 < _size180; ++_i184) + uint32_t _size198; + ::apache::thrift::protocol::TType _etype201; + xfer += iprot->readListBegin(_etype201, _size198); + this->bucketCols.resize(_size198); + uint32_t _i202; + for (_i202 = 0; _i202 < _size198; ++_i202) { - xfer += iprot->readString(this->bucketCols[_i184]); + xfer += iprot->readString(this->bucketCols[_i202]); } xfer += iprot->readListEnd(); } @@ -3592,14 +4250,14 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->sortCols.clear(); - uint32_t _size185; - ::apache::thrift::protocol::TType _etype188; - xfer += iprot->readListBegin(_etype188, _size185); - this->sortCols.resize(_size185); - uint32_t _i189; - for (_i189 = 0; _i189 < _size185; ++_i189) + uint32_t _size203; + ::apache::thrift::protocol::TType _etype206; + xfer += iprot->readListBegin(_etype206, _size203); + this->sortCols.resize(_size203); + uint32_t _i207; + for (_i207 = 0; _i207 < _size203; ++_i207) { - xfer += this->sortCols[_i189].read(iprot); + xfer += this->sortCols[_i207].read(iprot); } xfer += iprot->readListEnd(); } @@ -3612,17 +4270,17 @@ uint32_t StorageDescriptor::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size190; - ::apache::thrift::protocol::TType _ktype191; - ::apache::thrift::protocol::TType _vtype192; - xfer += iprot->readMapBegin(_ktype191, _vtype192, _size190); - uint32_t _i194; - for (_i194 = 0; 
_i194 < _size190; ++_i194) + uint32_t _size208; + ::apache::thrift::protocol::TType _ktype209; + ::apache::thrift::protocol::TType _vtype210; + xfer += iprot->readMapBegin(_ktype209, _vtype210, _size208); + uint32_t _i212; + for (_i212 = 0; _i212 < _size208; ++_i212) { - std::string _key195; - xfer += iprot->readString(_key195); - std::string& _val196 = this->parameters[_key195]; - xfer += iprot->readString(_val196); + std::string _key213; + xfer += iprot->readString(_key213); + std::string& _val214 = this->parameters[_key213]; + xfer += iprot->readString(_val214); } xfer += iprot->readMapEnd(); } @@ -3667,10 +4325,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("cols", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->cols.size())); - std::vector ::const_iterator _iter197; - for (_iter197 = this->cols.begin(); _iter197 != this->cols.end(); ++_iter197) + std::vector ::const_iterator _iter215; + for (_iter215 = this->cols.begin(); _iter215 != this->cols.end(); ++_iter215) { - xfer += (*_iter197).write(oprot); + xfer += (*_iter215).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3703,10 +4361,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("bucketCols", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->bucketCols.size())); - std::vector ::const_iterator _iter198; - for (_iter198 = this->bucketCols.begin(); _iter198 != this->bucketCols.end(); ++_iter198) + std::vector ::const_iterator _iter216; + for (_iter216 = this->bucketCols.begin(); _iter216 != this->bucketCols.end(); ++_iter216) { - xfer += oprot->writeString((*_iter198)); + xfer += oprot->writeString((*_iter216)); } xfer += oprot->writeListEnd(); } @@ -3715,10 +4373,10 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("sortCols", ::apache::thrift::protocol::T_LIST, 9); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->sortCols.size())); - std::vector ::const_iterator _iter199; - for (_iter199 = this->sortCols.begin(); _iter199 != this->sortCols.end(); ++_iter199) + std::vector ::const_iterator _iter217; + for (_iter217 = this->sortCols.begin(); _iter217 != this->sortCols.end(); ++_iter217) { - xfer += (*_iter199).write(oprot); + xfer += (*_iter217).write(oprot); } xfer += oprot->writeListEnd(); } @@ -3727,11 +4385,11 @@ uint32_t StorageDescriptor::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 10); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter200; - for (_iter200 = this->parameters.begin(); _iter200 != this->parameters.end(); ++_iter200) + std::map ::const_iterator _iter218; + for (_iter218 = this->parameters.begin(); _iter218 != this->parameters.end(); ++_iter218) { - xfer += oprot->writeString(_iter200->first); - xfer += oprot->writeString(_iter200->second); + xfer += oprot->writeString(_iter218->first); + xfer += oprot->writeString(_iter218->second); } xfer += oprot->writeMapEnd(); } @@ -3769,35 +4427,35 @@ void swap(StorageDescriptor &a, StorageDescriptor &b) { swap(a.__isset, b.__isset); } 
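
Beyond the renumbering, the substantive change lands in the Table hunks that follow: two new list fields, primaryKeys (field id 15) and foreignKeys (field id 16), each gaining a __set_* helper, a read() case, an unconditional write() block, swap and copy-constructor coverage, and printTo output. Below is a simplified sketch of that wiring; SQLPrimaryKeyLike and TableLike are assumed stand-ins for the real generated types, not the generated API itself.

#include <string>
#include <vector>

struct SQLPrimaryKeyLike { std::string columnName; };  // assumed stand-in for SQLPrimaryKey

struct TableLike {
  struct Isset { bool primaryKeys = false; } __isset;  // presence flag, as in the generated _isset
  std::vector<SQLPrimaryKeyLike> primaryKeys;

  // Mirrors the generated __set_primaryKeys: assign only, no __isset update.
  void __set_primaryKeys(const std::vector<SQLPrimaryKeyLike>& val) {
    this->primaryKeys = val;
  }

  // Mirrors read() case 15: presence is recorded only after a wire read succeeds.
  void readFromWire(const std::vector<SQLPrimaryKeyLike>& decoded) {
    this->primaryKeys = decoded;
    this->__isset.primaryKeys = true;
  }
};

Note the asymmetry visible in the hunks themselves: read() sets __isset.primaryKeys, while the setter and write() handle the field unconditionally.
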
-StorageDescriptor::StorageDescriptor(const StorageDescriptor& other201) { - cols = other201.cols; - location = other201.location; - inputFormat = other201.inputFormat; - outputFormat = other201.outputFormat; - compressed = other201.compressed; - numBuckets = other201.numBuckets; - serdeInfo = other201.serdeInfo; - bucketCols = other201.bucketCols; - sortCols = other201.sortCols; - parameters = other201.parameters; - skewedInfo = other201.skewedInfo; - storedAsSubDirectories = other201.storedAsSubDirectories; - __isset = other201.__isset; -} -StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other202) { - cols = other202.cols; - location = other202.location; - inputFormat = other202.inputFormat; - outputFormat = other202.outputFormat; - compressed = other202.compressed; - numBuckets = other202.numBuckets; - serdeInfo = other202.serdeInfo; - bucketCols = other202.bucketCols; - sortCols = other202.sortCols; - parameters = other202.parameters; - skewedInfo = other202.skewedInfo; - storedAsSubDirectories = other202.storedAsSubDirectories; - __isset = other202.__isset; +StorageDescriptor::StorageDescriptor(const StorageDescriptor& other219) { + cols = other219.cols; + location = other219.location; + inputFormat = other219.inputFormat; + outputFormat = other219.outputFormat; + compressed = other219.compressed; + numBuckets = other219.numBuckets; + serdeInfo = other219.serdeInfo; + bucketCols = other219.bucketCols; + sortCols = other219.sortCols; + parameters = other219.parameters; + skewedInfo = other219.skewedInfo; + storedAsSubDirectories = other219.storedAsSubDirectories; + __isset = other219.__isset; +} +StorageDescriptor& StorageDescriptor::operator=(const StorageDescriptor& other220) { + cols = other220.cols; + location = other220.location; + inputFormat = other220.inputFormat; + outputFormat = other220.outputFormat; + compressed = other220.compressed; + numBuckets = other220.numBuckets; + serdeInfo = other220.serdeInfo; + bucketCols = other220.bucketCols; + sortCols = other220.sortCols; + parameters = other220.parameters; + skewedInfo = other220.skewedInfo; + storedAsSubDirectories = other220.storedAsSubDirectories; + __isset = other220.__isset; return *this; } void StorageDescriptor::printTo(std::ostream& out) const { @@ -3881,6 +4539,14 @@ void Table::__set_temporary(const bool val) { __isset.temporary = true; } +void Table::__set_primaryKeys(const std::vector & val) { + this->primaryKeys = val; +} + +void Table::__set_foreignKeys(const std::vector & val) { + this->foreignKeys = val; +} + uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); @@ -3962,14 +4628,14 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionKeys.clear(); - uint32_t _size203; - ::apache::thrift::protocol::TType _etype206; - xfer += iprot->readListBegin(_etype206, _size203); - this->partitionKeys.resize(_size203); - uint32_t _i207; - for (_i207 = 0; _i207 < _size203; ++_i207) + uint32_t _size221; + ::apache::thrift::protocol::TType _etype224; + xfer += iprot->readListBegin(_etype224, _size221); + this->partitionKeys.resize(_size221); + uint32_t _i225; + for (_i225 = 0; _i225 < _size221; ++_i225) { - xfer += this->partitionKeys[_i207].read(iprot); + xfer += this->partitionKeys[_i225].read(iprot); } xfer += iprot->readListEnd(); } @@ -3982,17 +4648,17 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { if 
(ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size208; - ::apache::thrift::protocol::TType _ktype209; - ::apache::thrift::protocol::TType _vtype210; - xfer += iprot->readMapBegin(_ktype209, _vtype210, _size208); - uint32_t _i212; - for (_i212 = 0; _i212 < _size208; ++_i212) + uint32_t _size226; + ::apache::thrift::protocol::TType _ktype227; + ::apache::thrift::protocol::TType _vtype228; + xfer += iprot->readMapBegin(_ktype227, _vtype228, _size226); + uint32_t _i230; + for (_i230 = 0; _i230 < _size226; ++_i230) { - std::string _key213; - xfer += iprot->readString(_key213); - std::string& _val214 = this->parameters[_key213]; - xfer += iprot->readString(_val214); + std::string _key231; + xfer += iprot->readString(_key231); + std::string& _val232 = this->parameters[_key231]; + xfer += iprot->readString(_val232); } xfer += iprot->readMapEnd(); } @@ -4041,6 +4707,46 @@ uint32_t Table::read(::apache::thrift::protocol::TProtocol* iprot) { xfer += iprot->skip(ftype); } break; + case 15: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->primaryKeys.clear(); + uint32_t _size233; + ::apache::thrift::protocol::TType _etype236; + xfer += iprot->readListBegin(_etype236, _size233); + this->primaryKeys.resize(_size233); + uint32_t _i237; + for (_i237 = 0; _i237 < _size233; ++_i237) + { + xfer += this->primaryKeys[_i237].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.primaryKeys = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 16: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->foreignKeys.clear(); + uint32_t _size238; + ::apache::thrift::protocol::TType _etype241; + xfer += iprot->readListBegin(_etype241, _size238); + this->foreignKeys.resize(_size238); + uint32_t _i242; + for (_i242 = 0; _i242 < _size238; ++_i242) + { + xfer += this->foreignKeys[_i242].read(iprot); + } + xfer += iprot->readListEnd(); + } + this->__isset.foreignKeys = true; + } else { + xfer += iprot->skip(ftype); + } + break; default: xfer += iprot->skip(ftype); break; @@ -4089,10 +4795,10 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("partitionKeys", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitionKeys.size())); - std::vector ::const_iterator _iter215; - for (_iter215 = this->partitionKeys.begin(); _iter215 != this->partitionKeys.end(); ++_iter215) + std::vector ::const_iterator _iter243; + for (_iter243 = this->partitionKeys.begin(); _iter243 != this->partitionKeys.end(); ++_iter243) { - xfer += (*_iter215).write(oprot); + xfer += (*_iter243).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4101,11 +4807,11 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter216; - for (_iter216 = this->parameters.begin(); _iter216 != this->parameters.end(); ++_iter216) + std::map ::const_iterator _iter244; + for (_iter244 = this->parameters.begin(); _iter244 != this->parameters.end(); ++_iter244) { - xfer += oprot->writeString(_iter216->first); - xfer += oprot->writeString(_iter216->second); + xfer += oprot->writeString(_iter244->first); + xfer += oprot->writeString(_iter244->second); } xfer += 
oprot->writeMapEnd(); } @@ -4133,6 +4839,30 @@ uint32_t Table::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeBool(this->temporary); xfer += oprot->writeFieldEnd(); } + xfer += oprot->writeFieldBegin("primaryKeys", ::apache::thrift::protocol::T_LIST, 15); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->primaryKeys.size())); + std::vector ::const_iterator _iter245; + for (_iter245 = this->primaryKeys.begin(); _iter245 != this->primaryKeys.end(); ++_iter245) + { + xfer += (*_iter245).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("foreignKeys", ::apache::thrift::protocol::T_LIST, 16); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->foreignKeys.size())); + std::vector ::const_iterator _iter246; + for (_iter246 = this->foreignKeys.begin(); _iter246 != this->foreignKeys.end(); ++_iter246) + { + xfer += (*_iter246).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + xfer += oprot->writeFieldStop(); xfer += oprot->writeStructEnd(); return xfer; @@ -4154,42 +4884,48 @@ void swap(Table &a, Table &b) { swap(a.tableType, b.tableType); swap(a.privileges, b.privileges); swap(a.temporary, b.temporary); + swap(a.primaryKeys, b.primaryKeys); + swap(a.foreignKeys, b.foreignKeys); swap(a.__isset, b.__isset); } -Table::Table(const Table& other217) { - tableName = other217.tableName; - dbName = other217.dbName; - owner = other217.owner; - createTime = other217.createTime; - lastAccessTime = other217.lastAccessTime; - retention = other217.retention; - sd = other217.sd; - partitionKeys = other217.partitionKeys; - parameters = other217.parameters; - viewOriginalText = other217.viewOriginalText; - viewExpandedText = other217.viewExpandedText; - tableType = other217.tableType; - privileges = other217.privileges; - temporary = other217.temporary; - __isset = other217.__isset; -} -Table& Table::operator=(const Table& other218) { - tableName = other218.tableName; - dbName = other218.dbName; - owner = other218.owner; - createTime = other218.createTime; - lastAccessTime = other218.lastAccessTime; - retention = other218.retention; - sd = other218.sd; - partitionKeys = other218.partitionKeys; - parameters = other218.parameters; - viewOriginalText = other218.viewOriginalText; - viewExpandedText = other218.viewExpandedText; - tableType = other218.tableType; - privileges = other218.privileges; - temporary = other218.temporary; - __isset = other218.__isset; +Table::Table(const Table& other247) { + tableName = other247.tableName; + dbName = other247.dbName; + owner = other247.owner; + createTime = other247.createTime; + lastAccessTime = other247.lastAccessTime; + retention = other247.retention; + sd = other247.sd; + partitionKeys = other247.partitionKeys; + parameters = other247.parameters; + viewOriginalText = other247.viewOriginalText; + viewExpandedText = other247.viewExpandedText; + tableType = other247.tableType; + privileges = other247.privileges; + temporary = other247.temporary; + primaryKeys = other247.primaryKeys; + foreignKeys = other247.foreignKeys; + __isset = other247.__isset; +} +Table& Table::operator=(const Table& other248) { + tableName = other248.tableName; + dbName = other248.dbName; + owner = other248.owner; + createTime = other248.createTime; + lastAccessTime = other248.lastAccessTime; + retention = other248.retention; + sd = other248.sd; + partitionKeys = 
other248.partitionKeys; + parameters = other248.parameters; + viewOriginalText = other248.viewOriginalText; + viewExpandedText = other248.viewExpandedText; + tableType = other248.tableType; + privileges = other248.privileges; + temporary = other248.temporary; + primaryKeys = other248.primaryKeys; + foreignKeys = other248.foreignKeys; + __isset = other248.__isset; return *this; } void Table::printTo(std::ostream& out) const { @@ -4209,6 +4945,8 @@ void Table::printTo(std::ostream& out) const { out << ", " << "tableType=" << to_string(tableType); out << ", " << "privileges="; (__isset.privileges ? (out << to_string(privileges)) : (out << "")); out << ", " << "temporary="; (__isset.temporary ? (out << to_string(temporary)) : (out << "")); + out << ", " << "primaryKeys=" << to_string(primaryKeys); + out << ", " << "foreignKeys=" << to_string(foreignKeys); out << ")"; } @@ -4275,14 +5013,14 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size219; - ::apache::thrift::protocol::TType _etype222; - xfer += iprot->readListBegin(_etype222, _size219); - this->values.resize(_size219); - uint32_t _i223; - for (_i223 = 0; _i223 < _size219; ++_i223) + uint32_t _size249; + ::apache::thrift::protocol::TType _etype252; + xfer += iprot->readListBegin(_etype252, _size249); + this->values.resize(_size249); + uint32_t _i253; + for (_i253 = 0; _i253 < _size249; ++_i253) { - xfer += iprot->readString(this->values[_i223]); + xfer += iprot->readString(this->values[_i253]); } xfer += iprot->readListEnd(); } @@ -4335,17 +5073,17 @@ uint32_t Partition::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size224; - ::apache::thrift::protocol::TType _ktype225; - ::apache::thrift::protocol::TType _vtype226; - xfer += iprot->readMapBegin(_ktype225, _vtype226, _size224); - uint32_t _i228; - for (_i228 = 0; _i228 < _size224; ++_i228) + uint32_t _size254; + ::apache::thrift::protocol::TType _ktype255; + ::apache::thrift::protocol::TType _vtype256; + xfer += iprot->readMapBegin(_ktype255, _vtype256, _size254); + uint32_t _i258; + for (_i258 = 0; _i258 < _size254; ++_i258) { - std::string _key229; - xfer += iprot->readString(_key229); - std::string& _val230 = this->parameters[_key229]; - xfer += iprot->readString(_val230); + std::string _key259; + xfer += iprot->readString(_key259); + std::string& _val260 = this->parameters[_key259]; + xfer += iprot->readString(_val260); } xfer += iprot->readMapEnd(); } @@ -4382,10 +5120,10 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter231; - for (_iter231 = this->values.begin(); _iter231 != this->values.end(); ++_iter231) + std::vector ::const_iterator _iter261; + for (_iter261 = this->values.begin(); _iter261 != this->values.end(); ++_iter261) { - xfer += oprot->writeString((*_iter231)); + xfer += oprot->writeString((*_iter261)); } xfer += oprot->writeListEnd(); } @@ -4414,11 +5152,11 @@ uint32_t Partition::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 7); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, 
::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter232; - for (_iter232 = this->parameters.begin(); _iter232 != this->parameters.end(); ++_iter232) + std::map ::const_iterator _iter262; + for (_iter262 = this->parameters.begin(); _iter262 != this->parameters.end(); ++_iter262) { - xfer += oprot->writeString(_iter232->first); - xfer += oprot->writeString(_iter232->second); + xfer += oprot->writeString(_iter262->first); + xfer += oprot->writeString(_iter262->second); } xfer += oprot->writeMapEnd(); } @@ -4447,27 +5185,27 @@ void swap(Partition &a, Partition &b) { swap(a.__isset, b.__isset); } -Partition::Partition(const Partition& other233) { - values = other233.values; - dbName = other233.dbName; - tableName = other233.tableName; - createTime = other233.createTime; - lastAccessTime = other233.lastAccessTime; - sd = other233.sd; - parameters = other233.parameters; - privileges = other233.privileges; - __isset = other233.__isset; -} -Partition& Partition::operator=(const Partition& other234) { - values = other234.values; - dbName = other234.dbName; - tableName = other234.tableName; - createTime = other234.createTime; - lastAccessTime = other234.lastAccessTime; - sd = other234.sd; - parameters = other234.parameters; - privileges = other234.privileges; - __isset = other234.__isset; +Partition::Partition(const Partition& other263) { + values = other263.values; + dbName = other263.dbName; + tableName = other263.tableName; + createTime = other263.createTime; + lastAccessTime = other263.lastAccessTime; + sd = other263.sd; + parameters = other263.parameters; + privileges = other263.privileges; + __isset = other263.__isset; +} +Partition& Partition::operator=(const Partition& other264) { + values = other264.values; + dbName = other264.dbName; + tableName = other264.tableName; + createTime = other264.createTime; + lastAccessTime = other264.lastAccessTime; + sd = other264.sd; + parameters = other264.parameters; + privileges = other264.privileges; + __isset = other264.__isset; return *this; } void Partition::printTo(std::ostream& out) const { @@ -4539,14 +5277,14 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->values.clear(); - uint32_t _size235; - ::apache::thrift::protocol::TType _etype238; - xfer += iprot->readListBegin(_etype238, _size235); - this->values.resize(_size235); - uint32_t _i239; - for (_i239 = 0; _i239 < _size235; ++_i239) + uint32_t _size265; + ::apache::thrift::protocol::TType _etype268; + xfer += iprot->readListBegin(_etype268, _size265); + this->values.resize(_size265); + uint32_t _i269; + for (_i269 = 0; _i269 < _size265; ++_i269) { - xfer += iprot->readString(this->values[_i239]); + xfer += iprot->readString(this->values[_i269]); } xfer += iprot->readListEnd(); } @@ -4583,17 +5321,17 @@ uint32_t PartitionWithoutSD::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size240; - ::apache::thrift::protocol::TType _ktype241; - ::apache::thrift::protocol::TType _vtype242; - xfer += iprot->readMapBegin(_ktype241, _vtype242, _size240); - uint32_t _i244; - for (_i244 = 0; _i244 < _size240; ++_i244) + uint32_t _size270; + ::apache::thrift::protocol::TType _ktype271; + ::apache::thrift::protocol::TType _vtype272; + xfer += iprot->readMapBegin(_ktype271, _vtype272, _size270); + uint32_t _i274; + for (_i274 = 0; _i274 < _size270; ++_i274) { - std::string 
_key245; - xfer += iprot->readString(_key245); - std::string& _val246 = this->parameters[_key245]; - xfer += iprot->readString(_val246); + std::string _key275; + xfer += iprot->readString(_key275); + std::string& _val276 = this->parameters[_key275]; + xfer += iprot->readString(_val276); } xfer += iprot->readMapEnd(); } @@ -4630,10 +5368,10 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("values", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->values.size())); - std::vector ::const_iterator _iter247; - for (_iter247 = this->values.begin(); _iter247 != this->values.end(); ++_iter247) + std::vector ::const_iterator _iter277; + for (_iter277 = this->values.begin(); _iter277 != this->values.end(); ++_iter277) { - xfer += oprot->writeString((*_iter247)); + xfer += oprot->writeString((*_iter277)); } xfer += oprot->writeListEnd(); } @@ -4654,11 +5392,11 @@ uint32_t PartitionWithoutSD::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 5); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter248; - for (_iter248 = this->parameters.begin(); _iter248 != this->parameters.end(); ++_iter248) + std::map ::const_iterator _iter278; + for (_iter278 = this->parameters.begin(); _iter278 != this->parameters.end(); ++_iter278) { - xfer += oprot->writeString(_iter248->first); - xfer += oprot->writeString(_iter248->second); + xfer += oprot->writeString(_iter278->first); + xfer += oprot->writeString(_iter278->second); } xfer += oprot->writeMapEnd(); } @@ -4685,23 +5423,23 @@ void swap(PartitionWithoutSD &a, PartitionWithoutSD &b) { swap(a.__isset, b.__isset); } -PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other249) { - values = other249.values; - createTime = other249.createTime; - lastAccessTime = other249.lastAccessTime; - relativePath = other249.relativePath; - parameters = other249.parameters; - privileges = other249.privileges; - __isset = other249.__isset; -} -PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other250) { - values = other250.values; - createTime = other250.createTime; - lastAccessTime = other250.lastAccessTime; - relativePath = other250.relativePath; - parameters = other250.parameters; - privileges = other250.privileges; - __isset = other250.__isset; +PartitionWithoutSD::PartitionWithoutSD(const PartitionWithoutSD& other279) { + values = other279.values; + createTime = other279.createTime; + lastAccessTime = other279.lastAccessTime; + relativePath = other279.relativePath; + parameters = other279.parameters; + privileges = other279.privileges; + __isset = other279.__isset; +} +PartitionWithoutSD& PartitionWithoutSD::operator=(const PartitionWithoutSD& other280) { + values = other280.values; + createTime = other280.createTime; + lastAccessTime = other280.lastAccessTime; + relativePath = other280.relativePath; + parameters = other280.parameters; + privileges = other280.privileges; + __isset = other280.__isset; return *this; } void PartitionWithoutSD::printTo(std::ostream& out) const { @@ -4754,14 +5492,14 @@ uint32_t PartitionSpecWithSharedSD::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size251; - 
::apache::thrift::protocol::TType _etype254; - xfer += iprot->readListBegin(_etype254, _size251); - this->partitions.resize(_size251); - uint32_t _i255; - for (_i255 = 0; _i255 < _size251; ++_i255) + uint32_t _size281; + ::apache::thrift::protocol::TType _etype284; + xfer += iprot->readListBegin(_etype284, _size281); + this->partitions.resize(_size281); + uint32_t _i285; + for (_i285 = 0; _i285 < _size281; ++_i285) { - xfer += this->partitions[_i255].read(iprot); + xfer += this->partitions[_i285].read(iprot); } xfer += iprot->readListEnd(); } @@ -4798,10 +5536,10 @@ uint32_t PartitionSpecWithSharedSD::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter256; - for (_iter256 = this->partitions.begin(); _iter256 != this->partitions.end(); ++_iter256) + std::vector ::const_iterator _iter286; + for (_iter286 = this->partitions.begin(); _iter286 != this->partitions.end(); ++_iter286) { - xfer += (*_iter256).write(oprot); + xfer += (*_iter286).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4823,15 +5561,15 @@ void swap(PartitionSpecWithSharedSD &a, PartitionSpecWithSharedSD &b) { swap(a.__isset, b.__isset); } -PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other257) { - partitions = other257.partitions; - sd = other257.sd; - __isset = other257.__isset; +PartitionSpecWithSharedSD::PartitionSpecWithSharedSD(const PartitionSpecWithSharedSD& other287) { + partitions = other287.partitions; + sd = other287.sd; + __isset = other287.__isset; } -PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other258) { - partitions = other258.partitions; - sd = other258.sd; - __isset = other258.__isset; +PartitionSpecWithSharedSD& PartitionSpecWithSharedSD::operator=(const PartitionSpecWithSharedSD& other288) { + partitions = other288.partitions; + sd = other288.sd; + __isset = other288.__isset; return *this; } void PartitionSpecWithSharedSD::printTo(std::ostream& out) const { @@ -4876,14 +5614,14 @@ uint32_t PartitionListComposingSpec::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size259; - ::apache::thrift::protocol::TType _etype262; - xfer += iprot->readListBegin(_etype262, _size259); - this->partitions.resize(_size259); - uint32_t _i263; - for (_i263 = 0; _i263 < _size259; ++_i263) + uint32_t _size289; + ::apache::thrift::protocol::TType _etype292; + xfer += iprot->readListBegin(_etype292, _size289); + this->partitions.resize(_size289); + uint32_t _i293; + for (_i293 = 0; _i293 < _size289; ++_i293) { - xfer += this->partitions[_i263].read(iprot); + xfer += this->partitions[_i293].read(iprot); } xfer += iprot->readListEnd(); } @@ -4912,10 +5650,10 @@ uint32_t PartitionListComposingSpec::write(::apache::thrift::protocol::TProtocol xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter264; - for (_iter264 = this->partitions.begin(); _iter264 != this->partitions.end(); ++_iter264) + std::vector ::const_iterator _iter294; + for (_iter294 = this->partitions.begin(); _iter294 != this->partitions.end(); ++_iter294) { - xfer += (*_iter264).write(oprot); 
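
Every parameters field in these structs (Database, SerDeInfo, StorageDescriptor, Table, Partition, PartitionWithoutSD, Index) is read with the same map loop: readMapBegin reports the key and value wire types plus the entry count, then each value is deserialized directly into the slot that operator[] creates. A self-contained sketch of that loop, again under the assumption of a hypothetical FakeMapProtocol rather than thrift's real protocol class:

#include <cstdint>
#include <map>
#include <string>

struct FakeMapProtocol {               // assumed stand-in for TProtocol
  uint32_t readMapBegin(uint8_t& kt, uint8_t& vt, uint32_t& n) { kt = vt = 11; n = 1; return 6; }
  uint32_t readString(std::string& s) { s = "k"; return 5; }
  uint32_t readMapEnd() { return 0; }
};

static uint32_t readParameters(FakeMapProtocol* iprot, std::map<std::string, std::string>& params) {
  uint32_t xfer = 0;
  params.clear();
  uint32_t _size;                      // mirrors _size118 / _size136 above
  uint8_t _ktype, _vtype;
  xfer += iprot->readMapBegin(_ktype, _vtype, _size);
  for (uint32_t _i = 0; _i < _size; ++_i) {
    std::string _key;
    xfer += iprot->readString(_key);
    std::string& _val = params[_key];  // operator[] default-constructs the slot
    xfer += iprot->readString(_val);   // then the value is read in place, as in the hunks
  }
  xfer += iprot->readMapEnd();
  return xfer;
}
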
+ xfer += (*_iter294).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4932,13 +5670,13 @@ void swap(PartitionListComposingSpec &a, PartitionListComposingSpec &b) { swap(a.__isset, b.__isset); } -PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other265) { - partitions = other265.partitions; - __isset = other265.__isset; +PartitionListComposingSpec::PartitionListComposingSpec(const PartitionListComposingSpec& other295) { + partitions = other295.partitions; + __isset = other295.__isset; } -PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other266) { - partitions = other266.partitions; - __isset = other266.__isset; +PartitionListComposingSpec& PartitionListComposingSpec::operator=(const PartitionListComposingSpec& other296) { + partitions = other296.partitions; + __isset = other296.__isset; return *this; } void PartitionListComposingSpec::printTo(std::ostream& out) const { @@ -5090,21 +5828,21 @@ void swap(PartitionSpec &a, PartitionSpec &b) { swap(a.__isset, b.__isset); } -PartitionSpec::PartitionSpec(const PartitionSpec& other267) { - dbName = other267.dbName; - tableName = other267.tableName; - rootPath = other267.rootPath; - sharedSDPartitionSpec = other267.sharedSDPartitionSpec; - partitionList = other267.partitionList; - __isset = other267.__isset; -} -PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other268) { - dbName = other268.dbName; - tableName = other268.tableName; - rootPath = other268.rootPath; - sharedSDPartitionSpec = other268.sharedSDPartitionSpec; - partitionList = other268.partitionList; - __isset = other268.__isset; +PartitionSpec::PartitionSpec(const PartitionSpec& other297) { + dbName = other297.dbName; + tableName = other297.tableName; + rootPath = other297.rootPath; + sharedSDPartitionSpec = other297.sharedSDPartitionSpec; + partitionList = other297.partitionList; + __isset = other297.__isset; +} +PartitionSpec& PartitionSpec::operator=(const PartitionSpec& other298) { + dbName = other298.dbName; + tableName = other298.tableName; + rootPath = other298.rootPath; + sharedSDPartitionSpec = other298.sharedSDPartitionSpec; + partitionList = other298.partitionList; + __isset = other298.__isset; return *this; } void PartitionSpec::printTo(std::ostream& out) const { @@ -5252,17 +5990,17 @@ uint32_t Index::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->parameters.clear(); - uint32_t _size269; - ::apache::thrift::protocol::TType _ktype270; - ::apache::thrift::protocol::TType _vtype271; - xfer += iprot->readMapBegin(_ktype270, _vtype271, _size269); - uint32_t _i273; - for (_i273 = 0; _i273 < _size269; ++_i273) + uint32_t _size299; + ::apache::thrift::protocol::TType _ktype300; + ::apache::thrift::protocol::TType _vtype301; + xfer += iprot->readMapBegin(_ktype300, _vtype301, _size299); + uint32_t _i303; + for (_i303 = 0; _i303 < _size299; ++_i303) { - std::string _key274; - xfer += iprot->readString(_key274); - std::string& _val275 = this->parameters[_key274]; - xfer += iprot->readString(_val275); + std::string _key304; + xfer += iprot->readString(_key304); + std::string& _val305 = this->parameters[_key304]; + xfer += iprot->readString(_val305); } xfer += iprot->readMapEnd(); } @@ -5331,11 +6069,11 @@ uint32_t Index::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("parameters", ::apache::thrift::protocol::T_MAP, 9); { xfer += 
oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->parameters.size())); - std::map ::const_iterator _iter276; - for (_iter276 = this->parameters.begin(); _iter276 != this->parameters.end(); ++_iter276) + std::map ::const_iterator _iter306; + for (_iter306 = this->parameters.begin(); _iter306 != this->parameters.end(); ++_iter306) { - xfer += oprot->writeString(_iter276->first); - xfer += oprot->writeString(_iter276->second); + xfer += oprot->writeString(_iter306->first); + xfer += oprot->writeString(_iter306->second); } xfer += oprot->writeMapEnd(); } @@ -5365,31 +6103,31 @@ void swap(Index &a, Index &b) { swap(a.__isset, b.__isset); } -Index::Index(const Index& other277) { - indexName = other277.indexName; - indexHandlerClass = other277.indexHandlerClass; - dbName = other277.dbName; - origTableName = other277.origTableName; - createTime = other277.createTime; - lastAccessTime = other277.lastAccessTime; - indexTableName = other277.indexTableName; - sd = other277.sd; - parameters = other277.parameters; - deferredRebuild = other277.deferredRebuild; - __isset = other277.__isset; -} -Index& Index::operator=(const Index& other278) { - indexName = other278.indexName; - indexHandlerClass = other278.indexHandlerClass; - dbName = other278.dbName; - origTableName = other278.origTableName; - createTime = other278.createTime; - lastAccessTime = other278.lastAccessTime; - indexTableName = other278.indexTableName; - sd = other278.sd; - parameters = other278.parameters; - deferredRebuild = other278.deferredRebuild; - __isset = other278.__isset; +Index::Index(const Index& other307) { + indexName = other307.indexName; + indexHandlerClass = other307.indexHandlerClass; + dbName = other307.dbName; + origTableName = other307.origTableName; + createTime = other307.createTime; + lastAccessTime = other307.lastAccessTime; + indexTableName = other307.indexTableName; + sd = other307.sd; + parameters = other307.parameters; + deferredRebuild = other307.deferredRebuild; + __isset = other307.__isset; +} +Index& Index::operator=(const Index& other308) { + indexName = other308.indexName; + indexHandlerClass = other308.indexHandlerClass; + dbName = other308.dbName; + origTableName = other308.origTableName; + createTime = other308.createTime; + lastAccessTime = other308.lastAccessTime; + indexTableName = other308.indexTableName; + sd = other308.sd; + parameters = other308.parameters; + deferredRebuild = other308.deferredRebuild; + __isset = other308.__isset; return *this; } void Index::printTo(std::ostream& out) const { @@ -5540,19 +6278,19 @@ void swap(BooleanColumnStatsData &a, BooleanColumnStatsData &b) { swap(a.__isset, b.__isset); } -BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other279) { - numTrues = other279.numTrues; - numFalses = other279.numFalses; - numNulls = other279.numNulls; - bitVectors = other279.bitVectors; - __isset = other279.__isset; +BooleanColumnStatsData::BooleanColumnStatsData(const BooleanColumnStatsData& other309) { + numTrues = other309.numTrues; + numFalses = other309.numFalses; + numNulls = other309.numNulls; + bitVectors = other309.bitVectors; + __isset = other309.__isset; } -BooleanColumnStatsData& BooleanColumnStatsData::operator=(const BooleanColumnStatsData& other280) { - numTrues = other280.numTrues; - numFalses = other280.numFalses; - numNulls = other280.numNulls; - bitVectors = other280.bitVectors; - __isset = other280.__isset; +BooleanColumnStatsData& BooleanColumnStatsData::operator=(const 
BooleanColumnStatsData& other310) { + numTrues = other310.numTrues; + numFalses = other310.numFalses; + numNulls = other310.numNulls; + bitVectors = other310.bitVectors; + __isset = other310.__isset; return *this; } void BooleanColumnStatsData::printTo(std::ostream& out) const { @@ -5715,21 +6453,21 @@ void swap(DoubleColumnStatsData &a, DoubleColumnStatsData &b) { swap(a.__isset, b.__isset); } -DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other281) { - lowValue = other281.lowValue; - highValue = other281.highValue; - numNulls = other281.numNulls; - numDVs = other281.numDVs; - bitVectors = other281.bitVectors; - __isset = other281.__isset; -} -DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other282) { - lowValue = other282.lowValue; - highValue = other282.highValue; - numNulls = other282.numNulls; - numDVs = other282.numDVs; - bitVectors = other282.bitVectors; - __isset = other282.__isset; +DoubleColumnStatsData::DoubleColumnStatsData(const DoubleColumnStatsData& other311) { + lowValue = other311.lowValue; + highValue = other311.highValue; + numNulls = other311.numNulls; + numDVs = other311.numDVs; + bitVectors = other311.bitVectors; + __isset = other311.__isset; +} +DoubleColumnStatsData& DoubleColumnStatsData::operator=(const DoubleColumnStatsData& other312) { + lowValue = other312.lowValue; + highValue = other312.highValue; + numNulls = other312.numNulls; + numDVs = other312.numDVs; + bitVectors = other312.bitVectors; + __isset = other312.__isset; return *this; } void DoubleColumnStatsData::printTo(std::ostream& out) const { @@ -5893,21 +6631,21 @@ void swap(LongColumnStatsData &a, LongColumnStatsData &b) { swap(a.__isset, b.__isset); } -LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other283) { - lowValue = other283.lowValue; - highValue = other283.highValue; - numNulls = other283.numNulls; - numDVs = other283.numDVs; - bitVectors = other283.bitVectors; - __isset = other283.__isset; -} -LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other284) { - lowValue = other284.lowValue; - highValue = other284.highValue; - numNulls = other284.numNulls; - numDVs = other284.numDVs; - bitVectors = other284.bitVectors; - __isset = other284.__isset; +LongColumnStatsData::LongColumnStatsData(const LongColumnStatsData& other313) { + lowValue = other313.lowValue; + highValue = other313.highValue; + numNulls = other313.numNulls; + numDVs = other313.numDVs; + bitVectors = other313.bitVectors; + __isset = other313.__isset; +} +LongColumnStatsData& LongColumnStatsData::operator=(const LongColumnStatsData& other314) { + lowValue = other314.lowValue; + highValue = other314.highValue; + numNulls = other314.numNulls; + numDVs = other314.numDVs; + bitVectors = other314.bitVectors; + __isset = other314.__isset; return *this; } void LongColumnStatsData::printTo(std::ostream& out) const { @@ -6073,21 +6811,21 @@ void swap(StringColumnStatsData &a, StringColumnStatsData &b) { swap(a.__isset, b.__isset); } -StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other285) { - maxColLen = other285.maxColLen; - avgColLen = other285.avgColLen; - numNulls = other285.numNulls; - numDVs = other285.numDVs; - bitVectors = other285.bitVectors; - __isset = other285.__isset; -} -StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other286) { - maxColLen = other286.maxColLen; - avgColLen = other286.avgColLen; - numNulls = other286.numNulls; - numDVs = 
other286.numDVs; - bitVectors = other286.bitVectors; - __isset = other286.__isset; +StringColumnStatsData::StringColumnStatsData(const StringColumnStatsData& other315) { + maxColLen = other315.maxColLen; + avgColLen = other315.avgColLen; + numNulls = other315.numNulls; + numDVs = other315.numDVs; + bitVectors = other315.bitVectors; + __isset = other315.__isset; +} +StringColumnStatsData& StringColumnStatsData::operator=(const StringColumnStatsData& other316) { + maxColLen = other316.maxColLen; + avgColLen = other316.avgColLen; + numNulls = other316.numNulls; + numDVs = other316.numDVs; + bitVectors = other316.bitVectors; + __isset = other316.__isset; return *this; } void StringColumnStatsData::printTo(std::ostream& out) const { @@ -6233,19 +6971,19 @@ void swap(BinaryColumnStatsData &a, BinaryColumnStatsData &b) { swap(a.__isset, b.__isset); } -BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other287) { - maxColLen = other287.maxColLen; - avgColLen = other287.avgColLen; - numNulls = other287.numNulls; - bitVectors = other287.bitVectors; - __isset = other287.__isset; +BinaryColumnStatsData::BinaryColumnStatsData(const BinaryColumnStatsData& other317) { + maxColLen = other317.maxColLen; + avgColLen = other317.avgColLen; + numNulls = other317.numNulls; + bitVectors = other317.bitVectors; + __isset = other317.__isset; } -BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other288) { - maxColLen = other288.maxColLen; - avgColLen = other288.avgColLen; - numNulls = other288.numNulls; - bitVectors = other288.bitVectors; - __isset = other288.__isset; +BinaryColumnStatsData& BinaryColumnStatsData::operator=(const BinaryColumnStatsData& other318) { + maxColLen = other318.maxColLen; + avgColLen = other318.avgColLen; + numNulls = other318.numNulls; + bitVectors = other318.bitVectors; + __isset = other318.__isset; return *this; } void BinaryColumnStatsData::printTo(std::ostream& out) const { @@ -6350,13 +7088,13 @@ void swap(Decimal &a, Decimal &b) { swap(a.scale, b.scale); } -Decimal::Decimal(const Decimal& other289) { - unscaled = other289.unscaled; - scale = other289.scale; +Decimal::Decimal(const Decimal& other319) { + unscaled = other319.unscaled; + scale = other319.scale; } -Decimal& Decimal::operator=(const Decimal& other290) { - unscaled = other290.unscaled; - scale = other290.scale; +Decimal& Decimal::operator=(const Decimal& other320) { + unscaled = other320.unscaled; + scale = other320.scale; return *this; } void Decimal::printTo(std::ostream& out) const { @@ -6517,21 +7255,21 @@ void swap(DecimalColumnStatsData &a, DecimalColumnStatsData &b) { swap(a.__isset, b.__isset); } -DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other291) { - lowValue = other291.lowValue; - highValue = other291.highValue; - numNulls = other291.numNulls; - numDVs = other291.numDVs; - bitVectors = other291.bitVectors; - __isset = other291.__isset; -} -DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other292) { - lowValue = other292.lowValue; - highValue = other292.highValue; - numNulls = other292.numNulls; - numDVs = other292.numDVs; - bitVectors = other292.bitVectors; - __isset = other292.__isset; +DecimalColumnStatsData::DecimalColumnStatsData(const DecimalColumnStatsData& other321) { + lowValue = other321.lowValue; + highValue = other321.highValue; + numNulls = other321.numNulls; + numDVs = other321.numDVs; + bitVectors = other321.bitVectors; + __isset = other321.__isset; +} 
+DecimalColumnStatsData& DecimalColumnStatsData::operator=(const DecimalColumnStatsData& other322) { + lowValue = other322.lowValue; + highValue = other322.highValue; + numNulls = other322.numNulls; + numDVs = other322.numDVs; + bitVectors = other322.bitVectors; + __isset = other322.__isset; return *this; } void DecimalColumnStatsData::printTo(std::ostream& out) const { @@ -6617,11 +7355,11 @@ void swap(Date &a, Date &b) { swap(a.daysSinceEpoch, b.daysSinceEpoch); } -Date::Date(const Date& other293) { - daysSinceEpoch = other293.daysSinceEpoch; +Date::Date(const Date& other323) { + daysSinceEpoch = other323.daysSinceEpoch; } -Date& Date::operator=(const Date& other294) { - daysSinceEpoch = other294.daysSinceEpoch; +Date& Date::operator=(const Date& other324) { + daysSinceEpoch = other324.daysSinceEpoch; return *this; } void Date::printTo(std::ostream& out) const { @@ -6781,21 +7519,21 @@ void swap(DateColumnStatsData &a, DateColumnStatsData &b) { swap(a.__isset, b.__isset); } -DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other295) { - lowValue = other295.lowValue; - highValue = other295.highValue; - numNulls = other295.numNulls; - numDVs = other295.numDVs; - bitVectors = other295.bitVectors; - __isset = other295.__isset; -} -DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other296) { - lowValue = other296.lowValue; - highValue = other296.highValue; - numNulls = other296.numNulls; - numDVs = other296.numDVs; - bitVectors = other296.bitVectors; - __isset = other296.__isset; +DateColumnStatsData::DateColumnStatsData(const DateColumnStatsData& other325) { + lowValue = other325.lowValue; + highValue = other325.highValue; + numNulls = other325.numNulls; + numDVs = other325.numDVs; + bitVectors = other325.bitVectors; + __isset = other325.__isset; +} +DateColumnStatsData& DateColumnStatsData::operator=(const DateColumnStatsData& other326) { + lowValue = other326.lowValue; + highValue = other326.highValue; + numNulls = other326.numNulls; + numDVs = other326.numDVs; + bitVectors = other326.bitVectors; + __isset = other326.__isset; return *this; } void DateColumnStatsData::printTo(std::ostream& out) const { @@ -6981,25 +7719,25 @@ void swap(ColumnStatisticsData &a, ColumnStatisticsData &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other297) { - booleanStats = other297.booleanStats; - longStats = other297.longStats; - doubleStats = other297.doubleStats; - stringStats = other297.stringStats; - binaryStats = other297.binaryStats; - decimalStats = other297.decimalStats; - dateStats = other297.dateStats; - __isset = other297.__isset; -} -ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other298) { - booleanStats = other298.booleanStats; - longStats = other298.longStats; - doubleStats = other298.doubleStats; - stringStats = other298.stringStats; - binaryStats = other298.binaryStats; - decimalStats = other298.decimalStats; - dateStats = other298.dateStats; - __isset = other298.__isset; +ColumnStatisticsData::ColumnStatisticsData(const ColumnStatisticsData& other327) { + booleanStats = other327.booleanStats; + longStats = other327.longStats; + doubleStats = other327.doubleStats; + stringStats = other327.stringStats; + binaryStats = other327.binaryStats; + decimalStats = other327.decimalStats; + dateStats = other327.dateStats; + __isset = other327.__isset; +} +ColumnStatisticsData& ColumnStatisticsData::operator=(const ColumnStatisticsData& other328) { + 
booleanStats = other328.booleanStats; + longStats = other328.longStats; + doubleStats = other328.doubleStats; + stringStats = other328.stringStats; + binaryStats = other328.binaryStats; + decimalStats = other328.decimalStats; + dateStats = other328.dateStats; + __isset = other328.__isset; return *this; } void ColumnStatisticsData::printTo(std::ostream& out) const { @@ -7127,15 +7865,15 @@ void swap(ColumnStatisticsObj &a, ColumnStatisticsObj &b) { swap(a.statsData, b.statsData); } -ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other299) { - colName = other299.colName; - colType = other299.colType; - statsData = other299.statsData; +ColumnStatisticsObj::ColumnStatisticsObj(const ColumnStatisticsObj& other329) { + colName = other329.colName; + colType = other329.colType; + statsData = other329.statsData; } -ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other300) { - colName = other300.colName; - colType = other300.colType; - statsData = other300.statsData; +ColumnStatisticsObj& ColumnStatisticsObj::operator=(const ColumnStatisticsObj& other330) { + colName = other330.colName; + colType = other330.colType; + statsData = other330.statsData; return *this; } void ColumnStatisticsObj::printTo(std::ostream& out) const { @@ -7298,21 +8036,21 @@ void swap(ColumnStatisticsDesc &a, ColumnStatisticsDesc &b) { swap(a.__isset, b.__isset); } -ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other301) { - isTblLevel = other301.isTblLevel; - dbName = other301.dbName; - tableName = other301.tableName; - partName = other301.partName; - lastAnalyzed = other301.lastAnalyzed; - __isset = other301.__isset; -} -ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other302) { - isTblLevel = other302.isTblLevel; - dbName = other302.dbName; - tableName = other302.tableName; - partName = other302.partName; - lastAnalyzed = other302.lastAnalyzed; - __isset = other302.__isset; +ColumnStatisticsDesc::ColumnStatisticsDesc(const ColumnStatisticsDesc& other331) { + isTblLevel = other331.isTblLevel; + dbName = other331.dbName; + tableName = other331.tableName; + partName = other331.partName; + lastAnalyzed = other331.lastAnalyzed; + __isset = other331.__isset; +} +ColumnStatisticsDesc& ColumnStatisticsDesc::operator=(const ColumnStatisticsDesc& other332) { + isTblLevel = other332.isTblLevel; + dbName = other332.dbName; + tableName = other332.tableName; + partName = other332.partName; + lastAnalyzed = other332.lastAnalyzed; + __isset = other332.__isset; return *this; } void ColumnStatisticsDesc::printTo(std::ostream& out) const { @@ -7374,14 +8112,14 @@ uint32_t ColumnStatistics::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->statsObj.clear(); - uint32_t _size303; - ::apache::thrift::protocol::TType _etype306; - xfer += iprot->readListBegin(_etype306, _size303); - this->statsObj.resize(_size303); - uint32_t _i307; - for (_i307 = 0; _i307 < _size303; ++_i307) + uint32_t _size333; + ::apache::thrift::protocol::TType _etype336; + xfer += iprot->readListBegin(_etype336, _size333); + this->statsObj.resize(_size333); + uint32_t _i337; + for (_i337 = 0; _i337 < _size333; ++_i337) { - xfer += this->statsObj[_i307].read(iprot); + xfer += this->statsObj[_i337].read(iprot); } xfer += iprot->readListEnd(); } @@ -7418,10 +8156,10 @@ uint32_t ColumnStatistics::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("statsObj", 
::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->statsObj.size())); - std::vector ::const_iterator _iter308; - for (_iter308 = this->statsObj.begin(); _iter308 != this->statsObj.end(); ++_iter308) + std::vector ::const_iterator _iter338; + for (_iter338 = this->statsObj.begin(); _iter338 != this->statsObj.end(); ++_iter338) { - xfer += (*_iter308).write(oprot); + xfer += (*_iter338).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7438,13 +8176,13 @@ void swap(ColumnStatistics &a, ColumnStatistics &b) { swap(a.statsObj, b.statsObj); } -ColumnStatistics::ColumnStatistics(const ColumnStatistics& other309) { - statsDesc = other309.statsDesc; - statsObj = other309.statsObj; +ColumnStatistics::ColumnStatistics(const ColumnStatistics& other339) { + statsDesc = other339.statsDesc; + statsObj = other339.statsObj; } -ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other310) { - statsDesc = other310.statsDesc; - statsObj = other310.statsObj; +ColumnStatistics& ColumnStatistics::operator=(const ColumnStatistics& other340) { + statsDesc = other340.statsDesc; + statsObj = other340.statsObj; return *this; } void ColumnStatistics::printTo(std::ostream& out) const { @@ -7495,14 +8233,14 @@ uint32_t AggrStats::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size311; - ::apache::thrift::protocol::TType _etype314; - xfer += iprot->readListBegin(_etype314, _size311); - this->colStats.resize(_size311); - uint32_t _i315; - for (_i315 = 0; _i315 < _size311; ++_i315) + uint32_t _size341; + ::apache::thrift::protocol::TType _etype344; + xfer += iprot->readListBegin(_etype344, _size341); + this->colStats.resize(_size341); + uint32_t _i345; + for (_i345 = 0; _i345 < _size341; ++_i345) { - xfer += this->colStats[_i315].read(iprot); + xfer += this->colStats[_i345].read(iprot); } xfer += iprot->readListEnd(); } @@ -7543,10 +8281,10 @@ uint32_t AggrStats::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter316; - for (_iter316 = this->colStats.begin(); _iter316 != this->colStats.end(); ++_iter316) + std::vector ::const_iterator _iter346; + for (_iter346 = this->colStats.begin(); _iter346 != this->colStats.end(); ++_iter346) { - xfer += (*_iter316).write(oprot); + xfer += (*_iter346).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7567,13 +8305,13 @@ void swap(AggrStats &a, AggrStats &b) { swap(a.partsFound, b.partsFound); } -AggrStats::AggrStats(const AggrStats& other317) { - colStats = other317.colStats; - partsFound = other317.partsFound; +AggrStats::AggrStats(const AggrStats& other347) { + colStats = other347.colStats; + partsFound = other347.partsFound; } -AggrStats& AggrStats::operator=(const AggrStats& other318) { - colStats = other318.colStats; - partsFound = other318.partsFound; +AggrStats& AggrStats::operator=(const AggrStats& other348) { + colStats = other348.colStats; + partsFound = other348.partsFound; return *this; } void AggrStats::printTo(std::ostream& out) const { @@ -7619,14 +8357,14 @@ uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colStats.clear(); - uint32_t _size319; - 
::apache::thrift::protocol::TType _etype322; - xfer += iprot->readListBegin(_etype322, _size319); - this->colStats.resize(_size319); - uint32_t _i323; - for (_i323 = 0; _i323 < _size319; ++_i323) + uint32_t _size349; + ::apache::thrift::protocol::TType _etype352; + xfer += iprot->readListBegin(_etype352, _size349); + this->colStats.resize(_size349); + uint32_t _i353; + for (_i353 = 0; _i353 < _size349; ++_i353) { - xfer += this->colStats[_i323].read(iprot); + xfer += this->colStats[_i353].read(iprot); } xfer += iprot->readListEnd(); } @@ -7657,10 +8395,10 @@ uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->colStats.size())); - std::vector ::const_iterator _iter324; - for (_iter324 = this->colStats.begin(); _iter324 != this->colStats.end(); ++_iter324) + std::vector ::const_iterator _iter354; + for (_iter354 = this->colStats.begin(); _iter354 != this->colStats.end(); ++_iter354) { - xfer += (*_iter324).write(oprot); + xfer += (*_iter354).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7676,11 +8414,11 @@ void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { swap(a.colStats, b.colStats); } -SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other325) { - colStats = other325.colStats; +SetPartitionsStatsRequest::SetPartitionsStatsRequest(const SetPartitionsStatsRequest& other355) { + colStats = other355.colStats; } -SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other326) { - colStats = other326.colStats; +SetPartitionsStatsRequest& SetPartitionsStatsRequest::operator=(const SetPartitionsStatsRequest& other356) { + colStats = other356.colStats; return *this; } void SetPartitionsStatsRequest::printTo(std::ostream& out) const { @@ -7728,14 +8466,14 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fieldSchemas.clear(); - uint32_t _size327; - ::apache::thrift::protocol::TType _etype330; - xfer += iprot->readListBegin(_etype330, _size327); - this->fieldSchemas.resize(_size327); - uint32_t _i331; - for (_i331 = 0; _i331 < _size327; ++_i331) + uint32_t _size357; + ::apache::thrift::protocol::TType _etype360; + xfer += iprot->readListBegin(_etype360, _size357); + this->fieldSchemas.resize(_size357); + uint32_t _i361; + for (_i361 = 0; _i361 < _size357; ++_i361) { - xfer += this->fieldSchemas[_i331].read(iprot); + xfer += this->fieldSchemas[_i361].read(iprot); } xfer += iprot->readListEnd(); } @@ -7748,17 +8486,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size332; - ::apache::thrift::protocol::TType _ktype333; - ::apache::thrift::protocol::TType _vtype334; - xfer += iprot->readMapBegin(_ktype333, _vtype334, _size332); - uint32_t _i336; - for (_i336 = 0; _i336 < _size332; ++_i336) + uint32_t _size362; + ::apache::thrift::protocol::TType _ktype363; + ::apache::thrift::protocol::TType _vtype364; + xfer += iprot->readMapBegin(_ktype363, _vtype364, _size362); + uint32_t _i366; + for (_i366 = 0; _i366 < _size362; ++_i366) { - std::string _key337; - xfer += iprot->readString(_key337); - std::string& _val338 = this->properties[_key337]; - xfer += iprot->readString(_val338); + std::string _key367; + xfer 
+= iprot->readString(_key367); + std::string& _val368 = this->properties[_key367]; + xfer += iprot->readString(_val368); } xfer += iprot->readMapEnd(); } @@ -7787,10 +8525,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter339; - for (_iter339 = this->fieldSchemas.begin(); _iter339 != this->fieldSchemas.end(); ++_iter339) + std::vector ::const_iterator _iter369; + for (_iter369 = this->fieldSchemas.begin(); _iter369 != this->fieldSchemas.end(); ++_iter369) { - xfer += (*_iter339).write(oprot); + xfer += (*_iter369).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7799,11 +8537,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter340; - for (_iter340 = this->properties.begin(); _iter340 != this->properties.end(); ++_iter340) + std::map ::const_iterator _iter370; + for (_iter370 = this->properties.begin(); _iter370 != this->properties.end(); ++_iter370) { - xfer += oprot->writeString(_iter340->first); - xfer += oprot->writeString(_iter340->second); + xfer += oprot->writeString(_iter370->first); + xfer += oprot->writeString(_iter370->second); } xfer += oprot->writeMapEnd(); } @@ -7821,15 +8559,15 @@ void swap(Schema &a, Schema &b) { swap(a.__isset, b.__isset); } -Schema::Schema(const Schema& other341) { - fieldSchemas = other341.fieldSchemas; - properties = other341.properties; - __isset = other341.__isset; +Schema::Schema(const Schema& other371) { + fieldSchemas = other371.fieldSchemas; + properties = other371.properties; + __isset = other371.__isset; } -Schema& Schema::operator=(const Schema& other342) { - fieldSchemas = other342.fieldSchemas; - properties = other342.properties; - __isset = other342.__isset; +Schema& Schema::operator=(const Schema& other372) { + fieldSchemas = other372.fieldSchemas; + properties = other372.properties; + __isset = other372.__isset; return *this; } void Schema::printTo(std::ostream& out) const { @@ -7874,17 +8612,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size343; - ::apache::thrift::protocol::TType _ktype344; - ::apache::thrift::protocol::TType _vtype345; - xfer += iprot->readMapBegin(_ktype344, _vtype345, _size343); - uint32_t _i347; - for (_i347 = 0; _i347 < _size343; ++_i347) + uint32_t _size373; + ::apache::thrift::protocol::TType _ktype374; + ::apache::thrift::protocol::TType _vtype375; + xfer += iprot->readMapBegin(_ktype374, _vtype375, _size373); + uint32_t _i377; + for (_i377 = 0; _i377 < _size373; ++_i377) { - std::string _key348; - xfer += iprot->readString(_key348); - std::string& _val349 = this->properties[_key348]; - xfer += iprot->readString(_val349); + std::string _key378; + xfer += iprot->readString(_key378); + std::string& _val379 = this->properties[_key378]; + xfer += iprot->readString(_val379); } xfer += iprot->readMapEnd(); } @@ -7913,11 +8651,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += 
oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter350; - for (_iter350 = this->properties.begin(); _iter350 != this->properties.end(); ++_iter350) + std::map ::const_iterator _iter380; + for (_iter380 = this->properties.begin(); _iter380 != this->properties.end(); ++_iter380) { - xfer += oprot->writeString(_iter350->first); - xfer += oprot->writeString(_iter350->second); + xfer += oprot->writeString(_iter380->first); + xfer += oprot->writeString(_iter380->second); } xfer += oprot->writeMapEnd(); } @@ -7934,13 +8672,13 @@ void swap(EnvironmentContext &a, EnvironmentContext &b) { swap(a.__isset, b.__isset); } -EnvironmentContext::EnvironmentContext(const EnvironmentContext& other351) { - properties = other351.properties; - __isset = other351.__isset; +EnvironmentContext::EnvironmentContext(const EnvironmentContext& other381) { + properties = other381.properties; + __isset = other381.__isset; } -EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other352) { - properties = other352.properties; - __isset = other352.__isset; +EnvironmentContext& EnvironmentContext::operator=(const EnvironmentContext& other382) { + properties = other382.properties; + __isset = other382.__isset; return *this; } void EnvironmentContext::printTo(std::ostream& out) const { @@ -7990,14 +8728,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size353; - ::apache::thrift::protocol::TType _etype356; - xfer += iprot->readListBegin(_etype356, _size353); - this->partitions.resize(_size353); - uint32_t _i357; - for (_i357 = 0; _i357 < _size353; ++_i357) + uint32_t _size383; + ::apache::thrift::protocol::TType _etype386; + xfer += iprot->readListBegin(_etype386, _size383); + this->partitions.resize(_size383); + uint32_t _i387; + for (_i387 = 0; _i387 < _size383; ++_i387) { - xfer += this->partitions[_i357].read(iprot); + xfer += this->partitions[_i387].read(iprot); } xfer += iprot->readListEnd(); } @@ -8038,10 +8776,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter358; - for (_iter358 = this->partitions.begin(); _iter358 != this->partitions.end(); ++_iter358) + std::vector ::const_iterator _iter388; + for (_iter388 = this->partitions.begin(); _iter388 != this->partitions.end(); ++_iter388) { - xfer += (*_iter358).write(oprot); + xfer += (*_iter388).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8062,13 +8800,13 @@ void swap(PartitionsByExprResult &a, PartitionsByExprResult &b) { swap(a.hasUnknownPartitions, b.hasUnknownPartitions); } -PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other359) { - partitions = other359.partitions; - hasUnknownPartitions = other359.hasUnknownPartitions; +PartitionsByExprResult::PartitionsByExprResult(const PartitionsByExprResult& other389) { + partitions = other389.partitions; + hasUnknownPartitions = other389.hasUnknownPartitions; } -PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other360) { - partitions = 
other360.partitions; - hasUnknownPartitions = other360.hasUnknownPartitions; +PartitionsByExprResult& PartitionsByExprResult::operator=(const PartitionsByExprResult& other390) { + partitions = other390.partitions; + hasUnknownPartitions = other390.hasUnknownPartitions; return *this; } void PartitionsByExprResult::printTo(std::ostream& out) const { @@ -8230,21 +8968,21 @@ void swap(PartitionsByExprRequest &a, PartitionsByExprRequest &b) { swap(a.__isset, b.__isset); } -PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other361) { - dbName = other361.dbName; - tblName = other361.tblName; - expr = other361.expr; - defaultPartitionName = other361.defaultPartitionName; - maxParts = other361.maxParts; - __isset = other361.__isset; -} -PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other362) { - dbName = other362.dbName; - tblName = other362.tblName; - expr = other362.expr; - defaultPartitionName = other362.defaultPartitionName; - maxParts = other362.maxParts; - __isset = other362.__isset; +PartitionsByExprRequest::PartitionsByExprRequest(const PartitionsByExprRequest& other391) { + dbName = other391.dbName; + tblName = other391.tblName; + expr = other391.expr; + defaultPartitionName = other391.defaultPartitionName; + maxParts = other391.maxParts; + __isset = other391.__isset; +} +PartitionsByExprRequest& PartitionsByExprRequest::operator=(const PartitionsByExprRequest& other392) { + dbName = other392.dbName; + tblName = other392.tblName; + expr = other392.expr; + defaultPartitionName = other392.defaultPartitionName; + maxParts = other392.maxParts; + __isset = other392.__isset; return *this; } void PartitionsByExprRequest::printTo(std::ostream& out) const { @@ -8293,14 +9031,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size363; - ::apache::thrift::protocol::TType _etype366; - xfer += iprot->readListBegin(_etype366, _size363); - this->tableStats.resize(_size363); - uint32_t _i367; - for (_i367 = 0; _i367 < _size363; ++_i367) + uint32_t _size393; + ::apache::thrift::protocol::TType _etype396; + xfer += iprot->readListBegin(_etype396, _size393); + this->tableStats.resize(_size393); + uint32_t _i397; + for (_i397 = 0; _i397 < _size393; ++_i397) { - xfer += this->tableStats[_i367].read(iprot); + xfer += this->tableStats[_i397].read(iprot); } xfer += iprot->readListEnd(); } @@ -8331,10 +9069,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); - std::vector ::const_iterator _iter368; - for (_iter368 = this->tableStats.begin(); _iter368 != this->tableStats.end(); ++_iter368) + std::vector ::const_iterator _iter398; + for (_iter398 = this->tableStats.begin(); _iter398 != this->tableStats.end(); ++_iter398) { - xfer += (*_iter368).write(oprot); + xfer += (*_iter398).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8350,11 +9088,11 @@ void swap(TableStatsResult &a, TableStatsResult &b) { swap(a.tableStats, b.tableStats); } -TableStatsResult::TableStatsResult(const TableStatsResult& other369) { - tableStats = other369.tableStats; +TableStatsResult::TableStatsResult(const TableStatsResult& other399) { + tableStats = other399.tableStats; } -TableStatsResult& 
TableStatsResult::operator=(const TableStatsResult& other370) { - tableStats = other370.tableStats; +TableStatsResult& TableStatsResult::operator=(const TableStatsResult& other400) { + tableStats = other400.tableStats; return *this; } void TableStatsResult::printTo(std::ostream& out) const { @@ -8399,26 +9137,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size371; - ::apache::thrift::protocol::TType _ktype372; - ::apache::thrift::protocol::TType _vtype373; - xfer += iprot->readMapBegin(_ktype372, _vtype373, _size371); - uint32_t _i375; - for (_i375 = 0; _i375 < _size371; ++_i375) + uint32_t _size401; + ::apache::thrift::protocol::TType _ktype402; + ::apache::thrift::protocol::TType _vtype403; + xfer += iprot->readMapBegin(_ktype402, _vtype403, _size401); + uint32_t _i405; + for (_i405 = 0; _i405 < _size401; ++_i405) { - std::string _key376; - xfer += iprot->readString(_key376); - std::vector & _val377 = this->partStats[_key376]; + std::string _key406; + xfer += iprot->readString(_key406); + std::vector & _val407 = this->partStats[_key406]; { - _val377.clear(); - uint32_t _size378; - ::apache::thrift::protocol::TType _etype381; - xfer += iprot->readListBegin(_etype381, _size378); - _val377.resize(_size378); - uint32_t _i382; - for (_i382 = 0; _i382 < _size378; ++_i382) + _val407.clear(); + uint32_t _size408; + ::apache::thrift::protocol::TType _etype411; + xfer += iprot->readListBegin(_etype411, _size408); + _val407.resize(_size408); + uint32_t _i412; + for (_i412 = 0; _i412 < _size408; ++_i412) { - xfer += _val377[_i382].read(iprot); + xfer += _val407[_i412].read(iprot); } xfer += iprot->readListEnd(); } @@ -8452,16 +9190,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->partStats.size())); - std::map > ::const_iterator _iter383; - for (_iter383 = this->partStats.begin(); _iter383 != this->partStats.end(); ++_iter383) + std::map > ::const_iterator _iter413; + for (_iter413 = this->partStats.begin(); _iter413 != this->partStats.end(); ++_iter413) { - xfer += oprot->writeString(_iter383->first); + xfer += oprot->writeString(_iter413->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter383->second.size())); - std::vector ::const_iterator _iter384; - for (_iter384 = _iter383->second.begin(); _iter384 != _iter383->second.end(); ++_iter384) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter413->second.size())); + std::vector ::const_iterator _iter414; + for (_iter414 = _iter413->second.begin(); _iter414 != _iter413->second.end(); ++_iter414) { - xfer += (*_iter384).write(oprot); + xfer += (*_iter414).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8480,11 +9218,11 @@ void swap(PartitionsStatsResult &a, PartitionsStatsResult &b) { swap(a.partStats, b.partStats); } -PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other385) { - partStats = other385.partStats; +PartitionsStatsResult::PartitionsStatsResult(const PartitionsStatsResult& other415) { + partStats = other415.partStats; } -PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other386) { - partStats = other386.partStats; 
+PartitionsStatsResult& PartitionsStatsResult::operator=(const PartitionsStatsResult& other416) { + partStats = other416.partStats; return *this; } void PartitionsStatsResult::printTo(std::ostream& out) const { @@ -8555,14 +9293,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size387; - ::apache::thrift::protocol::TType _etype390; - xfer += iprot->readListBegin(_etype390, _size387); - this->colNames.resize(_size387); - uint32_t _i391; - for (_i391 = 0; _i391 < _size387; ++_i391) + uint32_t _size417; + ::apache::thrift::protocol::TType _etype420; + xfer += iprot->readListBegin(_etype420, _size417); + this->colNames.resize(_size417); + uint32_t _i421; + for (_i421 = 0; _i421 < _size417; ++_i421) { - xfer += iprot->readString(this->colNames[_i391]); + xfer += iprot->readString(this->colNames[_i421]); } xfer += iprot->readListEnd(); } @@ -8605,10 +9343,10 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter392; - for (_iter392 = this->colNames.begin(); _iter392 != this->colNames.end(); ++_iter392) + std::vector ::const_iterator _iter422; + for (_iter422 = this->colNames.begin(); _iter422 != this->colNames.end(); ++_iter422) { - xfer += oprot->writeString((*_iter392)); + xfer += oprot->writeString((*_iter422)); } xfer += oprot->writeListEnd(); } @@ -8626,15 +9364,15 @@ void swap(TableStatsRequest &a, TableStatsRequest &b) { swap(a.colNames, b.colNames); } -TableStatsRequest::TableStatsRequest(const TableStatsRequest& other393) { - dbName = other393.dbName; - tblName = other393.tblName; - colNames = other393.colNames; +TableStatsRequest::TableStatsRequest(const TableStatsRequest& other423) { + dbName = other423.dbName; + tblName = other423.tblName; + colNames = other423.colNames; } -TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other394) { - dbName = other394.dbName; - tblName = other394.tblName; - colNames = other394.colNames; +TableStatsRequest& TableStatsRequest::operator=(const TableStatsRequest& other424) { + dbName = other424.dbName; + tblName = other424.tblName; + colNames = other424.colNames; return *this; } void TableStatsRequest::printTo(std::ostream& out) const { @@ -8712,14 +9450,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size395; - ::apache::thrift::protocol::TType _etype398; - xfer += iprot->readListBegin(_etype398, _size395); - this->colNames.resize(_size395); - uint32_t _i399; - for (_i399 = 0; _i399 < _size395; ++_i399) + uint32_t _size425; + ::apache::thrift::protocol::TType _etype428; + xfer += iprot->readListBegin(_etype428, _size425); + this->colNames.resize(_size425); + uint32_t _i429; + for (_i429 = 0; _i429 < _size425; ++_i429) { - xfer += iprot->readString(this->colNames[_i399]); + xfer += iprot->readString(this->colNames[_i429]); } xfer += iprot->readListEnd(); } @@ -8732,14 +9470,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size400; - ::apache::thrift::protocol::TType _etype403; - xfer += 
iprot->readListBegin(_etype403, _size400); - this->partNames.resize(_size400); - uint32_t _i404; - for (_i404 = 0; _i404 < _size400; ++_i404) + uint32_t _size430; + ::apache::thrift::protocol::TType _etype433; + xfer += iprot->readListBegin(_etype433, _size430); + this->partNames.resize(_size430); + uint32_t _i434; + for (_i434 = 0; _i434 < _size430; ++_i434) { - xfer += iprot->readString(this->partNames[_i404]); + xfer += iprot->readString(this->partNames[_i434]); } xfer += iprot->readListEnd(); } @@ -8784,10 +9522,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter405; - for (_iter405 = this->colNames.begin(); _iter405 != this->colNames.end(); ++_iter405) + std::vector ::const_iterator _iter435; + for (_iter435 = this->colNames.begin(); _iter435 != this->colNames.end(); ++_iter435) { - xfer += oprot->writeString((*_iter405)); + xfer += oprot->writeString((*_iter435)); } xfer += oprot->writeListEnd(); } @@ -8796,10 +9534,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter406; - for (_iter406 = this->partNames.begin(); _iter406 != this->partNames.end(); ++_iter406) + std::vector ::const_iterator _iter436; + for (_iter436 = this->partNames.begin(); _iter436 != this->partNames.end(); ++_iter436) { - xfer += oprot->writeString((*_iter406)); + xfer += oprot->writeString((*_iter436)); } xfer += oprot->writeListEnd(); } @@ -8818,17 +9556,17 @@ void swap(PartitionsStatsRequest &a, PartitionsStatsRequest &b) { swap(a.partNames, b.partNames); } -PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other407) { - dbName = other407.dbName; - tblName = other407.tblName; - colNames = other407.colNames; - partNames = other407.partNames; +PartitionsStatsRequest::PartitionsStatsRequest(const PartitionsStatsRequest& other437) { + dbName = other437.dbName; + tblName = other437.tblName; + colNames = other437.colNames; + partNames = other437.partNames; } -PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other408) { - dbName = other408.dbName; - tblName = other408.tblName; - colNames = other408.colNames; - partNames = other408.partNames; +PartitionsStatsRequest& PartitionsStatsRequest::operator=(const PartitionsStatsRequest& other438) { + dbName = other438.dbName; + tblName = other438.tblName; + colNames = other438.colNames; + partNames = other438.partNames; return *this; } void PartitionsStatsRequest::printTo(std::ostream& out) const { @@ -8876,14 +9614,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size409; - ::apache::thrift::protocol::TType _etype412; - xfer += iprot->readListBegin(_etype412, _size409); - this->partitions.resize(_size409); - uint32_t _i413; - for (_i413 = 0; _i413 < _size409; ++_i413) + uint32_t _size439; + ::apache::thrift::protocol::TType _etype442; + xfer += iprot->readListBegin(_etype442, _size439); + this->partitions.resize(_size439); + uint32_t _i443; + for (_i443 = 0; _i443 < 
_size439; ++_i443) { - xfer += this->partitions[_i413].read(iprot); + xfer += this->partitions[_i443].read(iprot); } xfer += iprot->readListEnd(); } @@ -8913,10 +9651,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter414; - for (_iter414 = this->partitions.begin(); _iter414 != this->partitions.end(); ++_iter414) + std::vector ::const_iterator _iter444; + for (_iter444 = this->partitions.begin(); _iter444 != this->partitions.end(); ++_iter444) { - xfer += (*_iter414).write(oprot); + xfer += (*_iter444).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8933,13 +9671,13 @@ void swap(AddPartitionsResult &a, AddPartitionsResult &b) { swap(a.__isset, b.__isset); } -AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other415) { - partitions = other415.partitions; - __isset = other415.__isset; +AddPartitionsResult::AddPartitionsResult(const AddPartitionsResult& other445) { + partitions = other445.partitions; + __isset = other445.__isset; } -AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other416) { - partitions = other416.partitions; - __isset = other416.__isset; +AddPartitionsResult& AddPartitionsResult::operator=(const AddPartitionsResult& other446) { + partitions = other446.partitions; + __isset = other446.__isset; return *this; } void AddPartitionsResult::printTo(std::ostream& out) const { @@ -9020,14 +9758,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size417; - ::apache::thrift::protocol::TType _etype420; - xfer += iprot->readListBegin(_etype420, _size417); - this->parts.resize(_size417); - uint32_t _i421; - for (_i421 = 0; _i421 < _size417; ++_i421) + uint32_t _size447; + ::apache::thrift::protocol::TType _etype450; + xfer += iprot->readListBegin(_etype450, _size447); + this->parts.resize(_size447); + uint32_t _i451; + for (_i451 = 0; _i451 < _size447; ++_i451) { - xfer += this->parts[_i421].read(iprot); + xfer += this->parts[_i451].read(iprot); } xfer += iprot->readListEnd(); } @@ -9088,10 +9826,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); - std::vector ::const_iterator _iter422; - for (_iter422 = this->parts.begin(); _iter422 != this->parts.end(); ++_iter422) + std::vector ::const_iterator _iter452; + for (_iter452 = this->parts.begin(); _iter452 != this->parts.end(); ++_iter452) { - xfer += (*_iter422).write(oprot); + xfer += (*_iter452).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9121,21 +9859,21 @@ void swap(AddPartitionsRequest &a, AddPartitionsRequest &b) { swap(a.__isset, b.__isset); } -AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other423) { - dbName = other423.dbName; - tblName = other423.tblName; - parts = other423.parts; - ifNotExists = other423.ifNotExists; - needResult = other423.needResult; - __isset = other423.__isset; +AddPartitionsRequest::AddPartitionsRequest(const AddPartitionsRequest& other453) { + dbName = other453.dbName; + tblName = other453.tblName; + parts = 
other453.parts; + ifNotExists = other453.ifNotExists; + needResult = other453.needResult; + __isset = other453.__isset; } -AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other424) { - dbName = other424.dbName; - tblName = other424.tblName; - parts = other424.parts; - ifNotExists = other424.ifNotExists; - needResult = other424.needResult; - __isset = other424.__isset; +AddPartitionsRequest& AddPartitionsRequest::operator=(const AddPartitionsRequest& other454) { + dbName = other454.dbName; + tblName = other454.tblName; + parts = other454.parts; + ifNotExists = other454.ifNotExists; + needResult = other454.needResult; + __isset = other454.__isset; return *this; } void AddPartitionsRequest::printTo(std::ostream& out) const { @@ -9184,14 +9922,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size425; - ::apache::thrift::protocol::TType _etype428; - xfer += iprot->readListBegin(_etype428, _size425); - this->partitions.resize(_size425); - uint32_t _i429; - for (_i429 = 0; _i429 < _size425; ++_i429) + uint32_t _size455; + ::apache::thrift::protocol::TType _etype458; + xfer += iprot->readListBegin(_etype458, _size455); + this->partitions.resize(_size455); + uint32_t _i459; + for (_i459 = 0; _i459 < _size455; ++_i459) { - xfer += this->partitions[_i429].read(iprot); + xfer += this->partitions[_i459].read(iprot); } xfer += iprot->readListEnd(); } @@ -9221,10 +9959,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter430; - for (_iter430 = this->partitions.begin(); _iter430 != this->partitions.end(); ++_iter430) + std::vector ::const_iterator _iter460; + for (_iter460 = this->partitions.begin(); _iter460 != this->partitions.end(); ++_iter460) { - xfer += (*_iter430).write(oprot); + xfer += (*_iter460).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9241,13 +9979,13 @@ void swap(DropPartitionsResult &a, DropPartitionsResult &b) { swap(a.__isset, b.__isset); } -DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other431) { - partitions = other431.partitions; - __isset = other431.__isset; +DropPartitionsResult::DropPartitionsResult(const DropPartitionsResult& other461) { + partitions = other461.partitions; + __isset = other461.__isset; } -DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other432) { - partitions = other432.partitions; - __isset = other432.__isset; +DropPartitionsResult& DropPartitionsResult::operator=(const DropPartitionsResult& other462) { + partitions = other462.partitions; + __isset = other462.__isset; return *this; } void DropPartitionsResult::printTo(std::ostream& out) const { @@ -9349,15 +10087,15 @@ void swap(DropPartitionsExpr &a, DropPartitionsExpr &b) { swap(a.__isset, b.__isset); } -DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other433) { - expr = other433.expr; - partArchiveLevel = other433.partArchiveLevel; - __isset = other433.__isset; +DropPartitionsExpr::DropPartitionsExpr(const DropPartitionsExpr& other463) { + expr = other463.expr; + partArchiveLevel = other463.partArchiveLevel; + __isset = other463.__isset; } -DropPartitionsExpr& DropPartitionsExpr::operator=(const 
DropPartitionsExpr& other434) { - expr = other434.expr; - partArchiveLevel = other434.partArchiveLevel; - __isset = other434.__isset; +DropPartitionsExpr& DropPartitionsExpr::operator=(const DropPartitionsExpr& other464) { + expr = other464.expr; + partArchiveLevel = other464.partArchiveLevel; + __isset = other464.__isset; return *this; } void DropPartitionsExpr::printTo(std::ostream& out) const { @@ -9406,14 +10144,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size435; - ::apache::thrift::protocol::TType _etype438; - xfer += iprot->readListBegin(_etype438, _size435); - this->names.resize(_size435); - uint32_t _i439; - for (_i439 = 0; _i439 < _size435; ++_i439) + uint32_t _size465; + ::apache::thrift::protocol::TType _etype468; + xfer += iprot->readListBegin(_etype468, _size465); + this->names.resize(_size465); + uint32_t _i469; + for (_i469 = 0; _i469 < _size465; ++_i469) { - xfer += iprot->readString(this->names[_i439]); + xfer += iprot->readString(this->names[_i469]); } xfer += iprot->readListEnd(); } @@ -9426,14 +10164,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size440; - ::apache::thrift::protocol::TType _etype443; - xfer += iprot->readListBegin(_etype443, _size440); - this->exprs.resize(_size440); - uint32_t _i444; - for (_i444 = 0; _i444 < _size440; ++_i444) + uint32_t _size470; + ::apache::thrift::protocol::TType _etype473; + xfer += iprot->readListBegin(_etype473, _size470); + this->exprs.resize(_size470); + uint32_t _i474; + for (_i474 = 0; _i474 < _size470; ++_i474) { - xfer += this->exprs[_i444].read(iprot); + xfer += this->exprs[_i474].read(iprot); } xfer += iprot->readListEnd(); } @@ -9462,10 +10200,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter445; - for (_iter445 = this->names.begin(); _iter445 != this->names.end(); ++_iter445) + std::vector ::const_iterator _iter475; + for (_iter475 = this->names.begin(); _iter475 != this->names.end(); ++_iter475) { - xfer += oprot->writeString((*_iter445)); + xfer += oprot->writeString((*_iter475)); } xfer += oprot->writeListEnd(); } @@ -9474,10 +10212,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter446; - for (_iter446 = this->exprs.begin(); _iter446 != this->exprs.end(); ++_iter446) + std::vector ::const_iterator _iter476; + for (_iter476 = this->exprs.begin(); _iter476 != this->exprs.end(); ++_iter476) { - xfer += (*_iter446).write(oprot); + xfer += (*_iter476).write(oprot); } xfer += oprot->writeListEnd(); } @@ -9495,15 +10233,15 @@ void swap(RequestPartsSpec &a, RequestPartsSpec &b) { swap(a.__isset, b.__isset); } -RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other447) { - names = other447.names; - exprs = other447.exprs; - __isset = other447.__isset; +RequestPartsSpec::RequestPartsSpec(const RequestPartsSpec& other477) { + names = other477.names; + exprs = 
other477.exprs; + __isset = other477.__isset; } -RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other448) { - names = other448.names; - exprs = other448.exprs; - __isset = other448.__isset; +RequestPartsSpec& RequestPartsSpec::operator=(const RequestPartsSpec& other478) { + names = other478.names; + exprs = other478.exprs; + __isset = other478.__isset; return *this; } void RequestPartsSpec::printTo(std::ostream& out) const { @@ -9722,27 +10460,27 @@ void swap(DropPartitionsRequest &a, DropPartitionsRequest &b) { swap(a.__isset, b.__isset); } -DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other449) { - dbName = other449.dbName; - tblName = other449.tblName; - parts = other449.parts; - deleteData = other449.deleteData; - ifExists = other449.ifExists; - ignoreProtection = other449.ignoreProtection; - environmentContext = other449.environmentContext; - needResult = other449.needResult; - __isset = other449.__isset; -} -DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other450) { - dbName = other450.dbName; - tblName = other450.tblName; - parts = other450.parts; - deleteData = other450.deleteData; - ifExists = other450.ifExists; - ignoreProtection = other450.ignoreProtection; - environmentContext = other450.environmentContext; - needResult = other450.needResult; - __isset = other450.__isset; +DropPartitionsRequest::DropPartitionsRequest(const DropPartitionsRequest& other479) { + dbName = other479.dbName; + tblName = other479.tblName; + parts = other479.parts; + deleteData = other479.deleteData; + ifExists = other479.ifExists; + ignoreProtection = other479.ignoreProtection; + environmentContext = other479.environmentContext; + needResult = other479.needResult; + __isset = other479.__isset; +} +DropPartitionsRequest& DropPartitionsRequest::operator=(const DropPartitionsRequest& other480) { + dbName = other480.dbName; + tblName = other480.tblName; + parts = other480.parts; + deleteData = other480.deleteData; + ifExists = other480.ifExists; + ignoreProtection = other480.ignoreProtection; + environmentContext = other480.environmentContext; + needResult = other480.needResult; + __isset = other480.__isset; return *this; } void DropPartitionsRequest::printTo(std::ostream& out) const { @@ -9795,9 +10533,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast451; - xfer += iprot->readI32(ecast451); - this->resourceType = (ResourceType::type)ecast451; + int32_t ecast481; + xfer += iprot->readI32(ecast481); + this->resourceType = (ResourceType::type)ecast481; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -9848,15 +10586,15 @@ void swap(ResourceUri &a, ResourceUri &b) { swap(a.__isset, b.__isset); } -ResourceUri::ResourceUri(const ResourceUri& other452) { - resourceType = other452.resourceType; - uri = other452.uri; - __isset = other452.__isset; +ResourceUri::ResourceUri(const ResourceUri& other482) { + resourceType = other482.resourceType; + uri = other482.uri; + __isset = other482.__isset; } -ResourceUri& ResourceUri::operator=(const ResourceUri& other453) { - resourceType = other453.resourceType; - uri = other453.uri; - __isset = other453.__isset; +ResourceUri& ResourceUri::operator=(const ResourceUri& other483) { + resourceType = other483.resourceType; + uri = other483.uri; + __isset = other483.__isset; return *this; } void ResourceUri::printTo(std::ostream& out) const { @@ -9959,9 +10697,9 
@@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast454; - xfer += iprot->readI32(ecast454); - this->ownerType = (PrincipalType::type)ecast454; + int32_t ecast484; + xfer += iprot->readI32(ecast484); + this->ownerType = (PrincipalType::type)ecast484; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -9977,9 +10715,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast455; - xfer += iprot->readI32(ecast455); - this->functionType = (FunctionType::type)ecast455; + int32_t ecast485; + xfer += iprot->readI32(ecast485); + this->functionType = (FunctionType::type)ecast485; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -9989,14 +10727,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size456; - ::apache::thrift::protocol::TType _etype459; - xfer += iprot->readListBegin(_etype459, _size456); - this->resourceUris.resize(_size456); - uint32_t _i460; - for (_i460 = 0; _i460 < _size456; ++_i460) + uint32_t _size486; + ::apache::thrift::protocol::TType _etype489; + xfer += iprot->readListBegin(_etype489, _size486); + this->resourceUris.resize(_size486); + uint32_t _i490; + for (_i490 = 0; _i490 < _size486; ++_i490) { - xfer += this->resourceUris[_i460].read(iprot); + xfer += this->resourceUris[_i490].read(iprot); } xfer += iprot->readListEnd(); } @@ -10053,10 +10791,10 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->resourceUris.size())); - std::vector<ResourceUri> ::const_iterator _iter461; - for (_iter461 = this->resourceUris.begin(); _iter461 != this->resourceUris.end(); ++_iter461) + std::vector<ResourceUri> ::const_iterator _iter491; + for (_iter491 = this->resourceUris.begin(); _iter491 != this->resourceUris.end(); ++_iter491) { - xfer += (*_iter461).write(oprot); + xfer += (*_iter491).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10080,27 +10818,27 @@ void swap(Function &a, Function &b) { swap(a.__isset, b.__isset); } -Function::Function(const Function& other462) { - functionName = other462.functionName; - dbName = other462.dbName; - className = other462.className; - ownerName = other462.ownerName; - ownerType = other462.ownerType; - createTime = other462.createTime; - functionType = other462.functionType; - resourceUris = other462.resourceUris; - __isset = other462.__isset; -} -Function& Function::operator=(const Function& other463) { - functionName = other463.functionName; - dbName = other463.dbName; - className = other463.className; - ownerName = other463.ownerName; - ownerType = other463.ownerType; - createTime = other463.createTime; - functionType = other463.functionType; - resourceUris = other463.resourceUris; - __isset = other463.__isset; +Function::Function(const Function& other492) { + functionName = other492.functionName; + dbName = other492.dbName; + className = other492.className; + ownerName = other492.ownerName; + ownerType = other492.ownerType; + createTime = other492.createTime; + functionType = other492.functionType; + resourceUris = other492.resourceUris; + __isset = other492.__isset; +} +Function&
Function::operator=(const Function& other493) { + functionName = other493.functionName; + dbName = other493.dbName; + className = other493.className; + ownerName = other493.ownerName; + ownerType = other493.ownerType; + createTime = other493.createTime; + functionType = other493.functionType; + resourceUris = other493.resourceUris; + __isset = other493.__isset; return *this; } void Function::printTo(std::ostream& out) const { @@ -10188,9 +10926,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast464; - xfer += iprot->readI32(ecast464); - this->state = (TxnState::type)ecast464; + int32_t ecast494; + xfer += iprot->readI32(ecast494); + this->state = (TxnState::type)ecast494; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -10309,25 +11047,25 @@ void swap(TxnInfo &a, TxnInfo &b) { swap(a.__isset, b.__isset); } -TxnInfo::TxnInfo(const TxnInfo& other465) { - id = other465.id; - state = other465.state; - user = other465.user; - hostname = other465.hostname; - agentInfo = other465.agentInfo; - heartbeatCount = other465.heartbeatCount; - metaInfo = other465.metaInfo; - __isset = other465.__isset; -} -TxnInfo& TxnInfo::operator=(const TxnInfo& other466) { - id = other466.id; - state = other466.state; - user = other466.user; - hostname = other466.hostname; - agentInfo = other466.agentInfo; - heartbeatCount = other466.heartbeatCount; - metaInfo = other466.metaInfo; - __isset = other466.__isset; +TxnInfo::TxnInfo(const TxnInfo& other495) { + id = other495.id; + state = other495.state; + user = other495.user; + hostname = other495.hostname; + agentInfo = other495.agentInfo; + heartbeatCount = other495.heartbeatCount; + metaInfo = other495.metaInfo; + __isset = other495.__isset; +} +TxnInfo& TxnInfo::operator=(const TxnInfo& other496) { + id = other496.id; + state = other496.state; + user = other496.user; + hostname = other496.hostname; + agentInfo = other496.agentInfo; + heartbeatCount = other496.heartbeatCount; + metaInfo = other496.metaInfo; + __isset = other496.__isset; return *this; } void TxnInfo::printTo(std::ostream& out) const { @@ -10391,14 +11129,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size467; - ::apache::thrift::protocol::TType _etype470; - xfer += iprot->readListBegin(_etype470, _size467); - this->open_txns.resize(_size467); - uint32_t _i471; - for (_i471 = 0; _i471 < _size467; ++_i471) + uint32_t _size497; + ::apache::thrift::protocol::TType _etype500; + xfer += iprot->readListBegin(_etype500, _size497); + this->open_txns.resize(_size497); + uint32_t _i501; + for (_i501 = 0; _i501 < _size497; ++_i501) { - xfer += this->open_txns[_i471].read(iprot); + xfer += this->open_txns[_i501].read(iprot); } xfer += iprot->readListEnd(); } @@ -10435,10 +11173,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->open_txns.size())); - std::vector<TxnInfo> ::const_iterator _iter472; - for (_iter472 = this->open_txns.begin(); _iter472 != this->open_txns.end(); ++_iter472) + std::vector<TxnInfo> ::const_iterator _iter502; + for (_iter502 = this->open_txns.begin(); _iter502 != this->open_txns.end(); ++_iter502) { - xfer += (*_iter472).write(oprot); + xfer +=
(*_iter502).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10455,13 +11193,13 @@ void swap(GetOpenTxnsInfoResponse &a, GetOpenTxnsInfoResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other473) { - txn_high_water_mark = other473.txn_high_water_mark; - open_txns = other473.open_txns; +GetOpenTxnsInfoResponse::GetOpenTxnsInfoResponse(const GetOpenTxnsInfoResponse& other503) { + txn_high_water_mark = other503.txn_high_water_mark; + open_txns = other503.open_txns; } -GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other474) { - txn_high_water_mark = other474.txn_high_water_mark; - open_txns = other474.open_txns; +GetOpenTxnsInfoResponse& GetOpenTxnsInfoResponse::operator=(const GetOpenTxnsInfoResponse& other504) { + txn_high_water_mark = other504.txn_high_water_mark; + open_txns = other504.open_txns; return *this; } void GetOpenTxnsInfoResponse::printTo(std::ostream& out) const { @@ -10520,15 +11258,15 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_SET) { { this->open_txns.clear(); - uint32_t _size475; - ::apache::thrift::protocol::TType _etype478; - xfer += iprot->readSetBegin(_etype478, _size475); - uint32_t _i479; - for (_i479 = 0; _i479 < _size475; ++_i479) + uint32_t _size505; + ::apache::thrift::protocol::TType _etype508; + xfer += iprot->readSetBegin(_etype508, _size505); + uint32_t _i509; + for (_i509 = 0; _i509 < _size505; ++_i509) { - int64_t _elem480; - xfer += iprot->readI64(_elem480); - this->open_txns.insert(_elem480); + int64_t _elem510; + xfer += iprot->readI64(_elem510); + this->open_txns.insert(_elem510); } xfer += iprot->readSetEnd(); } @@ -10565,10 +11303,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->open_txns.size())); - std::set<int64_t> ::const_iterator _iter481; - for (_iter481 = this->open_txns.begin(); _iter481 != this->open_txns.end(); ++_iter481) + std::set<int64_t> ::const_iterator _iter511; + for (_iter511 = this->open_txns.begin(); _iter511 != this->open_txns.end(); ++_iter511) { - xfer += oprot->writeI64((*_iter481)); + xfer += oprot->writeI64((*_iter511)); } xfer += oprot->writeSetEnd(); } @@ -10585,13 +11323,13 @@ void swap(GetOpenTxnsResponse &a, GetOpenTxnsResponse &b) { swap(a.open_txns, b.open_txns); } -GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other482) { - txn_high_water_mark = other482.txn_high_water_mark; - open_txns = other482.open_txns; +GetOpenTxnsResponse::GetOpenTxnsResponse(const GetOpenTxnsResponse& other512) { + txn_high_water_mark = other512.txn_high_water_mark; + open_txns = other512.open_txns; } -GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other483) { - txn_high_water_mark = other483.txn_high_water_mark; - open_txns = other483.open_txns; +GetOpenTxnsResponse& GetOpenTxnsResponse::operator=(const GetOpenTxnsResponse& other513) { + txn_high_water_mark = other513.txn_high_water_mark; + open_txns = other513.open_txns; return *this; } void GetOpenTxnsResponse::printTo(std::ostream& out) const { @@ -10734,19 +11472,19 @@ void swap(OpenTxnRequest &a, OpenTxnRequest &b) { swap(a.__isset, b.__isset); } -OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other484) { - num_txns = other484.num_txns;
- user = other484.user; - hostname = other484.hostname; - agentInfo = other484.agentInfo; - __isset = other484.__isset; +OpenTxnRequest::OpenTxnRequest(const OpenTxnRequest& other514) { + num_txns = other514.num_txns; + user = other514.user; + hostname = other514.hostname; + agentInfo = other514.agentInfo; + __isset = other514.__isset; } -OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other485) { - num_txns = other485.num_txns; - user = other485.user; - hostname = other485.hostname; - agentInfo = other485.agentInfo; - __isset = other485.__isset; +OpenTxnRequest& OpenTxnRequest::operator=(const OpenTxnRequest& other515) { + num_txns = other515.num_txns; + user = other515.user; + hostname = other515.hostname; + agentInfo = other515.agentInfo; + __isset = other515.__isset; return *this; } void OpenTxnRequest::printTo(std::ostream& out) const { @@ -10794,14 +11532,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size486; - ::apache::thrift::protocol::TType _etype489; - xfer += iprot->readListBegin(_etype489, _size486); - this->txn_ids.resize(_size486); - uint32_t _i490; - for (_i490 = 0; _i490 < _size486; ++_i490) + uint32_t _size516; + ::apache::thrift::protocol::TType _etype519; + xfer += iprot->readListBegin(_etype519, _size516); + this->txn_ids.resize(_size516); + uint32_t _i520; + for (_i520 = 0; _i520 < _size516; ++_i520) { - xfer += iprot->readI64(this->txn_ids[_i490]); + xfer += iprot->readI64(this->txn_ids[_i520]); } xfer += iprot->readListEnd(); } @@ -10832,10 +11570,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->txn_ids.size())); - std::vector<int64_t> ::const_iterator _iter491; - for (_iter491 = this->txn_ids.begin(); _iter491 != this->txn_ids.end(); ++_iter491) + std::vector<int64_t> ::const_iterator _iter521; + for (_iter521 = this->txn_ids.begin(); _iter521 != this->txn_ids.end(); ++_iter521) { - xfer += oprot->writeI64((*_iter491)); + xfer += oprot->writeI64((*_iter521)); } xfer += oprot->writeListEnd(); } @@ -10851,11 +11589,11 @@ void swap(OpenTxnsResponse &a, OpenTxnsResponse &b) { swap(a.txn_ids, b.txn_ids); } -OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other492) { - txn_ids = other492.txn_ids; +OpenTxnsResponse::OpenTxnsResponse(const OpenTxnsResponse& other522) { + txn_ids = other522.txn_ids; } -OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other493) { - txn_ids = other493.txn_ids; +OpenTxnsResponse& OpenTxnsResponse::operator=(const OpenTxnsResponse& other523) { + txn_ids = other523.txn_ids; return *this; } void OpenTxnsResponse::printTo(std::ostream& out) const { @@ -10937,11 +11675,11 @@ void swap(AbortTxnRequest &a, AbortTxnRequest &b) { swap(a.txnid, b.txnid); } -AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other494) { - txnid = other494.txnid; +AbortTxnRequest::AbortTxnRequest(const AbortTxnRequest& other524) { + txnid = other524.txnid; } -AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other495) { - txnid = other495.txnid; +AbortTxnRequest& AbortTxnRequest::operator=(const AbortTxnRequest& other525) { + txnid = other525.txnid; return *this; } void AbortTxnRequest::printTo(std::ostream& out) const { @@ -11023,11 +11761,11 @@ void swap(CommitTxnRequest &a, CommitTxnRequest
&b) { swap(a.txnid, b.txnid); } -CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other496) { - txnid = other496.txnid; +CommitTxnRequest::CommitTxnRequest(const CommitTxnRequest& other526) { + txnid = other526.txnid; } -CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other497) { - txnid = other497.txnid; +CommitTxnRequest& CommitTxnRequest::operator=(const CommitTxnRequest& other527) { + txnid = other527.txnid; return *this; } void CommitTxnRequest::printTo(std::ostream& out) const { @@ -11090,9 +11828,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast498; - xfer += iprot->readI32(ecast498); - this->type = (LockType::type)ecast498; + int32_t ecast528; + xfer += iprot->readI32(ecast528); + this->type = (LockType::type)ecast528; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -11100,9 +11838,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast499; - xfer += iprot->readI32(ecast499); - this->level = (LockLevel::type)ecast499; + int32_t ecast529; + xfer += iprot->readI32(ecast529); + this->level = (LockLevel::type)ecast529; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -11192,21 +11930,21 @@ void swap(LockComponent &a, LockComponent &b) { swap(a.__isset, b.__isset); } -LockComponent::LockComponent(const LockComponent& other500) { - type = other500.type; - level = other500.level; - dbname = other500.dbname; - tablename = other500.tablename; - partitionname = other500.partitionname; - __isset = other500.__isset; -} -LockComponent& LockComponent::operator=(const LockComponent& other501) { - type = other501.type; - level = other501.level; - dbname = other501.dbname; - tablename = other501.tablename; - partitionname = other501.partitionname; - __isset = other501.__isset; +LockComponent::LockComponent(const LockComponent& other530) { + type = other530.type; + level = other530.level; + dbname = other530.dbname; + tablename = other530.tablename; + partitionname = other530.partitionname; + __isset = other530.__isset; +} +LockComponent& LockComponent::operator=(const LockComponent& other531) { + type = other531.type; + level = other531.level; + dbname = other531.dbname; + tablename = other531.tablename; + partitionname = other531.partitionname; + __isset = other531.__isset; return *this; } void LockComponent::printTo(std::ostream& out) const { @@ -11275,14 +12013,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size502; - ::apache::thrift::protocol::TType _etype505; - xfer += iprot->readListBegin(_etype505, _size502); - this->component.resize(_size502); - uint32_t _i506; - for (_i506 = 0; _i506 < _size502; ++_i506) + uint32_t _size532; + ::apache::thrift::protocol::TType _etype535; + xfer += iprot->readListBegin(_etype535, _size532); + this->component.resize(_size532); + uint32_t _i536; + for (_i536 = 0; _i536 < _size532; ++_i536) { - xfer += this->component[_i506].read(iprot); + xfer += this->component[_i536].read(iprot); } xfer += iprot->readListEnd(); } @@ -11349,10 +12087,10 @@ uint32_t LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast<uint32_t>(this->component.size())); - std::vector<LockComponent> ::const_iterator _iter507; - for (_iter507 = this->component.begin(); _iter507 != this->component.end(); ++_iter507) + std::vector<LockComponent> ::const_iterator _iter537; + for (_iter537 = this->component.begin(); _iter537 != this->component.end(); ++_iter537) { - xfer += (*_iter507).write(oprot); + xfer += (*_iter537).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11391,21 +12129,21 @@ void swap(LockRequest &a, LockRequest &b) { swap(a.__isset, b.__isset); } -LockRequest::LockRequest(const LockRequest& other508) { - component = other508.component; - txnid = other508.txnid; - user = other508.user; - hostname = other508.hostname; - agentInfo = other508.agentInfo; - __isset = other508.__isset; -} -LockRequest& LockRequest::operator=(const LockRequest& other509) { - component = other509.component; - txnid = other509.txnid; - user = other509.user; - hostname = other509.hostname; - agentInfo = other509.agentInfo; - __isset = other509.__isset; +LockRequest::LockRequest(const LockRequest& other538) { + component = other538.component; + txnid = other538.txnid; + user = other538.user; + hostname = other538.hostname; + agentInfo = other538.agentInfo; + __isset = other538.__isset; +} +LockRequest& LockRequest::operator=(const LockRequest& other539) { + component = other539.component; + txnid = other539.txnid; + user = other539.user; + hostname = other539.hostname; + agentInfo = other539.agentInfo; + __isset = other539.__isset; return *this; } void LockRequest::printTo(std::ostream& out) const { @@ -11465,9 +12203,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast510; - xfer += iprot->readI32(ecast510); - this->state = (LockState::type)ecast510; + int32_t ecast540; + xfer += iprot->readI32(ecast540); + this->state = (LockState::type)ecast540; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -11513,13 +12251,13 @@ void swap(LockResponse &a, LockResponse &b) { swap(a.state, b.state); } -LockResponse::LockResponse(const LockResponse& other511) { - lockid = other511.lockid; - state = other511.state; +LockResponse::LockResponse(const LockResponse& other541) { + lockid = other541.lockid; + state = other541.state; } -LockResponse& LockResponse::operator=(const LockResponse& other512) { - lockid = other512.lockid; - state = other512.state; +LockResponse& LockResponse::operator=(const LockResponse& other542) { + lockid = other542.lockid; + state = other542.state; return *this; } void LockResponse::printTo(std::ostream& out) const { @@ -11641,17 +12379,17 @@ void swap(CheckLockRequest &a, CheckLockRequest &b) { swap(a.__isset, b.__isset); } -CheckLockRequest::CheckLockRequest(const CheckLockRequest& other513) { - lockid = other513.lockid; - txnid = other513.txnid; - elapsed_ms = other513.elapsed_ms; - __isset = other513.__isset; +CheckLockRequest::CheckLockRequest(const CheckLockRequest& other543) { + lockid = other543.lockid; + txnid = other543.txnid; + elapsed_ms = other543.elapsed_ms; + __isset = other543.__isset; } -CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other514) { - lockid = other514.lockid; - txnid = other514.txnid; - elapsed_ms = other514.elapsed_ms; - __isset = other514.__isset; +CheckLockRequest& CheckLockRequest::operator=(const CheckLockRequest& other544) { + lockid = other544.lockid; + txnid = other544.txnid; + elapsed_ms = other544.elapsed_ms; + __isset = other544.__isset; return *this; } void
CheckLockRequest::printTo(std::ostream& out) const { @@ -11735,11 +12473,11 @@ void swap(UnlockRequest &a, UnlockRequest &b) { swap(a.lockid, b.lockid); } -UnlockRequest::UnlockRequest(const UnlockRequest& other515) { - lockid = other515.lockid; +UnlockRequest::UnlockRequest(const UnlockRequest& other545) { + lockid = other545.lockid; } -UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other516) { - lockid = other516.lockid; +UnlockRequest& UnlockRequest::operator=(const UnlockRequest& other546) { + lockid = other546.lockid; return *this; } void UnlockRequest::printTo(std::ostream& out) const { @@ -11878,19 +12616,19 @@ void swap(ShowLocksRequest &a, ShowLocksRequest &b) { swap(a.__isset, b.__isset); } -ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other517) { - dbname = other517.dbname; - tablename = other517.tablename; - partname = other517.partname; - isExtended = other517.isExtended; - __isset = other517.__isset; +ShowLocksRequest::ShowLocksRequest(const ShowLocksRequest& other547) { + dbname = other547.dbname; + tablename = other547.tablename; + partname = other547.partname; + isExtended = other547.isExtended; + __isset = other547.__isset; } -ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other518) { - dbname = other518.dbname; - tablename = other518.tablename; - partname = other518.partname; - isExtended = other518.isExtended; - __isset = other518.__isset; +ShowLocksRequest& ShowLocksRequest::operator=(const ShowLocksRequest& other548) { + dbname = other548.dbname; + tablename = other548.tablename; + partname = other548.partname; + isExtended = other548.isExtended; + __isset = other548.__isset; return *this; } void ShowLocksRequest::printTo(std::ostream& out) const { @@ -12043,9 +12781,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast519; - xfer += iprot->readI32(ecast519); - this->state = (LockState::type)ecast519; + int32_t ecast549; + xfer += iprot->readI32(ecast549); + this->state = (LockState::type)ecast549; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -12053,9 +12791,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast520; - xfer += iprot->readI32(ecast520); - this->type = (LockType::type)ecast520; + int32_t ecast550; + xfer += iprot->readI32(ecast550); + this->type = (LockType::type)ecast550; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -12271,43 +13009,43 @@ void swap(ShowLocksResponseElement &a, ShowLocksResponseElement &b) { swap(a.__isset, b.__isset); } -ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other521) { - lockid = other521.lockid; - dbname = other521.dbname; - tablename = other521.tablename; - partname = other521.partname; - state = other521.state; - type = other521.type; - txnid = other521.txnid; - lastheartbeat = other521.lastheartbeat; - acquiredat = other521.acquiredat; - user = other521.user; - hostname = other521.hostname; - heartbeatCount = other521.heartbeatCount; - agentInfo = other521.agentInfo; - blockedByExtId = other521.blockedByExtId; - blockedByIntId = other521.blockedByIntId; - lockIdInternal = other521.lockIdInternal; - __isset = other521.__isset; -} -ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other522) { - lockid = other522.lockid; - dbname = other522.dbname; 
- tablename = other522.tablename; - partname = other522.partname; - state = other522.state; - type = other522.type; - txnid = other522.txnid; - lastheartbeat = other522.lastheartbeat; - acquiredat = other522.acquiredat; - user = other522.user; - hostname = other522.hostname; - heartbeatCount = other522.heartbeatCount; - agentInfo = other522.agentInfo; - blockedByExtId = other522.blockedByExtId; - blockedByIntId = other522.blockedByIntId; - lockIdInternal = other522.lockIdInternal; - __isset = other522.__isset; +ShowLocksResponseElement::ShowLocksResponseElement(const ShowLocksResponseElement& other551) { + lockid = other551.lockid; + dbname = other551.dbname; + tablename = other551.tablename; + partname = other551.partname; + state = other551.state; + type = other551.type; + txnid = other551.txnid; + lastheartbeat = other551.lastheartbeat; + acquiredat = other551.acquiredat; + user = other551.user; + hostname = other551.hostname; + heartbeatCount = other551.heartbeatCount; + agentInfo = other551.agentInfo; + blockedByExtId = other551.blockedByExtId; + blockedByIntId = other551.blockedByIntId; + lockIdInternal = other551.lockIdInternal; + __isset = other551.__isset; +} +ShowLocksResponseElement& ShowLocksResponseElement::operator=(const ShowLocksResponseElement& other552) { + lockid = other552.lockid; + dbname = other552.dbname; + tablename = other552.tablename; + partname = other552.partname; + state = other552.state; + type = other552.type; + txnid = other552.txnid; + lastheartbeat = other552.lastheartbeat; + acquiredat = other552.acquiredat; + user = other552.user; + hostname = other552.hostname; + heartbeatCount = other552.heartbeatCount; + agentInfo = other552.agentInfo; + blockedByExtId = other552.blockedByExtId; + blockedByIntId = other552.blockedByIntId; + lockIdInternal = other552.lockIdInternal; + __isset = other552.__isset; return *this; } void ShowLocksResponseElement::printTo(std::ostream& out) const { @@ -12366,14 +13104,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size523; - ::apache::thrift::protocol::TType _etype526; - xfer += iprot->readListBegin(_etype526, _size523); - this->locks.resize(_size523); - uint32_t _i527; - for (_i527 = 0; _i527 < _size523; ++_i527) + uint32_t _size553; + ::apache::thrift::protocol::TType _etype556; + xfer += iprot->readListBegin(_etype556, _size553); + this->locks.resize(_size553); + uint32_t _i557; + for (_i557 = 0; _i557 < _size553; ++_i557) { - xfer += this->locks[_i527].read(iprot); + xfer += this->locks[_i557].read(iprot); } xfer += iprot->readListEnd(); } @@ -12402,10 +13140,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->locks.size())); - std::vector<ShowLocksResponseElement> ::const_iterator _iter528; - for (_iter528 = this->locks.begin(); _iter528 != this->locks.end(); ++_iter528) + std::vector<ShowLocksResponseElement> ::const_iterator _iter558; - for (_iter558 = this->locks.begin(); _iter558 != this->locks.end(); ++_iter558) { - xfer += (*_iter528).write(oprot); + xfer += (*_iter558).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12422,13 +13160,13 @@ void swap(ShowLocksResponse &a, ShowLocksResponse &b) { swap(a.__isset, b.__isset); } -ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other529) { - locks = other529.locks; - __isset =
other529.__isset; +ShowLocksResponse::ShowLocksResponse(const ShowLocksResponse& other559) { + locks = other559.locks; + __isset = other559.__isset; } -ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other530) { - locks = other530.locks; - __isset = other530.__isset; +ShowLocksResponse& ShowLocksResponse::operator=(const ShowLocksResponse& other560) { + locks = other560.locks; + __isset = other560.__isset; return *this; } void ShowLocksResponse::printTo(std::ostream& out) const { @@ -12529,15 +13267,15 @@ void swap(HeartbeatRequest &a, HeartbeatRequest &b) { swap(a.__isset, b.__isset); } -HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other531) { - lockid = other531.lockid; - txnid = other531.txnid; - __isset = other531.__isset; +HeartbeatRequest::HeartbeatRequest(const HeartbeatRequest& other561) { + lockid = other561.lockid; + txnid = other561.txnid; + __isset = other561.__isset; } -HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other532) { - lockid = other532.lockid; - txnid = other532.txnid; - __isset = other532.__isset; +HeartbeatRequest& HeartbeatRequest::operator=(const HeartbeatRequest& other562) { + lockid = other562.lockid; + txnid = other562.txnid; + __isset = other562.__isset; return *this; } void HeartbeatRequest::printTo(std::ostream& out) const { @@ -12640,13 +13378,13 @@ void swap(HeartbeatTxnRangeRequest &a, HeartbeatTxnRangeRequest &b) { swap(a.max, b.max); } -HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other533) { - min = other533.min; - max = other533.max; +HeartbeatTxnRangeRequest::HeartbeatTxnRangeRequest(const HeartbeatTxnRangeRequest& other563) { + min = other563.min; + max = other563.max; } -HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other534) { - min = other534.min; - max = other534.max; +HeartbeatTxnRangeRequest& HeartbeatTxnRangeRequest::operator=(const HeartbeatTxnRangeRequest& other564) { + min = other564.min; + max = other564.max; return *this; } void HeartbeatTxnRangeRequest::printTo(std::ostream& out) const { @@ -12697,15 +13435,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size535; - ::apache::thrift::protocol::TType _etype538; - xfer += iprot->readSetBegin(_etype538, _size535); - uint32_t _i539; - for (_i539 = 0; _i539 < _size535; ++_i539) + uint32_t _size565; + ::apache::thrift::protocol::TType _etype568; + xfer += iprot->readSetBegin(_etype568, _size565); + uint32_t _i569; + for (_i569 = 0; _i569 < _size565; ++_i569) { - int64_t _elem540; - xfer += iprot->readI64(_elem540); - this->aborted.insert(_elem540); + int64_t _elem570; + xfer += iprot->readI64(_elem570); + this->aborted.insert(_elem570); } xfer += iprot->readSetEnd(); } @@ -12718,15 +13456,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size541; - ::apache::thrift::protocol::TType _etype544; - xfer += iprot->readSetBegin(_etype544, _size541); - uint32_t _i545; - for (_i545 = 0; _i545 < _size541; ++_i545) + uint32_t _size571; + ::apache::thrift::protocol::TType _etype574; + xfer += iprot->readSetBegin(_etype574, _size571); + uint32_t _i575; + for (_i575 = 0; _i575 < _size571; ++_i575) { - int64_t _elem546; - xfer += iprot->readI64(_elem546); - this->nosuch.insert(_elem546); + int64_t _elem576; + xfer += 
iprot->readI64(_elem576); + this->nosuch.insert(_elem576); } xfer += iprot->readSetEnd(); } @@ -12759,10 +13497,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->aborted.size())); - std::set<int64_t> ::const_iterator _iter547; - for (_iter547 = this->aborted.begin(); _iter547 != this->aborted.end(); ++_iter547) + std::set<int64_t> ::const_iterator _iter577; + for (_iter577 = this->aborted.begin(); _iter577 != this->aborted.end(); ++_iter577) { - xfer += oprot->writeI64((*_iter547)); + xfer += oprot->writeI64((*_iter577)); } xfer += oprot->writeSetEnd(); } @@ -12771,10 +13509,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast<uint32_t>(this->nosuch.size())); - std::set<int64_t> ::const_iterator _iter548; - for (_iter548 = this->nosuch.begin(); _iter548 != this->nosuch.end(); ++_iter548) + std::set<int64_t> ::const_iterator _iter578; + for (_iter578 = this->nosuch.begin(); _iter578 != this->nosuch.end(); ++_iter578) { - xfer += oprot->writeI64((*_iter548)); + xfer += oprot->writeI64((*_iter578)); } xfer += oprot->writeSetEnd(); } @@ -12791,13 +13529,13 @@ void swap(HeartbeatTxnRangeResponse &a, HeartbeatTxnRangeResponse &b) { swap(a.nosuch, b.nosuch); } -HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other549) { - aborted = other549.aborted; - nosuch = other549.nosuch; +HeartbeatTxnRangeResponse::HeartbeatTxnRangeResponse(const HeartbeatTxnRangeResponse& other579) { + aborted = other579.aborted; + nosuch = other579.nosuch; } -HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other550) { - aborted = other550.aborted; - nosuch = other550.nosuch; +HeartbeatTxnRangeResponse& HeartbeatTxnRangeResponse::operator=(const HeartbeatTxnRangeResponse& other580) { + aborted = other580.aborted; + nosuch = other580.nosuch; return *this; } void HeartbeatTxnRangeResponse::printTo(std::ostream& out) const { @@ -12885,9 +13623,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast551; - xfer += iprot->readI32(ecast551); - this->type = (CompactionType::type)ecast551; + int32_t ecast581; + xfer += iprot->readI32(ecast581); + this->type = (CompactionType::type)ecast581; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -12961,21 +13699,21 @@ void swap(CompactionRequest &a, CompactionRequest &b) { swap(a.__isset, b.__isset); } -CompactionRequest::CompactionRequest(const CompactionRequest& other552) { - dbname = other552.dbname; - tablename = other552.tablename; - partitionname = other552.partitionname; - type = other552.type; - runas = other552.runas; - __isset = other552.__isset; -} -CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other553) { - dbname = other553.dbname; - tablename = other553.tablename; - partitionname = other553.partitionname; - type = other553.type; - runas = other553.runas; - __isset = other553.__isset; +CompactionRequest::CompactionRequest(const CompactionRequest& other582) { + dbname = other582.dbname; + tablename = other582.tablename; + partitionname = other582.partitionname; + type = other582.type; + runas =
other582.runas; + __isset = other582.__isset; +} +CompactionRequest& CompactionRequest::operator=(const CompactionRequest& other583) { + dbname = other583.dbname; + tablename = other583.tablename; + partitionname = other583.partitionname; + type = other583.type; + runas = other583.runas; + __isset = other583.__isset; return *this; } void CompactionRequest::printTo(std::ostream& out) const { @@ -13038,11 +13776,11 @@ void swap(ShowCompactRequest &a, ShowCompactRequest &b) { (void) b; } -ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other554) { - (void) other554; +ShowCompactRequest::ShowCompactRequest(const ShowCompactRequest& other584) { + (void) other584; } -ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other555) { - (void) other555; +ShowCompactRequest& ShowCompactRequest::operator=(const ShowCompactRequest& other585) { + (void) other585; return *this; } void ShowCompactRequest::printTo(std::ostream& out) const { @@ -13163,9 +13901,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast556; - xfer += iprot->readI32(ecast556); - this->type = (CompactionType::type)ecast556; + int32_t ecast586; + xfer += iprot->readI32(ecast586); + this->type = (CompactionType::type)ecast586; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -13338,35 +14076,35 @@ void swap(ShowCompactResponseElement &a, ShowCompactResponseElement &b) { swap(a.__isset, b.__isset); } -ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other557) { - dbname = other557.dbname; - tablename = other557.tablename; - partitionname = other557.partitionname; - type = other557.type; - state = other557.state; - workerid = other557.workerid; - start = other557.start; - runAs = other557.runAs; - hightestTxnId = other557.hightestTxnId; - metaInfo = other557.metaInfo; - endTime = other557.endTime; - hadoopJobId = other557.hadoopJobId; - __isset = other557.__isset; -} -ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other558) { - dbname = other558.dbname; - tablename = other558.tablename; - partitionname = other558.partitionname; - type = other558.type; - state = other558.state; - workerid = other558.workerid; - start = other558.start; - runAs = other558.runAs; - hightestTxnId = other558.hightestTxnId; - metaInfo = other558.metaInfo; - endTime = other558.endTime; - hadoopJobId = other558.hadoopJobId; - __isset = other558.__isset; +ShowCompactResponseElement::ShowCompactResponseElement(const ShowCompactResponseElement& other587) { + dbname = other587.dbname; + tablename = other587.tablename; + partitionname = other587.partitionname; + type = other587.type; + state = other587.state; + workerid = other587.workerid; + start = other587.start; + runAs = other587.runAs; + hightestTxnId = other587.hightestTxnId; + metaInfo = other587.metaInfo; + endTime = other587.endTime; + hadoopJobId = other587.hadoopJobId; + __isset = other587.__isset; +} +ShowCompactResponseElement& ShowCompactResponseElement::operator=(const ShowCompactResponseElement& other588) { + dbname = other588.dbname; + tablename = other588.tablename; + partitionname = other588.partitionname; + type = other588.type; + state = other588.state; + workerid = other588.workerid; + start = other588.start; + runAs = other588.runAs; + hightestTxnId = other588.hightestTxnId; + metaInfo = other588.metaInfo; + endTime = other588.endTime; + 
hadoopJobId = other588.hadoopJobId; + __isset = other588.__isset; return *this; } void ShowCompactResponseElement::printTo(std::ostream& out) const { @@ -13422,14 +14160,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->compacts.clear(); - uint32_t _size559; - ::apache::thrift::protocol::TType _etype562; - xfer += iprot->readListBegin(_etype562, _size559); - this->compacts.resize(_size559); - uint32_t _i563; - for (_i563 = 0; _i563 < _size559; ++_i563) + uint32_t _size589; + ::apache::thrift::protocol::TType _etype592; + xfer += iprot->readListBegin(_etype592, _size589); + this->compacts.resize(_size589); + uint32_t _i593; + for (_i593 = 0; _i593 < _size589; ++_i593) { - xfer += this->compacts[_i563].read(iprot); + xfer += this->compacts[_i593].read(iprot); } xfer += iprot->readListEnd(); } @@ -13460,10 +14198,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->compacts.size())); - std::vector<ShowCompactResponseElement> ::const_iterator _iter564; - for (_iter564 = this->compacts.begin(); _iter564 != this->compacts.end(); ++_iter564) + std::vector<ShowCompactResponseElement> ::const_iterator _iter594; + for (_iter594 = this->compacts.begin(); _iter594 != this->compacts.end(); ++_iter594) { - xfer += (*_iter564).write(oprot); + xfer += (*_iter594).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13479,11 +14217,11 @@ void swap(ShowCompactResponse &a, ShowCompactResponse &b) { swap(a.compacts, b.compacts); } -ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other565) { - compacts = other565.compacts; +ShowCompactResponse::ShowCompactResponse(const ShowCompactResponse& other595) { + compacts = other595.compacts; } -ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other566) { - compacts = other566.compacts; +ShowCompactResponse& ShowCompactResponse::operator=(const ShowCompactResponse& other596) { + compacts = other596.compacts; return *this; } void ShowCompactResponse::printTo(std::ostream& out) const { @@ -13567,14 +14305,14 @@ uint32_t AddDynamicPartitions::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionnames.clear(); - uint32_t _size567; - ::apache::thrift::protocol::TType _etype570; - xfer += iprot->readListBegin(_etype570, _size567); - this->partitionnames.resize(_size567); - uint32_t _i571; - for (_i571 = 0; _i571 < _size567; ++_i571) + uint32_t _size597; + ::apache::thrift::protocol::TType _etype600; + xfer += iprot->readListBegin(_etype600, _size597); + this->partitionnames.resize(_size597); + uint32_t _i601; + for (_i601 = 0; _i601 < _size597; ++_i601) { - xfer += iprot->readString(this->partitionnames[_i571]); + xfer += iprot->readString(this->partitionnames[_i601]); } xfer += iprot->readListEnd(); } @@ -13623,10 +14361,10 @@ uint32_t AddDynamicPartitions::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitionnames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionnames.size())); - std::vector<std::string> ::const_iterator _iter572; - for (_iter572 = this->partitionnames.begin(); _iter572 != this->partitionnames.end(); ++_iter572) + std::vector<std::string> ::const_iterator _iter602; + for (_iter602 =
this->partitionnames.begin(); _iter602 != this->partitionnames.end(); ++_iter602) { - xfer += oprot->writeString((*_iter572)); + xfer += oprot->writeString((*_iter602)); } xfer += oprot->writeListEnd(); } @@ -13645,17 +14383,17 @@ void swap(AddDynamicPartitions &a, AddDynamicPartitions &b) { swap(a.partitionnames, b.partitionnames); } -AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other573) { - txnid = other573.txnid; - dbname = other573.dbname; - tablename = other573.tablename; - partitionnames = other573.partitionnames; +AddDynamicPartitions::AddDynamicPartitions(const AddDynamicPartitions& other603) { + txnid = other603.txnid; + dbname = other603.dbname; + tablename = other603.tablename; + partitionnames = other603.partitionnames; } -AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other574) { - txnid = other574.txnid; - dbname = other574.dbname; - tablename = other574.tablename; - partitionnames = other574.partitionnames; +AddDynamicPartitions& AddDynamicPartitions::operator=(const AddDynamicPartitions& other604) { + txnid = other604.txnid; + dbname = other604.dbname; + tablename = other604.tablename; + partitionnames = other604.partitionnames; return *this; } void AddDynamicPartitions::printTo(std::ostream& out) const { @@ -13760,15 +14498,15 @@ void swap(NotificationEventRequest &a, NotificationEventRequest &b) { swap(a.__isset, b.__isset); } -NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other575) { - lastEvent = other575.lastEvent; - maxEvents = other575.maxEvents; - __isset = other575.__isset; +NotificationEventRequest::NotificationEventRequest(const NotificationEventRequest& other605) { + lastEvent = other605.lastEvent; + maxEvents = other605.maxEvents; + __isset = other605.__isset; } -NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other576) { - lastEvent = other576.lastEvent; - maxEvents = other576.maxEvents; - __isset = other576.__isset; +NotificationEventRequest& NotificationEventRequest::operator=(const NotificationEventRequest& other606) { + lastEvent = other606.lastEvent; + maxEvents = other606.maxEvents; + __isset = other606.__isset; return *this; } void NotificationEventRequest::printTo(std::ostream& out) const { @@ -13950,23 +14688,23 @@ void swap(NotificationEvent &a, NotificationEvent &b) { swap(a.__isset, b.__isset); } -NotificationEvent::NotificationEvent(const NotificationEvent& other577) { - eventId = other577.eventId; - eventTime = other577.eventTime; - eventType = other577.eventType; - dbName = other577.dbName; - tableName = other577.tableName; - message = other577.message; - __isset = other577.__isset; -} -NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other578) { - eventId = other578.eventId; - eventTime = other578.eventTime; - eventType = other578.eventType; - dbName = other578.dbName; - tableName = other578.tableName; - message = other578.message; - __isset = other578.__isset; +NotificationEvent::NotificationEvent(const NotificationEvent& other607) { + eventId = other607.eventId; + eventTime = other607.eventTime; + eventType = other607.eventType; + dbName = other607.dbName; + tableName = other607.tableName; + message = other607.message; + __isset = other607.__isset; +} +NotificationEvent& NotificationEvent::operator=(const NotificationEvent& other608) { + eventId = other608.eventId; + eventTime = other608.eventTime; + eventType = other608.eventType; + dbName = other608.dbName; + tableName = 
other608.tableName; + message = other608.message; + __isset = other608.__isset; return *this; } void NotificationEvent::printTo(std::ostream& out) const { @@ -14016,14 +14754,14 @@ uint32_t NotificationEventResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_LIST) { { this->events.clear(); - uint32_t _size579; - ::apache::thrift::protocol::TType _etype582; - xfer += iprot->readListBegin(_etype582, _size579); - this->events.resize(_size579); - uint32_t _i583; - for (_i583 = 0; _i583 < _size579; ++_i583) + uint32_t _size609; + ::apache::thrift::protocol::TType _etype612; + xfer += iprot->readListBegin(_etype612, _size609); + this->events.resize(_size609); + uint32_t _i613; + for (_i613 = 0; _i613 < _size609; ++_i613) { - xfer += this->events[_i583].read(iprot); + xfer += this->events[_i613].read(iprot); } xfer += iprot->readListEnd(); } @@ -14054,10 +14792,10 @@ uint32_t NotificationEventResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("events", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->events.size())); - std::vector<NotificationEvent> ::const_iterator _iter584; - for (_iter584 = this->events.begin(); _iter584 != this->events.end(); ++_iter584) + std::vector<NotificationEvent> ::const_iterator _iter614; + for (_iter614 = this->events.begin(); _iter614 != this->events.end(); ++_iter614) { - xfer += (*_iter584).write(oprot); + xfer += (*_iter614).write(oprot); } xfer += oprot->writeListEnd(); } @@ -14073,11 +14811,11 @@ void swap(NotificationEventResponse &a, NotificationEventResponse &b) { swap(a.events, b.events); } -NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other585) { - events = other585.events; +NotificationEventResponse::NotificationEventResponse(const NotificationEventResponse& other615) { + events = other615.events; } -NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other586) { - events = other586.events; +NotificationEventResponse& NotificationEventResponse::operator=(const NotificationEventResponse& other616) { + events = other616.events; return *this; } void NotificationEventResponse::printTo(std::ostream& out) const { @@ -14159,11 +14897,11 @@ void swap(CurrentNotificationEventId &a, CurrentNotificationEventId &b) { swap(a.eventId, b.eventId); } -CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other587) { - eventId = other587.eventId; +CurrentNotificationEventId::CurrentNotificationEventId(const CurrentNotificationEventId& other617) { + eventId = other617.eventId; } -CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other588) { - eventId = other588.eventId; +CurrentNotificationEventId& CurrentNotificationEventId::operator=(const CurrentNotificationEventId& other618) { + eventId = other618.eventId; return *this; } void CurrentNotificationEventId::printTo(std::ostream& out) const { @@ -14208,14 +14946,14 @@ uint32_t InsertEventRequestData::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->filesAdded.clear(); - uint32_t _size589; - ::apache::thrift::protocol::TType _etype592; - xfer += iprot->readListBegin(_etype592, _size589); - this->filesAdded.resize(_size589); - uint32_t _i593; - for (_i593 = 0; _i593 < _size589; ++_i593) + uint32_t _size619; + ::apache::thrift::protocol::TType _etype622; + xfer +=
iprot->readListBegin(_etype622, _size619); + this->filesAdded.resize(_size619); + uint32_t _i623; + for (_i623 = 0; _i623 < _size619; ++_i623) { - xfer += iprot->readString(this->filesAdded[_i593]); + xfer += iprot->readString(this->filesAdded[_i623]); } xfer += iprot->readListEnd(); } @@ -14246,10 +14984,10 @@ uint32_t InsertEventRequestData::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("filesAdded", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->filesAdded.size())); - std::vector<std::string> ::const_iterator _iter594; - for (_iter594 = this->filesAdded.begin(); _iter594 != this->filesAdded.end(); ++_iter594) + std::vector<std::string> ::const_iterator _iter624; + for (_iter624 = this->filesAdded.begin(); _iter624 != this->filesAdded.end(); ++_iter624) { - xfer += oprot->writeString((*_iter594)); + xfer += oprot->writeString((*_iter624)); } xfer += oprot->writeListEnd(); } @@ -14265,11 +15003,11 @@ void swap(InsertEventRequestData &a, InsertEventRequestData &b) { swap(a.filesAdded, b.filesAdded); } -InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other595) { - filesAdded = other595.filesAdded; +InsertEventRequestData::InsertEventRequestData(const InsertEventRequestData& other625) { + filesAdded = other625.filesAdded; } -InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other596) { - filesAdded = other596.filesAdded; +InsertEventRequestData& InsertEventRequestData::operator=(const InsertEventRequestData& other626) { + filesAdded = other626.filesAdded; return *this; } void InsertEventRequestData::printTo(std::ostream& out) const { @@ -14349,13 +15087,13 @@ void swap(FireEventRequestData &a, FireEventRequestData &b) { swap(a.__isset, b.__isset); } -FireEventRequestData::FireEventRequestData(const FireEventRequestData& other597) { - insertData = other597.insertData; - __isset = other597.__isset; +FireEventRequestData::FireEventRequestData(const FireEventRequestData& other627) { + insertData = other627.insertData; + __isset = other627.__isset; } -FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other598) { - insertData = other598.insertData; - __isset = other598.__isset; +FireEventRequestData& FireEventRequestData::operator=(const FireEventRequestData& other628) { + insertData = other628.insertData; + __isset = other628.__isset; return *this; } void FireEventRequestData::printTo(std::ostream& out) const { @@ -14452,14 +15190,14 @@ uint32_t FireEventRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitionVals.clear(); - uint32_t _size599; - ::apache::thrift::protocol::TType _etype602; - xfer += iprot->readListBegin(_etype602, _size599); - this->partitionVals.resize(_size599); - uint32_t _i603; - for (_i603 = 0; _i603 < _size599; ++_i603) + uint32_t _size629; + ::apache::thrift::protocol::TType _etype632; + xfer += iprot->readListBegin(_etype632, _size629); + this->partitionVals.resize(_size629); + uint32_t _i633; + for (_i633 = 0; _i633 < _size629; ++_i633) { - xfer += iprot->readString(this->partitionVals[_i603]); + xfer += iprot->readString(this->partitionVals[_i633]); } xfer += iprot->readListEnd(); } @@ -14511,10 +15249,10 @@ uint32_t FireEventRequest::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("partitionVals", ::apache::thrift::protocol::T_LIST, 5); { xfer +=
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->partitionVals.size())); - std::vector<std::string> ::const_iterator _iter604; - for (_iter604 = this->partitionVals.begin(); _iter604 != this->partitionVals.end(); ++_iter604) + std::vector<std::string> ::const_iterator _iter634; + for (_iter634 = this->partitionVals.begin(); _iter634 != this->partitionVals.end(); ++_iter634) { - xfer += oprot->writeString((*_iter604)); + xfer += oprot->writeString((*_iter634)); } xfer += oprot->writeListEnd(); } @@ -14535,21 +15273,21 @@ void swap(FireEventRequest &a, FireEventRequest &b) { swap(a.__isset, b.__isset); } -FireEventRequest::FireEventRequest(const FireEventRequest& other605) { - successful = other605.successful; - data = other605.data; - dbName = other605.dbName; - tableName = other605.tableName; - partitionVals = other605.partitionVals; - __isset = other605.__isset; -} -FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other606) { - successful = other606.successful; - data = other606.data; - dbName = other606.dbName; - tableName = other606.tableName; - partitionVals = other606.partitionVals; - __isset = other606.__isset; +FireEventRequest::FireEventRequest(const FireEventRequest& other635) { + successful = other635.successful; + data = other635.data; + dbName = other635.dbName; + tableName = other635.tableName; + partitionVals = other635.partitionVals; + __isset = other635.__isset; +} +FireEventRequest& FireEventRequest::operator=(const FireEventRequest& other636) { + successful = other636.successful; + data = other636.data; + dbName = other636.dbName; + tableName = other636.tableName; + partitionVals = other636.partitionVals; + __isset = other636.__isset; return *this; } void FireEventRequest::printTo(std::ostream& out) const { @@ -14612,11 +15350,11 @@ void swap(FireEventResponse &a, FireEventResponse &b) { (void) b; } -FireEventResponse::FireEventResponse(const FireEventResponse& other607) { - (void) other607; +FireEventResponse::FireEventResponse(const FireEventResponse& other637) { + (void) other637; } -FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other608) { - (void) other608; +FireEventResponse& FireEventResponse::operator=(const FireEventResponse& other638) { + (void) other638; return *this; } void FireEventResponse::printTo(std::ostream& out) const { @@ -14697,11 +15435,11 @@ void swap(GetChangeVersionRequest &a, GetChangeVersionRequest &b) { swap(a.topic, b.topic); } -GetChangeVersionRequest::GetChangeVersionRequest(const GetChangeVersionRequest& other609) { - topic = other609.topic; +GetChangeVersionRequest::GetChangeVersionRequest(const GetChangeVersionRequest& other639) { + topic = other639.topic; } -GetChangeVersionRequest& GetChangeVersionRequest::operator=(const GetChangeVersionRequest& other610) { - topic = other610.topic; +GetChangeVersionRequest& GetChangeVersionRequest::operator=(const GetChangeVersionRequest& other640) { + topic = other640.topic; return *this; } void GetChangeVersionRequest::printTo(std::ostream& out) const { @@ -14783,11 +15521,11 @@ void swap(GetChangeVersionResult &a, GetChangeVersionResult &b) { swap(a.version, b.version); } -GetChangeVersionResult::GetChangeVersionResult(const GetChangeVersionResult& other611) { - version = other611.version; +GetChangeVersionResult::GetChangeVersionResult(const GetChangeVersionResult& other641) { + version = other641.version; } -GetChangeVersionResult& GetChangeVersionResult::operator=(const GetChangeVersionResult& other612) { - version = other612.version;
+GetChangeVersionResult& GetChangeVersionResult::operator=(const GetChangeVersionResult& other642) { + version = other642.version; return *this; } void GetChangeVersionResult::printTo(std::ostream& out) const { @@ -14888,15 +15626,15 @@ void swap(MetadataPpdResult &a, MetadataPpdResult &b) { swap(a.__isset, b.__isset); } -MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other613) { - metadata = other613.metadata; - includeBitset = other613.includeBitset; - __isset = other613.__isset; +MetadataPpdResult::MetadataPpdResult(const MetadataPpdResult& other643) { + metadata = other643.metadata; + includeBitset = other643.includeBitset; + __isset = other643.__isset; } -MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other614) { - metadata = other614.metadata; - includeBitset = other614.includeBitset; - __isset = other614.__isset; +MetadataPpdResult& MetadataPpdResult::operator=(const MetadataPpdResult& other644) { + metadata = other644.metadata; + includeBitset = other644.includeBitset; + __isset = other644.__isset; return *this; } void MetadataPpdResult::printTo(std::ostream& out) const { @@ -14947,17 +15685,17 @@ uint32_t GetFileMetadataByExprResult::read(::apache::thrift::protocol::TProtocol if (ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size615; - ::apache::thrift::protocol::TType _ktype616; - ::apache::thrift::protocol::TType _vtype617; - xfer += iprot->readMapBegin(_ktype616, _vtype617, _size615); - uint32_t _i619; - for (_i619 = 0; _i619 < _size615; ++_i619) + uint32_t _size645; + ::apache::thrift::protocol::TType _ktype646; + ::apache::thrift::protocol::TType _vtype647; + xfer += iprot->readMapBegin(_ktype646, _vtype647, _size645); + uint32_t _i649; + for (_i649 = 0; _i649 < _size645; ++_i649) { - int64_t _key620; - xfer += iprot->readI64(_key620); - MetadataPpdResult& _val621 = this->metadata[_key620]; - xfer += _val621.read(iprot); + int64_t _key650; + xfer += iprot->readI64(_key650); + MetadataPpdResult& _val651 = this->metadata[_key650]; + xfer += _val651.read(iprot); } xfer += iprot->readMapEnd(); } @@ -14998,11 +15736,11 @@ uint32_t GetFileMetadataByExprResult::write(::apache::thrift::protocol::TProtoco xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRUCT, static_cast<uint32_t>(this->metadata.size())); - std::map<int64_t, MetadataPpdResult> ::const_iterator _iter622; - for (_iter622 = this->metadata.begin(); _iter622 != this->metadata.end(); ++_iter622) + std::map<int64_t, MetadataPpdResult> ::const_iterator _iter652; + for (_iter652 = this->metadata.begin(); _iter652 != this->metadata.end(); ++_iter652) { - xfer += oprot->writeI64(_iter622->first); - xfer += _iter622->second.write(oprot); + xfer += oprot->writeI64(_iter652->first); + xfer += _iter652->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -15023,13 +15761,13 @@ void swap(GetFileMetadataByExprResult &a, GetFileMetadataByExprResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other623) { - metadata = other623.metadata; - isSupported = other623.isSupported; +GetFileMetadataByExprResult::GetFileMetadataByExprResult(const GetFileMetadataByExprResult& other653) { + metadata = other653.metadata; + isSupported = other653.isSupported; } -GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other624) { - metadata = other624.metadata; -
isSupported = other624.isSupported; +GetFileMetadataByExprResult& GetFileMetadataByExprResult::operator=(const GetFileMetadataByExprResult& other654) { + metadata = other654.metadata; + isSupported = other654.isSupported; return *this; } void GetFileMetadataByExprResult::printTo(std::ostream& out) const { @@ -15090,14 +15828,14 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size625; - ::apache::thrift::protocol::TType _etype628; - xfer += iprot->readListBegin(_etype628, _size625); - this->fileIds.resize(_size625); - uint32_t _i629; - for (_i629 = 0; _i629 < _size625; ++_i629) + uint32_t _size655; + ::apache::thrift::protocol::TType _etype658; + xfer += iprot->readListBegin(_etype658, _size655); + this->fileIds.resize(_size655); + uint32_t _i659; + for (_i659 = 0; _i659 < _size655; ++_i659) { - xfer += iprot->readI64(this->fileIds[_i629]); + xfer += iprot->readI64(this->fileIds[_i659]); } xfer += iprot->readListEnd(); } @@ -15124,9 +15862,9 @@ uint32_t GetFileMetadataByExprRequest::read(::apache::thrift::protocol::TProtoco break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast630; - xfer += iprot->readI32(ecast630); - this->type = (FileMetadataExprType::type)ecast630; + int32_t ecast660; + xfer += iprot->readI32(ecast660); + this->type = (FileMetadataExprType::type)ecast660; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -15156,10 +15894,10 @@ uint32_t GetFileMetadataByExprRequest::write(::apache::thrift::protocol::TProtoc xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter631; - for (_iter631 = this->fileIds.begin(); _iter631 != this->fileIds.end(); ++_iter631) + std::vector ::const_iterator _iter661; + for (_iter661 = this->fileIds.begin(); _iter661 != this->fileIds.end(); ++_iter661) { - xfer += oprot->writeI64((*_iter631)); + xfer += oprot->writeI64((*_iter661)); } xfer += oprot->writeListEnd(); } @@ -15193,19 +15931,19 @@ void swap(GetFileMetadataByExprRequest &a, GetFileMetadataByExprRequest &b) { swap(a.__isset, b.__isset); } -GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other632) { - fileIds = other632.fileIds; - expr = other632.expr; - doGetFooters = other632.doGetFooters; - type = other632.type; - __isset = other632.__isset; +GetFileMetadataByExprRequest::GetFileMetadataByExprRequest(const GetFileMetadataByExprRequest& other662) { + fileIds = other662.fileIds; + expr = other662.expr; + doGetFooters = other662.doGetFooters; + type = other662.type; + __isset = other662.__isset; } -GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other633) { - fileIds = other633.fileIds; - expr = other633.expr; - doGetFooters = other633.doGetFooters; - type = other633.type; - __isset = other633.__isset; +GetFileMetadataByExprRequest& GetFileMetadataByExprRequest::operator=(const GetFileMetadataByExprRequest& other663) { + fileIds = other663.fileIds; + expr = other663.expr; + doGetFooters = other663.doGetFooters; + type = other663.type; + __isset = other663.__isset; return *this; } void GetFileMetadataByExprRequest::printTo(std::ostream& out) const { @@ -15258,17 +15996,17 @@ uint32_t GetFileMetadataResult::read(::apache::thrift::protocol::TProtocol* ipro if 
(ftype == ::apache::thrift::protocol::T_MAP) { { this->metadata.clear(); - uint32_t _size634; - ::apache::thrift::protocol::TType _ktype635; - ::apache::thrift::protocol::TType _vtype636; - xfer += iprot->readMapBegin(_ktype635, _vtype636, _size634); - uint32_t _i638; - for (_i638 = 0; _i638 < _size634; ++_i638) + uint32_t _size664; + ::apache::thrift::protocol::TType _ktype665; + ::apache::thrift::protocol::TType _vtype666; + xfer += iprot->readMapBegin(_ktype665, _vtype666, _size664); + uint32_t _i668; + for (_i668 = 0; _i668 < _size664; ++_i668) { - int64_t _key639; - xfer += iprot->readI64(_key639); - std::string& _val640 = this->metadata[_key639]; - xfer += iprot->readBinary(_val640); + int64_t _key669; + xfer += iprot->readI64(_key669); + std::string& _val670 = this->metadata[_key669]; + xfer += iprot->readBinary(_val670); } xfer += iprot->readMapEnd(); } @@ -15309,11 +16047,11 @@ uint32_t GetFileMetadataResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_I64, ::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::map ::const_iterator _iter641; - for (_iter641 = this->metadata.begin(); _iter641 != this->metadata.end(); ++_iter641) + std::map ::const_iterator _iter671; + for (_iter671 = this->metadata.begin(); _iter671 != this->metadata.end(); ++_iter671) { - xfer += oprot->writeI64(_iter641->first); - xfer += oprot->writeBinary(_iter641->second); + xfer += oprot->writeI64(_iter671->first); + xfer += oprot->writeBinary(_iter671->second); } xfer += oprot->writeMapEnd(); } @@ -15334,13 +16072,13 @@ void swap(GetFileMetadataResult &a, GetFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other642) { - metadata = other642.metadata; - isSupported = other642.isSupported; +GetFileMetadataResult::GetFileMetadataResult(const GetFileMetadataResult& other672) { + metadata = other672.metadata; + isSupported = other672.isSupported; } -GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other643) { - metadata = other643.metadata; - isSupported = other643.isSupported; +GetFileMetadataResult& GetFileMetadataResult::operator=(const GetFileMetadataResult& other673) { + metadata = other673.metadata; + isSupported = other673.isSupported; return *this; } void GetFileMetadataResult::printTo(std::ostream& out) const { @@ -15386,14 +16124,14 @@ uint32_t GetFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size644; - ::apache::thrift::protocol::TType _etype647; - xfer += iprot->readListBegin(_etype647, _size644); - this->fileIds.resize(_size644); - uint32_t _i648; - for (_i648 = 0; _i648 < _size644; ++_i648) + uint32_t _size674; + ::apache::thrift::protocol::TType _etype677; + xfer += iprot->readListBegin(_etype677, _size674); + this->fileIds.resize(_size674); + uint32_t _i678; + for (_i678 = 0; _i678 < _size674; ++_i678) { - xfer += iprot->readI64(this->fileIds[_i648]); + xfer += iprot->readI64(this->fileIds[_i678]); } xfer += iprot->readListEnd(); } @@ -15424,10 +16162,10 @@ uint32_t GetFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, 
static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter649; - for (_iter649 = this->fileIds.begin(); _iter649 != this->fileIds.end(); ++_iter649) + std::vector ::const_iterator _iter679; + for (_iter679 = this->fileIds.begin(); _iter679 != this->fileIds.end(); ++_iter679) { - xfer += oprot->writeI64((*_iter649)); + xfer += oprot->writeI64((*_iter679)); } xfer += oprot->writeListEnd(); } @@ -15443,11 +16181,11 @@ void swap(GetFileMetadataRequest &a, GetFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other650) { - fileIds = other650.fileIds; +GetFileMetadataRequest::GetFileMetadataRequest(const GetFileMetadataRequest& other680) { + fileIds = other680.fileIds; } -GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other651) { - fileIds = other651.fileIds; +GetFileMetadataRequest& GetFileMetadataRequest::operator=(const GetFileMetadataRequest& other681) { + fileIds = other681.fileIds; return *this; } void GetFileMetadataRequest::printTo(std::ostream& out) const { @@ -15506,11 +16244,11 @@ void swap(PutFileMetadataResult &a, PutFileMetadataResult &b) { (void) b; } -PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other652) { - (void) other652; +PutFileMetadataResult::PutFileMetadataResult(const PutFileMetadataResult& other682) { + (void) other682; } -PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other653) { - (void) other653; +PutFileMetadataResult& PutFileMetadataResult::operator=(const PutFileMetadataResult& other683) { + (void) other683; return *this; } void PutFileMetadataResult::printTo(std::ostream& out) const { @@ -15564,14 +16302,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size654; - ::apache::thrift::protocol::TType _etype657; - xfer += iprot->readListBegin(_etype657, _size654); - this->fileIds.resize(_size654); - uint32_t _i658; - for (_i658 = 0; _i658 < _size654; ++_i658) + uint32_t _size684; + ::apache::thrift::protocol::TType _etype687; + xfer += iprot->readListBegin(_etype687, _size684); + this->fileIds.resize(_size684); + uint32_t _i688; + for (_i688 = 0; _i688 < _size684; ++_i688) { - xfer += iprot->readI64(this->fileIds[_i658]); + xfer += iprot->readI64(this->fileIds[_i688]); } xfer += iprot->readListEnd(); } @@ -15584,14 +16322,14 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->metadata.clear(); - uint32_t _size659; - ::apache::thrift::protocol::TType _etype662; - xfer += iprot->readListBegin(_etype662, _size659); - this->metadata.resize(_size659); - uint32_t _i663; - for (_i663 = 0; _i663 < _size659; ++_i663) + uint32_t _size689; + ::apache::thrift::protocol::TType _etype692; + xfer += iprot->readListBegin(_etype692, _size689); + this->metadata.resize(_size689); + uint32_t _i693; + for (_i693 = 0; _i693 < _size689; ++_i693) { - xfer += iprot->readBinary(this->metadata[_i663]); + xfer += iprot->readBinary(this->metadata[_i693]); } xfer += iprot->readListEnd(); } @@ -15602,9 +16340,9 @@ uint32_t PutFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* ipr break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast664; - xfer += iprot->readI32(ecast664); - this->type = (FileMetadataExprType::type)ecast664; + int32_t ecast694; 
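// Editorial sketch (not part of the patch): the ecastNNN temporaries in this hunk are
// how generated C++ reads a Thrift enum -- the wire carries a plain i32, which is then
// cast onto the enum type with no range check. A self-contained illustration;
// EnumSketchType and decodeEnum are hypothetical names, not generated API:
#include <stdint.h>
enum EnumSketchType { ENUM_SKETCH_ORC_SARG = 1 };  // stand-in for FileMetadataExprType
inline EnumSketchType decodeEnum(int32_t wire) {
  return (EnumSketchType)wire;  // unchecked, as in (FileMetadataExprType::type)ecastNNN
}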
+ xfer += iprot->readI32(ecast694); + this->type = (FileMetadataExprType::type)ecast694; this->__isset.type = true; } else { xfer += iprot->skip(ftype); @@ -15634,10 +16372,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter665; - for (_iter665 = this->fileIds.begin(); _iter665 != this->fileIds.end(); ++_iter665) + std::vector ::const_iterator _iter695; + for (_iter695 = this->fileIds.begin(); _iter695 != this->fileIds.end(); ++_iter695) { - xfer += oprot->writeI64((*_iter665)); + xfer += oprot->writeI64((*_iter695)); } xfer += oprot->writeListEnd(); } @@ -15646,10 +16384,10 @@ uint32_t PutFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("metadata", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->metadata.size())); - std::vector ::const_iterator _iter666; - for (_iter666 = this->metadata.begin(); _iter666 != this->metadata.end(); ++_iter666) + std::vector ::const_iterator _iter696; + for (_iter696 = this->metadata.begin(); _iter696 != this->metadata.end(); ++_iter696) { - xfer += oprot->writeBinary((*_iter666)); + xfer += oprot->writeBinary((*_iter696)); } xfer += oprot->writeListEnd(); } @@ -15673,17 +16411,17 @@ void swap(PutFileMetadataRequest &a, PutFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other667) { - fileIds = other667.fileIds; - metadata = other667.metadata; - type = other667.type; - __isset = other667.__isset; +PutFileMetadataRequest::PutFileMetadataRequest(const PutFileMetadataRequest& other697) { + fileIds = other697.fileIds; + metadata = other697.metadata; + type = other697.type; + __isset = other697.__isset; } -PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other668) { - fileIds = other668.fileIds; - metadata = other668.metadata; - type = other668.type; - __isset = other668.__isset; +PutFileMetadataRequest& PutFileMetadataRequest::operator=(const PutFileMetadataRequest& other698) { + fileIds = other698.fileIds; + metadata = other698.metadata; + type = other698.type; + __isset = other698.__isset; return *this; } void PutFileMetadataRequest::printTo(std::ostream& out) const { @@ -15744,11 +16482,11 @@ void swap(ClearFileMetadataResult &a, ClearFileMetadataResult &b) { (void) b; } -ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other669) { - (void) other669; +ClearFileMetadataResult::ClearFileMetadataResult(const ClearFileMetadataResult& other699) { + (void) other699; } -ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other670) { - (void) other670; +ClearFileMetadataResult& ClearFileMetadataResult::operator=(const ClearFileMetadataResult& other700) { + (void) other700; return *this; } void ClearFileMetadataResult::printTo(std::ostream& out) const { @@ -15792,14 +16530,14 @@ uint32_t ClearFileMetadataRequest::read(::apache::thrift::protocol::TProtocol* i if (ftype == ::apache::thrift::protocol::T_LIST) { { this->fileIds.clear(); - uint32_t _size671; - ::apache::thrift::protocol::TType _etype674; - xfer += iprot->readListBegin(_etype674, _size671); - this->fileIds.resize(_size671); - uint32_t 
_i675; - for (_i675 = 0; _i675 < _size671; ++_i675) + uint32_t _size701; + ::apache::thrift::protocol::TType _etype704; + xfer += iprot->readListBegin(_etype704, _size701); + this->fileIds.resize(_size701); + uint32_t _i705; + for (_i705 = 0; _i705 < _size701; ++_i705) { - xfer += iprot->readI64(this->fileIds[_i675]); + xfer += iprot->readI64(this->fileIds[_i705]); } xfer += iprot->readListEnd(); } @@ -15830,10 +16568,10 @@ uint32_t ClearFileMetadataRequest::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("fileIds", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->fileIds.size())); - std::vector ::const_iterator _iter676; - for (_iter676 = this->fileIds.begin(); _iter676 != this->fileIds.end(); ++_iter676) + std::vector ::const_iterator _iter706; + for (_iter706 = this->fileIds.begin(); _iter706 != this->fileIds.end(); ++_iter706) { - xfer += oprot->writeI64((*_iter676)); + xfer += oprot->writeI64((*_iter706)); } xfer += oprot->writeListEnd(); } @@ -15849,11 +16587,11 @@ void swap(ClearFileMetadataRequest &a, ClearFileMetadataRequest &b) { swap(a.fileIds, b.fileIds); } -ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other677) { - fileIds = other677.fileIds; +ClearFileMetadataRequest::ClearFileMetadataRequest(const ClearFileMetadataRequest& other707) { + fileIds = other707.fileIds; } -ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other678) { - fileIds = other678.fileIds; +ClearFileMetadataRequest& ClearFileMetadataRequest::operator=(const ClearFileMetadataRequest& other708) { + fileIds = other708.fileIds; return *this; } void ClearFileMetadataRequest::printTo(std::ostream& out) const { @@ -15935,11 +16673,11 @@ void swap(CacheFileMetadataResult &a, CacheFileMetadataResult &b) { swap(a.isSupported, b.isSupported); } -CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other679) { - isSupported = other679.isSupported; +CacheFileMetadataResult::CacheFileMetadataResult(const CacheFileMetadataResult& other709) { + isSupported = other709.isSupported; } -CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other680) { - isSupported = other680.isSupported; +CacheFileMetadataResult& CacheFileMetadataResult::operator=(const CacheFileMetadataResult& other710) { + isSupported = other710.isSupported; return *this; } void CacheFileMetadataResult::printTo(std::ostream& out) const { @@ -16080,19 +16818,19 @@ void swap(CacheFileMetadataRequest &a, CacheFileMetadataRequest &b) { swap(a.__isset, b.__isset); } -CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other681) { - dbName = other681.dbName; - tblName = other681.tblName; - partName = other681.partName; - isAllParts = other681.isAllParts; - __isset = other681.__isset; +CacheFileMetadataRequest::CacheFileMetadataRequest(const CacheFileMetadataRequest& other711) { + dbName = other711.dbName; + tblName = other711.tblName; + partName = other711.partName; + isAllParts = other711.isAllParts; + __isset = other711.__isset; } -CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& other682) { - dbName = other682.dbName; - tblName = other682.tblName; - partName = other682.partName; - isAllParts = other682.isAllParts; - __isset = other682.__isset; +CacheFileMetadataRequest& CacheFileMetadataRequest::operator=(const CacheFileMetadataRequest& 
other712) { + dbName = other712.dbName; + tblName = other712.tblName; + partName = other712.partName; + isAllParts = other712.isAllParts; + __isset = other712.__isset; return *this; } void CacheFileMetadataRequest::printTo(std::ostream& out) const { @@ -16140,14 +16878,14 @@ uint32_t GetAllFunctionsResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->functions.clear(); - uint32_t _size683; - ::apache::thrift::protocol::TType _etype686; - xfer += iprot->readListBegin(_etype686, _size683); - this->functions.resize(_size683); - uint32_t _i687; - for (_i687 = 0; _i687 < _size683; ++_i687) + uint32_t _size713; + ::apache::thrift::protocol::TType _etype716; + xfer += iprot->readListBegin(_etype716, _size713); + this->functions.resize(_size713); + uint32_t _i717; + for (_i717 = 0; _i717 < _size713; ++_i717) { - xfer += this->functions[_i687].read(iprot); + xfer += this->functions[_i717].read(iprot); } xfer += iprot->readListEnd(); } @@ -16177,10 +16915,10 @@ uint32_t GetAllFunctionsResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("functions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->functions.size())); - std::vector ::const_iterator _iter688; - for (_iter688 = this->functions.begin(); _iter688 != this->functions.end(); ++_iter688) + std::vector ::const_iterator _iter718; + for (_iter718 = this->functions.begin(); _iter718 != this->functions.end(); ++_iter718) { - xfer += (*_iter688).write(oprot); + xfer += (*_iter718).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16197,13 +16935,13 @@ void swap(GetAllFunctionsResponse &a, GetAllFunctionsResponse &b) { swap(a.__isset, b.__isset); } -GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other689) { - functions = other689.functions; - __isset = other689.__isset; +GetAllFunctionsResponse::GetAllFunctionsResponse(const GetAllFunctionsResponse& other719) { + functions = other719.functions; + __isset = other719.__isset; } -GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other690) { - functions = other690.functions; - __isset = other690.__isset; +GetAllFunctionsResponse& GetAllFunctionsResponse::operator=(const GetAllFunctionsResponse& other720) { + functions = other720.functions; + __isset = other720.__isset; return *this; } void GetAllFunctionsResponse::printTo(std::ostream& out) const { @@ -16345,19 +17083,19 @@ void swap(TableMeta &a, TableMeta &b) { swap(a.__isset, b.__isset); } -TableMeta::TableMeta(const TableMeta& other691) { - dbName = other691.dbName; - tableName = other691.tableName; - tableType = other691.tableType; - comments = other691.comments; - __isset = other691.__isset; +TableMeta::TableMeta(const TableMeta& other721) { + dbName = other721.dbName; + tableName = other721.tableName; + tableType = other721.tableType; + comments = other721.comments; + __isset = other721.__isset; } -TableMeta& TableMeta::operator=(const TableMeta& other692) { - dbName = other692.dbName; - tableName = other692.tableName; - tableType = other692.tableType; - comments = other692.comments; - __isset = other692.__isset; +TableMeta& TableMeta::operator=(const TableMeta& other722) { + dbName = other722.dbName; + tableName = other722.tableName; + tableType = other722.tableType; + comments = other722.comments; + __isset = other722.__isset; return *this; } void TableMeta::printTo(std::ostream& out) const { 
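// Editorial sketch (not part of the patch): from here to the end of this file, the
// hunks apply the same renumbering to the generated exception types (MetaException,
// UnknownTableException, ...). They all share one shape -- a message field plus an
// __isset bit, derived from the real ::apache::thrift::TException base. A hedged,
// self-contained sketch; SketchException is an invented name:
#include <string>
#include <thrift/Thrift.h>
class SketchException : public ::apache::thrift::TException {
 public:
  std::string message;  // the single payload field
  SketchException() {}
  SketchException(const SketchException& other) : TException() {  // base defaulted, as generated
    message = other.message;
  }
  SketchException& operator=(const SketchException& other) {
    message = other.message;
    return *this;
  }
};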
@@ -16440,13 +17178,13 @@ void swap(MetaException &a, MetaException &b) { swap(a.__isset, b.__isset); } -MetaException::MetaException(const MetaException& other693) : TException() { - message = other693.message; - __isset = other693.__isset; +MetaException::MetaException(const MetaException& other723) : TException() { + message = other723.message; + __isset = other723.__isset; } -MetaException& MetaException::operator=(const MetaException& other694) { - message = other694.message; - __isset = other694.__isset; +MetaException& MetaException::operator=(const MetaException& other724) { + message = other724.message; + __isset = other724.__isset; return *this; } void MetaException::printTo(std::ostream& out) const { @@ -16537,13 +17275,13 @@ void swap(UnknownTableException &a, UnknownTableException &b) { swap(a.__isset, b.__isset); } -UnknownTableException::UnknownTableException(const UnknownTableException& other695) : TException() { - message = other695.message; - __isset = other695.__isset; +UnknownTableException::UnknownTableException(const UnknownTableException& other725) : TException() { + message = other725.message; + __isset = other725.__isset; } -UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other696) { - message = other696.message; - __isset = other696.__isset; +UnknownTableException& UnknownTableException::operator=(const UnknownTableException& other726) { + message = other726.message; + __isset = other726.__isset; return *this; } void UnknownTableException::printTo(std::ostream& out) const { @@ -16634,13 +17372,13 @@ void swap(UnknownDBException &a, UnknownDBException &b) { swap(a.__isset, b.__isset); } -UnknownDBException::UnknownDBException(const UnknownDBException& other697) : TException() { - message = other697.message; - __isset = other697.__isset; +UnknownDBException::UnknownDBException(const UnknownDBException& other727) : TException() { + message = other727.message; + __isset = other727.__isset; } -UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other698) { - message = other698.message; - __isset = other698.__isset; +UnknownDBException& UnknownDBException::operator=(const UnknownDBException& other728) { + message = other728.message; + __isset = other728.__isset; return *this; } void UnknownDBException::printTo(std::ostream& out) const { @@ -16731,13 +17469,13 @@ void swap(AlreadyExistsException &a, AlreadyExistsException &b) { swap(a.__isset, b.__isset); } -AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other699) : TException() { - message = other699.message; - __isset = other699.__isset; +AlreadyExistsException::AlreadyExistsException(const AlreadyExistsException& other729) : TException() { + message = other729.message; + __isset = other729.__isset; } -AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other700) { - message = other700.message; - __isset = other700.__isset; +AlreadyExistsException& AlreadyExistsException::operator=(const AlreadyExistsException& other730) { + message = other730.message; + __isset = other730.__isset; return *this; } void AlreadyExistsException::printTo(std::ostream& out) const { @@ -16828,13 +17566,13 @@ void swap(InvalidPartitionException &a, InvalidPartitionException &b) { swap(a.__isset, b.__isset); } -InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other701) : TException() { - message = other701.message; - __isset = other701.__isset; 
+InvalidPartitionException::InvalidPartitionException(const InvalidPartitionException& other731) : TException() { + message = other731.message; + __isset = other731.__isset; } -InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other702) { - message = other702.message; - __isset = other702.__isset; +InvalidPartitionException& InvalidPartitionException::operator=(const InvalidPartitionException& other732) { + message = other732.message; + __isset = other732.__isset; return *this; } void InvalidPartitionException::printTo(std::ostream& out) const { @@ -16925,13 +17663,13 @@ void swap(UnknownPartitionException &a, UnknownPartitionException &b) { swap(a.__isset, b.__isset); } -UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other703) : TException() { - message = other703.message; - __isset = other703.__isset; +UnknownPartitionException::UnknownPartitionException(const UnknownPartitionException& other733) : TException() { + message = other733.message; + __isset = other733.__isset; } -UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other704) { - message = other704.message; - __isset = other704.__isset; +UnknownPartitionException& UnknownPartitionException::operator=(const UnknownPartitionException& other734) { + message = other734.message; + __isset = other734.__isset; return *this; } void UnknownPartitionException::printTo(std::ostream& out) const { @@ -17022,13 +17760,13 @@ void swap(InvalidObjectException &a, InvalidObjectException &b) { swap(a.__isset, b.__isset); } -InvalidObjectException::InvalidObjectException(const InvalidObjectException& other705) : TException() { - message = other705.message; - __isset = other705.__isset; +InvalidObjectException::InvalidObjectException(const InvalidObjectException& other735) : TException() { + message = other735.message; + __isset = other735.__isset; } -InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other706) { - message = other706.message; - __isset = other706.__isset; +InvalidObjectException& InvalidObjectException::operator=(const InvalidObjectException& other736) { + message = other736.message; + __isset = other736.__isset; return *this; } void InvalidObjectException::printTo(std::ostream& out) const { @@ -17119,13 +17857,13 @@ void swap(NoSuchObjectException &a, NoSuchObjectException &b) { swap(a.__isset, b.__isset); } -NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other707) : TException() { - message = other707.message; - __isset = other707.__isset; +NoSuchObjectException::NoSuchObjectException(const NoSuchObjectException& other737) : TException() { + message = other737.message; + __isset = other737.__isset; } -NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other708) { - message = other708.message; - __isset = other708.__isset; +NoSuchObjectException& NoSuchObjectException::operator=(const NoSuchObjectException& other738) { + message = other738.message; + __isset = other738.__isset; return *this; } void NoSuchObjectException::printTo(std::ostream& out) const { @@ -17216,13 +17954,13 @@ void swap(IndexAlreadyExistsException &a, IndexAlreadyExistsException &b) { swap(a.__isset, b.__isset); } -IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other709) : TException() { - message = other709.message; - __isset = other709.__isset; 
+IndexAlreadyExistsException::IndexAlreadyExistsException(const IndexAlreadyExistsException& other739) : TException() { + message = other739.message; + __isset = other739.__isset; } -IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other710) { - message = other710.message; - __isset = other710.__isset; +IndexAlreadyExistsException& IndexAlreadyExistsException::operator=(const IndexAlreadyExistsException& other740) { + message = other740.message; + __isset = other740.__isset; return *this; } void IndexAlreadyExistsException::printTo(std::ostream& out) const { @@ -17313,13 +18051,13 @@ void swap(InvalidOperationException &a, InvalidOperationException &b) { swap(a.__isset, b.__isset); } -InvalidOperationException::InvalidOperationException(const InvalidOperationException& other711) : TException() { - message = other711.message; - __isset = other711.__isset; +InvalidOperationException::InvalidOperationException(const InvalidOperationException& other741) : TException() { + message = other741.message; + __isset = other741.__isset; } -InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other712) { - message = other712.message; - __isset = other712.__isset; +InvalidOperationException& InvalidOperationException::operator=(const InvalidOperationException& other742) { + message = other742.message; + __isset = other742.__isset; return *this; } void InvalidOperationException::printTo(std::ostream& out) const { @@ -17410,13 +18148,13 @@ void swap(ConfigValSecurityException &a, ConfigValSecurityException &b) { swap(a.__isset, b.__isset); } -ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other713) : TException() { - message = other713.message; - __isset = other713.__isset; +ConfigValSecurityException::ConfigValSecurityException(const ConfigValSecurityException& other743) : TException() { + message = other743.message; + __isset = other743.__isset; } -ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other714) { - message = other714.message; - __isset = other714.__isset; +ConfigValSecurityException& ConfigValSecurityException::operator=(const ConfigValSecurityException& other744) { + message = other744.message; + __isset = other744.__isset; return *this; } void ConfigValSecurityException::printTo(std::ostream& out) const { @@ -17507,13 +18245,13 @@ void swap(InvalidInputException &a, InvalidInputException &b) { swap(a.__isset, b.__isset); } -InvalidInputException::InvalidInputException(const InvalidInputException& other715) : TException() { - message = other715.message; - __isset = other715.__isset; +InvalidInputException::InvalidInputException(const InvalidInputException& other745) : TException() { + message = other745.message; + __isset = other745.__isset; } -InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other716) { - message = other716.message; - __isset = other716.__isset; +InvalidInputException& InvalidInputException::operator=(const InvalidInputException& other746) { + message = other746.message; + __isset = other746.__isset; return *this; } void InvalidInputException::printTo(std::ostream& out) const { @@ -17604,13 +18342,13 @@ void swap(NoSuchTxnException &a, NoSuchTxnException &b) { swap(a.__isset, b.__isset); } -NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other717) : TException() { - message = other717.message; - __isset = other717.__isset; 
+NoSuchTxnException::NoSuchTxnException(const NoSuchTxnException& other747) : TException() { + message = other747.message; + __isset = other747.__isset; } -NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other718) { - message = other718.message; - __isset = other718.__isset; +NoSuchTxnException& NoSuchTxnException::operator=(const NoSuchTxnException& other748) { + message = other748.message; + __isset = other748.__isset; return *this; } void NoSuchTxnException::printTo(std::ostream& out) const { @@ -17701,13 +18439,13 @@ void swap(TxnAbortedException &a, TxnAbortedException &b) { swap(a.__isset, b.__isset); } -TxnAbortedException::TxnAbortedException(const TxnAbortedException& other719) : TException() { - message = other719.message; - __isset = other719.__isset; +TxnAbortedException::TxnAbortedException(const TxnAbortedException& other749) : TException() { + message = other749.message; + __isset = other749.__isset; } -TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other720) { - message = other720.message; - __isset = other720.__isset; +TxnAbortedException& TxnAbortedException::operator=(const TxnAbortedException& other750) { + message = other750.message; + __isset = other750.__isset; return *this; } void TxnAbortedException::printTo(std::ostream& out) const { @@ -17798,13 +18536,13 @@ void swap(TxnOpenException &a, TxnOpenException &b) { swap(a.__isset, b.__isset); } -TxnOpenException::TxnOpenException(const TxnOpenException& other721) : TException() { - message = other721.message; - __isset = other721.__isset; +TxnOpenException::TxnOpenException(const TxnOpenException& other751) : TException() { + message = other751.message; + __isset = other751.__isset; } -TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other722) { - message = other722.message; - __isset = other722.__isset; +TxnOpenException& TxnOpenException::operator=(const TxnOpenException& other752) { + message = other752.message; + __isset = other752.__isset; return *this; } void TxnOpenException::printTo(std::ostream& out) const { @@ -17895,13 +18633,13 @@ void swap(NoSuchLockException &a, NoSuchLockException &b) { swap(a.__isset, b.__isset); } -NoSuchLockException::NoSuchLockException(const NoSuchLockException& other723) : TException() { - message = other723.message; - __isset = other723.__isset; +NoSuchLockException::NoSuchLockException(const NoSuchLockException& other753) : TException() { + message = other753.message; + __isset = other753.__isset; } -NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other724) { - message = other724.message; - __isset = other724.__isset; +NoSuchLockException& NoSuchLockException::operator=(const NoSuchLockException& other754) { + message = other754.message; + __isset = other754.__isset; return *this; } void NoSuchLockException::printTo(std::ostream& out) const { diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 97c07a5..5ebc2e5 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -149,6 +149,12 @@ class Version; class FieldSchema; +class ForeignKeyRel; + +class SQLPrimaryKey; + +class SQLForeignKey; + class Type; class HiveObjectRef; @@ -501,6 +507,252 @@ inline std::ostream& operator<<(std::ostream& out, const FieldSchema& obj) return out; } +typedef struct _ForeignKeyRel__isset { + _ForeignKeyRel__isset() : 
foreignCols(false), parentCols(false), foreignKeyName(false), parentTableName(false), parentTableSchema(false) {}
+  bool foreignCols :1;
+  bool parentCols :1;
+  bool foreignKeyName :1;
+  bool parentTableName :1;
+  bool parentTableSchema :1;
+} _ForeignKeyRel__isset;
+
+class ForeignKeyRel {
+ public:
+
+  ForeignKeyRel(const ForeignKeyRel&);
+  ForeignKeyRel& operator=(const ForeignKeyRel&);
+  ForeignKeyRel() : foreignKeyName(), parentTableName(), parentTableSchema() {
+  }
+
+  virtual ~ForeignKeyRel() throw();
+  std::vector<std::string>  foreignCols;
+  std::vector<std::string>  parentCols;
+  std::string foreignKeyName;
+  std::string parentTableName;
+  std::string parentTableSchema;
+
+  _ForeignKeyRel__isset __isset;
+
+  void __set_foreignCols(const std::vector<std::string> & val);
+
+  void __set_parentCols(const std::vector<std::string> & val);
+
+  void __set_foreignKeyName(const std::string& val);
+
+  void __set_parentTableName(const std::string& val);
+
+  void __set_parentTableSchema(const std::string& val);
+
+  bool operator == (const ForeignKeyRel & rhs) const
+  {
+    if (!(foreignCols == rhs.foreignCols))
+      return false;
+    if (!(parentCols == rhs.parentCols))
+      return false;
+    if (!(foreignKeyName == rhs.foreignKeyName))
+      return false;
+    if (!(parentTableName == rhs.parentTableName))
+      return false;
+    if (!(parentTableSchema == rhs.parentTableSchema))
+      return false;
+    return true;
+  }
+  bool operator != (const ForeignKeyRel &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const ForeignKeyRel & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(ForeignKeyRel &a, ForeignKeyRel &b);
+
+inline std::ostream& operator<<(std::ostream& out, const ForeignKeyRel& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+typedef struct _SQLPrimaryKey__isset {
+  _SQLPrimaryKey__isset() : table_schem(false), table_name(false), column_name(false), key_seq(false), pk_name(false) {}
+  bool table_schem :1;
+  bool table_name :1;
+  bool column_name :1;
+  bool key_seq :1;
+  bool pk_name :1;
+} _SQLPrimaryKey__isset;
+
+class SQLPrimaryKey {
+ public:
+
+  SQLPrimaryKey(const SQLPrimaryKey&);
+  SQLPrimaryKey& operator=(const SQLPrimaryKey&);
+  SQLPrimaryKey() : table_schem(), table_name(), column_name(), key_seq(0), pk_name() {
+  }
+
+  virtual ~SQLPrimaryKey() throw();
+  std::string table_schem;
+  std::string table_name;
+  std::string column_name;
+  int32_t key_seq;
+  std::string pk_name;
+
+  _SQLPrimaryKey__isset __isset;
+
+  void __set_table_schem(const std::string& val);
+
+  void __set_table_name(const std::string& val);
+
+  void __set_column_name(const std::string& val);
+
+  void __set_key_seq(const int32_t val);
+
+  void __set_pk_name(const std::string& val);
+
+  bool operator == (const SQLPrimaryKey & rhs) const
+  {
+    if (!(table_schem == rhs.table_schem))
+      return false;
+    if (!(table_name == rhs.table_name))
+      return false;
+    if (!(column_name == rhs.column_name))
+      return false;
+    if (!(key_seq == rhs.key_seq))
+      return false;
+    if (!(pk_name == rhs.pk_name))
+      return false;
+    return true;
+  }
+  bool operator != (const SQLPrimaryKey &rhs) const {
+    return !(*this == rhs);
+  }
+
+  bool operator < (const SQLPrimaryKey & ) const;
+
+  uint32_t read(::apache::thrift::protocol::TProtocol* iprot);
+  uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const;
+
+  virtual void printTo(std::ostream& out) const;
+};
+
+void swap(SQLPrimaryKey &a, SQLPrimaryKey &b);
+
+inline std::ostream&
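// Editorial sketch (not part of the patch): SQLPrimaryKey's fields line up with the
// column set of JDBC's DatabaseMetaData.getPrimaryKeys() (TABLE_SCHEM, TABLE_NAME,
// COLUMN_NAME, KEY_SEQ, PK_NAME). A hedged usage sketch of the generated setters,
// assuming hive_metastore_types.h is on the include path; the table and key names
// below are illustrative only:
inline SQLPrimaryKey primaryKeySketch() {
  SQLPrimaryKey pk;
  pk.__set_table_schem("default");
  pk.__set_table_name("orders");
  pk.__set_column_name("order_id");
  pk.__set_key_seq(1);  // 1-based position of the column within the key
  pk.__set_pk_name("orders_pk");
  return pk;
}
// A composite key would be one SQLPrimaryKey per column, sharing pk_name with
// increasing key_seq -- the same row-per-column convention JDBC uses.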
operator<<(std::ostream& out, const SQLPrimaryKey& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _SQLForeignKey__isset { + _SQLForeignKey__isset() : pktable_schem(false), pktable_name(false), pkcolumn_name(false), fktable_schem(false), fktable_name(false), fkcolumn_name(false), key_seq(false), update_rule(false), delete_rule(false), fk_name(false), pk_name(false) {} + bool pktable_schem :1; + bool pktable_name :1; + bool pkcolumn_name :1; + bool fktable_schem :1; + bool fktable_name :1; + bool fkcolumn_name :1; + bool key_seq :1; + bool update_rule :1; + bool delete_rule :1; + bool fk_name :1; + bool pk_name :1; +} _SQLForeignKey__isset; + +class SQLForeignKey { + public: + + SQLForeignKey(const SQLForeignKey&); + SQLForeignKey& operator=(const SQLForeignKey&); + SQLForeignKey() : pktable_schem(), pktable_name(), pkcolumn_name(), fktable_schem(), fktable_name(), fkcolumn_name(), key_seq(0), update_rule(0), delete_rule(0), fk_name(), pk_name() { + } + + virtual ~SQLForeignKey() throw(); + std::string pktable_schem; + std::string pktable_name; + std::string pkcolumn_name; + std::string fktable_schem; + std::string fktable_name; + std::string fkcolumn_name; + int32_t key_seq; + int32_t update_rule; + int32_t delete_rule; + std::string fk_name; + std::string pk_name; + + _SQLForeignKey__isset __isset; + + void __set_pktable_schem(const std::string& val); + + void __set_pktable_name(const std::string& val); + + void __set_pkcolumn_name(const std::string& val); + + void __set_fktable_schem(const std::string& val); + + void __set_fktable_name(const std::string& val); + + void __set_fkcolumn_name(const std::string& val); + + void __set_key_seq(const int32_t val); + + void __set_update_rule(const int32_t val); + + void __set_delete_rule(const int32_t val); + + void __set_fk_name(const std::string& val); + + void __set_pk_name(const std::string& val); + + bool operator == (const SQLForeignKey & rhs) const + { + if (!(pktable_schem == rhs.pktable_schem)) + return false; + if (!(pktable_name == rhs.pktable_name)) + return false; + if (!(pkcolumn_name == rhs.pkcolumn_name)) + return false; + if (!(fktable_schem == rhs.fktable_schem)) + return false; + if (!(fktable_name == rhs.fktable_name)) + return false; + if (!(fkcolumn_name == rhs.fkcolumn_name)) + return false; + if (!(key_seq == rhs.key_seq)) + return false; + if (!(update_rule == rhs.update_rule)) + return false; + if (!(delete_rule == rhs.delete_rule)) + return false; + if (!(fk_name == rhs.fk_name)) + return false; + if (!(pk_name == rhs.pk_name)) + return false; + return true; + } + bool operator != (const SQLForeignKey &rhs) const { + return !(*this == rhs); + } + + bool operator < (const SQLForeignKey & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(SQLForeignKey &a, SQLForeignKey &b); + +inline std::ostream& operator<<(std::ostream& out, const SQLForeignKey& obj) +{ + obj.printTo(out); + return out; +} + typedef struct _Type__isset { _Type__isset() : name(false), type1(false), type2(false), fields(false) {} bool name :1; @@ -1801,7 +2053,7 @@ inline std::ostream& operator<<(std::ostream& out, const StorageDescriptor& obj) } typedef struct _Table__isset { - _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), 
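// Editorial sketch (not part of the patch): SQLForeignKey, defined above, mirrors
// DatabaseMetaData.getImportedKeys(), pairing each referencing column (fk*) with the
// parent-key column it points at (pk*). update_rule and delete_rule are plain i32s;
// the values below assume they reuse the JDBC action codes (3 == importedKeyNoAction),
// which this hunk does not confirm. Names are illustrative only:
inline SQLForeignKey foreignKeySketch() {
  SQLForeignKey fk;
  fk.__set_fktable_name("order_items");
  fk.__set_fkcolumn_name("order_id");  // referencing column
  fk.__set_pktable_name("orders");
  fk.__set_pkcolumn_name("order_id");  // referenced column
  fk.__set_key_seq(1);
  fk.__set_update_rule(3);  // assumption: JDBC importedKeyNoAction
  fk.__set_delete_rule(3);  // assumption: JDBC importedKeyNoAction
  fk.__set_fk_name("order_items_fk");
  fk.__set_pk_name("orders_pk");
  return fk;
}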
viewExpandedText(false), tableType(false), privileges(false), temporary(true) {}
+  _Table__isset() : tableName(false), dbName(false), owner(false), createTime(false), lastAccessTime(false), retention(false), sd(false), partitionKeys(false), parameters(false), viewOriginalText(false), viewExpandedText(false), tableType(false), privileges(false), temporary(true), primaryKeys(false), foreignKeys(false) {}
   bool tableName :1;
   bool dbName :1;
   bool owner :1;
@@ -1816,6 +2068,8 @@ typedef struct _Table__isset {
   bool tableType :1;
   bool privileges :1;
   bool temporary :1;
+  bool primaryKeys :1;
+  bool foreignKeys :1;
 } _Table__isset;
 class Table {
@@ -1841,6 +2095,8 @@ class Table {
   std::string tableType;
   PrincipalPrivilegeSet privileges;
   bool temporary;
+  std::vector<SQLPrimaryKey>  primaryKeys;
+  std::vector<SQLForeignKey>  foreignKeys;
   _Table__isset __isset;
@@ -1872,6 +2128,10 @@ class Table {
   void __set_temporary(const bool val);
+  void __set_primaryKeys(const std::vector<SQLPrimaryKey> & val);
+
+  void __set_foreignKeys(const std::vector<SQLForeignKey> & val);
+
   bool operator == (const Table & rhs) const
   {
     if (!(tableName == rhs.tableName))
@@ -1906,6 +2166,10 @@ class Table {
       return false;
     else if (__isset.temporary && !(temporary == rhs.temporary))
       return false;
+    if (!(primaryKeys == rhs.primaryKeys))
+      return false;
+    if (!(foreignKeys == rhs.foreignKeys))
+      return false;
     return true;
   }
   bool operator != (const Table &rhs) const {
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
index bb6e584..bd6ddd0 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddDynamicPartitions.java
@@ -630,13 +630,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddDynamicPartition
           case 4: // PARTITIONNAMES
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list492 = iprot.readListBegin();
-                struct.partitionnames = new ArrayList<String>(_list492.size);
-                String _elem493;
-                for (int _i494 = 0; _i494 < _list492.size; ++_i494)
+                org.apache.thrift.protocol.TList _list524 = iprot.readListBegin();
+                struct.partitionnames = new ArrayList<String>(_list524.size);
+                String _elem525;
+                for (int _i526 = 0; _i526 < _list524.size; ++_i526)
                 {
-                  _elem493 = iprot.readString();
-                  struct.partitionnames.add(_elem493);
+                  _elem525 = iprot.readString();
+                  struct.partitionnames.add(_elem525);
                 }
                 iprot.readListEnd();
               }
@@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddDynamicPartitio
         oprot.writeFieldBegin(PARTITIONNAMES_FIELD_DESC);
         {
           oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionnames.size()));
-          for (String _iter495 : struct.partitionnames)
+          for (String _iter527 : struct.partitionnames)
           {
-            oprot.writeString(_iter495);
+            oprot.writeString(_iter527);
           }
           oprot.writeListEnd();
         }
@@ -705,9 +705,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartition
       oprot.writeString(struct.tablename);
       {
         oprot.writeI32(struct.partitionnames.size());
-        for (String _iter496 : struct.partitionnames)
+        for (String _iter528 : struct.partitionnames)
         {
-          oprot.writeString(_iter496);
+          oprot.writeString(_iter528);
         }
       }
     }
@@ -722,13 +722,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddDynamicPartitions
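// Editorial sketch (not part of the patch): with the two fields added to Table above,
// a Table now carries its constraint metadata inline. Note that operator== compares
// primaryKeys and foreignKeys unconditionally, unlike the optional fields guarded by
// __isset checks, so two otherwise-identical Tables differing only in key metadata
// now compare unequal. A hedged C++ usage sketch against the header above, reusing
// the hypothetical primaryKeySketch()/foreignKeySketch() helpers from earlier:
inline Table tableWithConstraintsSketch() {
  Table t;
  t.__set_dbName("default");
  t.__set_tableName("orders");
  std::vector<SQLPrimaryKey> pks(1, primaryKeySketch());
  t.__set_primaryKeys(pks);
  std::vector<SQLForeignKey> fks(1, foreignKeySketch());
  t.__set_foreignKeys(fks);
  return t;  // equality with another Table now depends on the key lists as well
}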
struct.tablename = iprot.readString(); struct.setTablenameIsSet(true); { - org.apache.thrift.protocol.TList _list497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionnames = new ArrayList(_list497.size); - String _elem498; - for (int _i499 = 0; _i499 < _list497.size; ++_i499) + org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionnames = new ArrayList(_list529.size); + String _elem530; + for (int _i531 = 0; _i531 < _list529.size; ++_i531) { - _elem498 = iprot.readString(); - struct.partitionnames.add(_elem498); + _elem530 = iprot.readString(); + struct.partitionnames.add(_elem530); } } struct.setPartitionnamesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 083d340..da23f72 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -704,14 +704,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques case 3: // PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list388 = iprot.readListBegin(); - struct.parts = new ArrayList(_list388.size); - Partition _elem389; - for (int _i390 = 0; _i390 < _list388.size; ++_i390) + org.apache.thrift.protocol.TList _list420 = iprot.readListBegin(); + struct.parts = new ArrayList(_list420.size); + Partition _elem421; + for (int _i422 = 0; _i422 < _list420.size; ++_i422) { - _elem389 = new Partition(); - _elem389.read(iprot); - struct.parts.add(_elem389); + _elem421 = new Partition(); + _elem421.read(iprot); + struct.parts.add(_elem421); } iprot.readListEnd(); } @@ -763,9 +763,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldBegin(PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size())); - for (Partition _iter391 : struct.parts) + for (Partition _iter423 : struct.parts) { - _iter391.write(oprot); + _iter423.write(oprot); } oprot.writeListEnd(); } @@ -800,9 +800,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques oprot.writeString(struct.tblName); { oprot.writeI32(struct.parts.size()); - for (Partition _iter392 : struct.parts) + for (Partition _iter424 : struct.parts) { - _iter392.write(oprot); + _iter424.write(oprot); } } oprot.writeBool(struct.ifNotExists); @@ -824,14 +824,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list393 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.parts = new ArrayList(_list393.size); - Partition _elem394; - for (int _i395 = 0; _i395 < _list393.size; ++_i395) + org.apache.thrift.protocol.TList _list425 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parts = new ArrayList(_list425.size); + Partition _elem426; + for (int _i427 = 0; _i427 < _list425.size; ++_i427) { - _elem394 = new Partition(); - 
_elem394.read(iprot); - struct.parts.add(_elem394); + _elem426 = new Partition(); + _elem426.read(iprot); + struct.parts.add(_elem426); } } struct.setPartsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index 9004457..bfd483e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list380 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list380.size); - Partition _elem381; - for (int _i382 = 0; _i382 < _list380.size; ++_i382) + org.apache.thrift.protocol.TList _list412 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list412.size); + Partition _elem413; + for (int _i414 = 0; _i414 < _list412.size; ++_i414) { - _elem381 = new Partition(); - _elem381.read(iprot); - struct.partitions.add(_elem381); + _elem413 = new Partition(); + _elem413.read(iprot); + struct.partitions.add(_elem413); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter383 : struct.partitions) + for (Partition _iter415 : struct.partitions) { - _iter383.write(oprot); + _iter415.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter384 : struct.partitions) + for (Partition _iter416 : struct.partitions) { - _iter384.write(oprot); + _iter416.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list385 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list385.size); - Partition _elem386; - for (int _i387 = 0; _i387 < _list385.size; ++_i387) + org.apache.thrift.protocol.TList _list417 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list417.size); + Partition _elem418; + for (int _i419 = 0; _i419 < _list417.size; ++_i419) { - _elem386 = new Partition(); - _elem386.read(iprot); - struct.partitions.add(_elem386); + _elem418 = new Partition(); + _elem418.read(iprot); + struct.partitions.add(_elem418); } } struct.setPartitionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java index bfd1206..1ef52cf 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AggrStats.java @@ -439,14 +439,14 @@ 
public void read(org.apache.thrift.protocol.TProtocol iprot, AggrStats struct) t case 1: // COL_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list278 = iprot.readListBegin(); - struct.colStats = new ArrayList(_list278.size); - ColumnStatisticsObj _elem279; - for (int _i280 = 0; _i280 < _list278.size; ++_i280) + org.apache.thrift.protocol.TList _list310 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list310.size); + ColumnStatisticsObj _elem311; + for (int _i312 = 0; _i312 < _list310.size; ++_i312) { - _elem279 = new ColumnStatisticsObj(); - _elem279.read(iprot); - struct.colStats.add(_elem279); + _elem311 = new ColumnStatisticsObj(); + _elem311.read(iprot); + struct.colStats.add(_elem311); } iprot.readListEnd(); } @@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AggrStats struct) oprot.writeFieldBegin(COL_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); - for (ColumnStatisticsObj _iter281 : struct.colStats) + for (ColumnStatisticsObj _iter313 : struct.colStats) { - _iter281.write(oprot); + _iter313.write(oprot); } oprot.writeListEnd(); } @@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.colStats.size()); - for (ColumnStatisticsObj _iter282 : struct.colStats) + for (ColumnStatisticsObj _iter314 : struct.colStats) { - _iter282.write(oprot); + _iter314.write(oprot); } } oprot.writeI64(struct.partsFound); @@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) t public void read(org.apache.thrift.protocol.TProtocol prot, AggrStats struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list283 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.colStats = new ArrayList(_list283.size); - ColumnStatisticsObj _elem284; - for (int _i285 = 0; _i285 < _list283.size; ++_i285) + org.apache.thrift.protocol.TList _list315 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list315.size); + ColumnStatisticsObj _elem316; + for (int _i317 = 0; _i317 < _list315.size; ++_i317) { - _elem284 = new ColumnStatisticsObj(); - _elem284.read(iprot); - struct.colStats.add(_elem284); + _elem316 = new ColumnStatisticsObj(); + _elem316.read(iprot); + struct.colStats.add(_elem316); } } struct.setColStatsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java index 657bb7b..f594d47 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ClearFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ClearFileMetadataRe case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list576 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list576.size); - long _elem577; - for (int _i578 = 0; _i578 < _list576.size; 
++_i578) + org.apache.thrift.protocol.TList _list608 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list608.size); + long _elem609; + for (int _i610 = 0; _i610 < _list608.size; ++_i610) { - _elem577 = iprot.readI64(); - struct.fileIds.add(_elem577); + _elem609 = iprot.readI64(); + struct.fileIds.add(_elem609); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ClearFileMetadataR oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter579 : struct.fileIds) + for (long _iter611 : struct.fileIds) { - oprot.writeI64(_iter579); + oprot.writeI64(_iter611); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter580 : struct.fileIds) + for (long _iter612 : struct.fileIds) { - oprot.writeI64(_iter580); + oprot.writeI64(_iter612); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRe public void read(org.apache.thrift.protocol.TProtocol prot, ClearFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list581 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list581.size); - long _elem582; - for (int _i583 = 0; _i583 < _list581.size; ++_i583) + org.apache.thrift.protocol.TList _list613 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list613.size); + long _elem614; + for (int _i615 = 0; _i615 < _list613.size; ++_i615) { - _elem582 = iprot.readI64(); - struct.fileIds.add(_elem582); + _elem614 = iprot.readI64(); + struct.fileIds.add(_elem614); } } struct.setFileIdsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java index ba059fe..3c54fb0 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ColumnStatistics.java @@ -451,14 +451,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ColumnStatistics st case 2: // STATS_OBJ if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list270 = iprot.readListBegin(); - struct.statsObj = new ArrayList(_list270.size); - ColumnStatisticsObj _elem271; - for (int _i272 = 0; _i272 < _list270.size; ++_i272) + org.apache.thrift.protocol.TList _list302 = iprot.readListBegin(); + struct.statsObj = new ArrayList(_list302.size); + ColumnStatisticsObj _elem303; + for (int _i304 = 0; _i304 < _list302.size; ++_i304) { - _elem271 = new ColumnStatisticsObj(); - _elem271.read(iprot); - struct.statsObj.add(_elem271); + _elem303 = new ColumnStatisticsObj(); + _elem303.read(iprot); + struct.statsObj.add(_elem303); } iprot.readListEnd(); } @@ -489,9 +489,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ColumnStatistics s oprot.writeFieldBegin(STATS_OBJ_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.statsObj.size())); - for (ColumnStatisticsObj _iter273 : struct.statsObj) + for (ColumnStatisticsObj _iter305 : struct.statsObj) { - _iter273.write(oprot); + _iter305.write(oprot); } oprot.writeListEnd(); } @@ -517,9 +517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics st struct.statsDesc.write(oprot); { oprot.writeI32(struct.statsObj.size()); - for (ColumnStatisticsObj _iter274 : struct.statsObj) + for (ColumnStatisticsObj _iter306 : struct.statsObj) { - _iter274.write(oprot); + _iter306.write(oprot); } } } @@ -531,14 +531,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ColumnStatistics str struct.statsDesc.read(iprot); struct.setStatsDescIsSet(true); { - org.apache.thrift.protocol.TList _list275 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.statsObj = new ArrayList(_list275.size); - ColumnStatisticsObj _elem276; - for (int _i277 = 0; _i277 < _list275.size; ++_i277) + org.apache.thrift.protocol.TList _list307 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.statsObj = new ArrayList(_list307.size); + ColumnStatisticsObj _elem308; + for (int _i309 = 0; _i309 < _list307.size; ++_i309) { - _elem276 = new ColumnStatisticsObj(); - _elem276.read(iprot); - struct.statsObj.add(_elem276); + _elem308 = new ColumnStatisticsObj(); + _elem308.read(iprot); + struct.statsObj.add(_elem308); } } struct.setStatsObjIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java index 2769845..b912cb6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Database.java @@ -870,15 +870,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Database struct) th case 4: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map94 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map94.size); - String _key95; - String _val96; - for (int _i97 = 0; _i97 < _map94.size; ++_i97) + org.apache.thrift.protocol.TMap _map110 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map110.size); + String _key111; + String _val112; + for (int _i113 = 0; _i113 < _map110.size; ++_i113) { - _key95 = iprot.readString(); - _val96 = iprot.readString(); - struct.parameters.put(_key95, _val96); + _key111 = iprot.readString(); + _val112 = iprot.readString(); + struct.parameters.put(_key111, _val112); } iprot.readMapEnd(); } @@ -944,10 +944,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Database struct) t oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter98 : struct.parameters.entrySet()) + for (Map.Entry _iter114 : struct.parameters.entrySet()) { - oprot.writeString(_iter98.getKey()); - oprot.writeString(_iter98.getValue()); + oprot.writeString(_iter114.getKey()); + oprot.writeString(_iter114.getValue()); } oprot.writeMapEnd(); } @@ -1026,10 +1026,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Database struct) th 
if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter99 : struct.parameters.entrySet()) + for (Map.Entry _iter115 : struct.parameters.entrySet()) { - oprot.writeString(_iter99.getKey()); - oprot.writeString(_iter99.getValue()); + oprot.writeString(_iter115.getKey()); + oprot.writeString(_iter115.getValue()); } } } @@ -1062,15 +1062,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Database struct) thr } if (incoming.get(3)) { { - org.apache.thrift.protocol.TMap _map100 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map100.size); - String _key101; - String _val102; - for (int _i103 = 0; _i103 < _map100.size; ++_i103) + org.apache.thrift.protocol.TMap _map116 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map116.size); + String _key117; + String _val118; + for (int _i119 = 0; _i119 < _map116.size; ++_i119) { - _key101 = iprot.readString(); - _val102 = iprot.readString(); - struct.parameters.put(_key101, _val102); + _key117 = iprot.readString(); + _val118 = iprot.readString(); + struct.parameters.put(_key117, _val118); } } struct.setParametersIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index adef415..96cfbd2 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResul case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list396 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list396.size); - Partition _elem397; - for (int _i398 = 0; _i398 < _list396.size; ++_i398) + org.apache.thrift.protocol.TList _list428 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list428.size); + Partition _elem429; + for (int _i430 = 0; _i430 < _list428.size; ++_i430) { - _elem397 = new Partition(); - _elem397.read(iprot); - struct.partitions.add(_elem397); + _elem429 = new Partition(); + _elem429.read(iprot); + struct.partitions.add(_elem429); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResu oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter399 : struct.partitions) + for (Partition _iter431 : struct.partitions) { - _iter399.write(oprot); + _iter431.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResul if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter400 : struct.partitions) + for (Partition _iter432 : struct.partitions) { - _iter400.write(oprot); + _iter432.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult BitSet incoming = 
iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list401 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list401.size); - Partition _elem402; - for (int _i403 = 0; _i403 < _list401.size; ++_i403) + org.apache.thrift.protocol.TList _list433 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list433.size); + Partition _elem434; + for (int _i435 = 0; _i435 < _list433.size; ++_i435) { - _elem402 = new Partition(); - _elem402.read(iprot); - struct.partitions.add(_elem402); + _elem434 = new Partition(); + _elem434.read(iprot); + struct.partitions.add(_elem434); } } struct.setPartitionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java index de588e2..7281ebd 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java @@ -344,15 +344,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, EnvironmentContext case 1: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map312 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map312.size); - String _key313; - String _val314; - for (int _i315 = 0; _i315 < _map312.size; ++_i315) + org.apache.thrift.protocol.TMap _map344 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map344.size); + String _key345; + String _val346; + for (int _i347 = 0; _i347 < _map344.size; ++_i347) { - _key313 = iprot.readString(); - _val314 = iprot.readString(); - struct.properties.put(_key313, _val314); + _key345 = iprot.readString(); + _val346 = iprot.readString(); + struct.properties.put(_key345, _val346); } iprot.readMapEnd(); } @@ -378,10 +378,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, EnvironmentContext oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter316 : struct.properties.entrySet()) + for (Map.Entry _iter348 : struct.properties.entrySet()) { - oprot.writeString(_iter316.getKey()); - oprot.writeString(_iter316.getValue()); + oprot.writeString(_iter348.getKey()); + oprot.writeString(_iter348.getValue()); } oprot.writeMapEnd(); } @@ -412,10 +412,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter317 : struct.properties.entrySet()) + for (Map.Entry _iter349 : struct.properties.entrySet()) { - oprot.writeString(_iter317.getKey()); - oprot.writeString(_iter317.getValue()); + oprot.writeString(_iter349.getKey()); + oprot.writeString(_iter349.getValue()); } } } @@ -427,15 +427,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext s BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map318 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties 
= new HashMap(2*_map318.size); - String _key319; - String _val320; - for (int _i321 = 0; _i321 < _map318.size; ++_i321) + org.apache.thrift.protocol.TMap _map350 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map350.size); + String _key351; + String _val352; + for (int _i353 = 0; _i353 < _map350.size; ++_i353) { - _key319 = iprot.readString(); - _val320 = iprot.readString(); - struct.properties.put(_key319, _val320); + _key351 = iprot.readString(); + _val352 = iprot.readString(); + struct.properties.put(_key351, _val352); } } struct.setPropertiesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java index 6b08234..eb5a8e6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/FireEventRequest.java @@ -713,13 +713,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, FireEventRequest st case 5: // PARTITION_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); - struct.partitionVals = new ArrayList(_list516.size); - String _elem517; - for (int _i518 = 0; _i518 < _list516.size; ++_i518) + org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); + struct.partitionVals = new ArrayList(_list548.size); + String _elem549; + for (int _i550 = 0; _i550 < _list548.size; ++_i550) { - _elem517 = iprot.readString(); - struct.partitionVals.add(_elem517); + _elem549 = iprot.readString(); + struct.partitionVals.add(_elem549); } iprot.readListEnd(); } @@ -768,9 +768,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, FireEventRequest s oprot.writeFieldBegin(PARTITION_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partitionVals.size())); - for (String _iter519 : struct.partitionVals) + for (String _iter551 : struct.partitionVals) { - oprot.writeString(_iter519); + oprot.writeString(_iter551); } oprot.writeListEnd(); } @@ -816,9 +816,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, FireEventRequest st if (struct.isSetPartitionVals()) { { oprot.writeI32(struct.partitionVals.size()); - for (String _iter520 : struct.partitionVals) + for (String _iter552 : struct.partitionVals) { - oprot.writeString(_iter520); + oprot.writeString(_iter552); } } } @@ -843,13 +843,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, FireEventRequest str } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionVals = new ArrayList(_list521.size); - String _elem522; - for (int _i523 = 0; _i523 < _list521.size; ++_i523) + org.apache.thrift.protocol.TList _list553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionVals = new ArrayList(_list553.size); + String _elem554; + for (int _i555 = 0; _i555 < _list553.size; ++_i555) { - _elem522 = iprot.readString(); - struct.partitionVals.add(_elem522); + _elem554 = iprot.readString(); + struct.partitionVals.add(_elem554); } } 
struct.setPartitionValsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeyRel.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeyRel.java new file mode 100644 index 0000000..22e7c9c --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ForeignKeyRel.java @@ -0,0 +1,919 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class ForeignKeyRel implements org.apache.thrift.TBase<ForeignKeyRel, ForeignKeyRel._Fields>, java.io.Serializable, Cloneable, Comparable<ForeignKeyRel> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("ForeignKeyRel"); + + private static final org.apache.thrift.protocol.TField FOREIGN_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignCols", org.apache.thrift.protocol.TType.LIST, (short)1); + private static final org.apache.thrift.protocol.TField PARENT_COLS_FIELD_DESC = new org.apache.thrift.protocol.TField("parentCols", org.apache.thrift.protocol.TType.LIST, (short)2); + private static final org.apache.thrift.protocol.TField FOREIGN_KEY_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeyName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField PARENT_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentTableName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField PARENT_TABLE_SCHEMA_FIELD_DESC = new org.apache.thrift.protocol.TField("parentTableSchema", org.apache.thrift.protocol.TType.STRING, (short)5); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new ForeignKeyRelStandardSchemeFactory()); + schemes.put(TupleScheme.class, new ForeignKeyRelTupleSchemeFactory()); + } + + private List<FieldSchema> foreignCols; // required + private List<FieldSchema> parentCols; // required + private String foreignKeyName; // required + private String parentTableName; // required + private String parentTableSchema; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them.
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + FOREIGN_COLS((short)1, "foreignCols"), + PARENT_COLS((short)2, "parentCols"), + FOREIGN_KEY_NAME((short)3, "foreignKeyName"), + PARENT_TABLE_NAME((short)4, "parentTableName"), + PARENT_TABLE_SCHEMA((short)5, "parentTableSchema"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // FOREIGN_COLS + return FOREIGN_COLS; + case 2: // PARENT_COLS + return PARENT_COLS; + case 3: // FOREIGN_KEY_NAME + return FOREIGN_KEY_NAME; + case 4: // PARENT_TABLE_NAME + return PARENT_TABLE_NAME; + case 5: // PARENT_TABLE_SCHEMA + return PARENT_TABLE_SCHEMA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.FOREIGN_COLS, new org.apache.thrift.meta_data.FieldMetaData("foreignCols", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class)))); + tmpMap.put(_Fields.PARENT_COLS, new org.apache.thrift.meta_data.FieldMetaData("parentCols", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class)))); + tmpMap.put(_Fields.FOREIGN_KEY_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignKeyName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PARENT_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentTableName", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PARENT_TABLE_SCHEMA, new org.apache.thrift.meta_data.FieldMetaData("parentTableSchema", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(ForeignKeyRel.class, metaDataMap); + } + + public ForeignKeyRel() { + } + + public ForeignKeyRel( + List<FieldSchema> foreignCols, + List<FieldSchema> parentCols, + String foreignKeyName, + String parentTableName, + String parentTableSchema) + { + this(); + this.foreignCols = foreignCols; + this.parentCols = parentCols; + this.foreignKeyName = foreignKeyName; + this.parentTableName = parentTableName; + this.parentTableSchema = parentTableSchema; + } + + /** + * Performs a deep copy on other. + */ + public ForeignKeyRel(ForeignKeyRel other) { + if (other.isSetForeignCols()) { + List<FieldSchema> __this__foreignCols = new ArrayList<FieldSchema>(other.foreignCols.size()); + for (FieldSchema other_element : other.foreignCols) { + __this__foreignCols.add(new FieldSchema(other_element)); + } + this.foreignCols = __this__foreignCols; + } + if (other.isSetParentCols()) { + List<FieldSchema> __this__parentCols = new ArrayList<FieldSchema>(other.parentCols.size()); + for (FieldSchema other_element : other.parentCols) { + __this__parentCols.add(new FieldSchema(other_element)); + } + this.parentCols = __this__parentCols; + } + if (other.isSetForeignKeyName()) { + this.foreignKeyName = other.foreignKeyName; + } + if (other.isSetParentTableName()) { + this.parentTableName = other.parentTableName; + } + if (other.isSetParentTableSchema()) { + this.parentTableSchema = other.parentTableSchema; + } + } + + public ForeignKeyRel deepCopy() { + return new ForeignKeyRel(this); + } + + @Override + public void clear() { + this.foreignCols = null; + this.parentCols = null; + this.foreignKeyName = null; + this.parentTableName = null; + this.parentTableSchema = null; + } + + public int getForeignColsSize() { + return (this.foreignCols == null) ? 0 : this.foreignCols.size(); + } + + public java.util.Iterator<FieldSchema> getForeignColsIterator() { + return (this.foreignCols == null) ? null : this.foreignCols.iterator(); + } + + public void addToForeignCols(FieldSchema elem) { + if (this.foreignCols == null) { + this.foreignCols = new ArrayList<FieldSchema>(); + } + this.foreignCols.add(elem); + } + + public List<FieldSchema> getForeignCols() { + return this.foreignCols; + } + + public void setForeignCols(List<FieldSchema> foreignCols) { + this.foreignCols = foreignCols; + } + + public void unsetForeignCols() { + this.foreignCols = null; + } + + /** Returns true if field foreignCols is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignCols() { + return this.foreignCols != null; + } + + public void setForeignColsIsSet(boolean value) { + if (!value) { + this.foreignCols = null; + } + } + + public int getParentColsSize() { + return (this.parentCols == null) ? 0 : this.parentCols.size(); + } + + public java.util.Iterator<FieldSchema> getParentColsIterator() { + return (this.parentCols == null) ? null : this.parentCols.iterator(); + } + + public void addToParentCols(FieldSchema elem) { + if (this.parentCols == null) { + this.parentCols = new ArrayList<FieldSchema>(); + } + this.parentCols.add(elem); + } + + public List<FieldSchema> getParentCols() { + return this.parentCols; + } + + public void setParentCols(List<FieldSchema> parentCols) { + this.parentCols = parentCols; + } + + public void unsetParentCols() { + this.parentCols = null; + } + + /** Returns true if field parentCols is set (has been assigned a value) and false otherwise */ + public boolean isSetParentCols() { + return this.parentCols != null; + } + + public void setParentColsIsSet(boolean value) { + if (!value) { + this.parentCols = null; + } + } + + public String getForeignKeyName() { + return this.foreignKeyName; + } + + public void setForeignKeyName(String foreignKeyName) { + this.foreignKeyName = foreignKeyName; + } + + public void unsetForeignKeyName() { + this.foreignKeyName = null; + } + + /** Returns true if field foreignKeyName is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignKeyName() { + return this.foreignKeyName != null; + } + + public void setForeignKeyNameIsSet(boolean value) { + if (!value) { + this.foreignKeyName = null; + } + } + + public String getParentTableName() { + return this.parentTableName; + } + + public void setParentTableName(String parentTableName) { + this.parentTableName = parentTableName; + } + + public void unsetParentTableName() { + this.parentTableName = null; + } + + /** Returns true if field parentTableName is set (has been assigned a value) and false otherwise */ + public boolean isSetParentTableName() { + return this.parentTableName != null; + } + + public void setParentTableNameIsSet(boolean value) { + if (!value) { + this.parentTableName = null; + } + } + + public String getParentTableSchema() { + return this.parentTableSchema; + } + + public void setParentTableSchema(String parentTableSchema) { + this.parentTableSchema = parentTableSchema; + } + + public void unsetParentTableSchema() { + this.parentTableSchema = null; + } + + /** Returns true if field parentTableSchema is set (has been assigned a value) and false otherwise */ + public boolean isSetParentTableSchema() { + return this.parentTableSchema != null; + } + + public void setParentTableSchemaIsSet(boolean value) { + if (!value) { + this.parentTableSchema = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case FOREIGN_COLS: + if (value == null) { + unsetForeignCols(); + } else { + setForeignCols((List<FieldSchema>)value); + } + break; + + case PARENT_COLS: + if (value == null) { + unsetParentCols(); + } else { + setParentCols((List<FieldSchema>)value); + } + break; + + case FOREIGN_KEY_NAME: + if (value == null) { + unsetForeignKeyName(); + } else { + setForeignKeyName((String)value); + } + break; + + case PARENT_TABLE_NAME: + if (value == null) { + unsetParentTableName(); + } else { + setParentTableName((String)value); + } + break; + + case PARENT_TABLE_SCHEMA: + if (value == null) { + unsetParentTableSchema(); + } else { + setParentTableSchema((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case FOREIGN_COLS: + return getForeignCols(); + + case PARENT_COLS: + return getParentCols(); + + case FOREIGN_KEY_NAME: + return getForeignKeyName(); + + case PARENT_TABLE_NAME: + return getParentTableName(); + + case PARENT_TABLE_SCHEMA: + return getParentTableSchema(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field
corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case FOREIGN_COLS: + return isSetForeignCols(); + case PARENT_COLS: + return isSetParentCols(); + case FOREIGN_KEY_NAME: + return isSetForeignKeyName(); + case PARENT_TABLE_NAME: + return isSetParentTableName(); + case PARENT_TABLE_SCHEMA: + return isSetParentTableSchema(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof ForeignKeyRel) + return this.equals((ForeignKeyRel)that); + return false; + } + + public boolean equals(ForeignKeyRel that) { + if (that == null) + return false; + + boolean this_present_foreignCols = true && this.isSetForeignCols(); + boolean that_present_foreignCols = true && that.isSetForeignCols(); + if (this_present_foreignCols || that_present_foreignCols) { + if (!(this_present_foreignCols && that_present_foreignCols)) + return false; + if (!this.foreignCols.equals(that.foreignCols)) + return false; + } + + boolean this_present_parentCols = true && this.isSetParentCols(); + boolean that_present_parentCols = true && that.isSetParentCols(); + if (this_present_parentCols || that_present_parentCols) { + if (!(this_present_parentCols && that_present_parentCols)) + return false; + if (!this.parentCols.equals(that.parentCols)) + return false; + } + + boolean this_present_foreignKeyName = true && this.isSetForeignKeyName(); + boolean that_present_foreignKeyName = true && that.isSetForeignKeyName(); + if (this_present_foreignKeyName || that_present_foreignKeyName) { + if (!(this_present_foreignKeyName && that_present_foreignKeyName)) + return false; + if (!this.foreignKeyName.equals(that.foreignKeyName)) + return false; + } + + boolean this_present_parentTableName = true && this.isSetParentTableName(); + boolean that_present_parentTableName = true && that.isSetParentTableName(); + if (this_present_parentTableName || that_present_parentTableName) { + if (!(this_present_parentTableName && that_present_parentTableName)) + return false; + if (!this.parentTableName.equals(that.parentTableName)) + return false; + } + + boolean this_present_parentTableSchema = true && this.isSetParentTableSchema(); + boolean that_present_parentTableSchema = true && that.isSetParentTableSchema(); + if (this_present_parentTableSchema || that_present_parentTableSchema) { + if (!(this_present_parentTableSchema && that_present_parentTableSchema)) + return false; + if (!this.parentTableSchema.equals(that.parentTableSchema)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_foreignCols = true && (isSetForeignCols()); + list.add(present_foreignCols); + if (present_foreignCols) + list.add(foreignCols); + + boolean present_parentCols = true && (isSetParentCols()); + list.add(present_parentCols); + if (present_parentCols) + list.add(parentCols); + + boolean present_foreignKeyName = true && (isSetForeignKeyName()); + list.add(present_foreignKeyName); + if (present_foreignKeyName) + list.add(foreignKeyName); + + boolean present_parentTableName = true && (isSetParentTableName()); + list.add(present_parentTableName); + if (present_parentTableName) + list.add(parentTableName); + + boolean present_parentTableSchema = true && (isSetParentTableSchema()); + list.add(present_parentTableSchema); + if 
(present_parentTableSchema) + list.add(parentTableSchema); + + return list.hashCode(); + } + + @Override + public int compareTo(ForeignKeyRel other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetForeignCols()).compareTo(other.isSetForeignCols()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignCols()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignCols, other.foreignCols); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentCols()).compareTo(other.isSetParentCols()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentCols()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentCols, other.parentCols); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeignKeyName()).compareTo(other.isSetForeignKeyName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignKeyName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignKeyName, other.foreignKeyName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentTableName()).compareTo(other.isSetParentTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentTableName, other.parentTableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentTableSchema()).compareTo(other.isSetParentTableSchema()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentTableSchema()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentTableSchema, other.parentTableSchema); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("ForeignKeyRel("); + boolean first = true; + + sb.append("foreignCols:"); + if (this.foreignCols == null) { + sb.append("null"); + } else { + sb.append(this.foreignCols); + } + first = false; + if (!first) sb.append(", "); + sb.append("parentCols:"); + if (this.parentCols == null) { + sb.append("null"); + } else { + sb.append(this.parentCols); + } + first = false; + if (!first) sb.append(", "); + sb.append("foreignKeyName:"); + if (this.foreignKeyName == null) { + sb.append("null"); + } else { + sb.append(this.foreignKeyName); + } + first = false; + if (!first) sb.append(", "); + sb.append("parentTableName:"); + if (this.parentTableName == null) { + sb.append("null"); + } else { + sb.append(this.parentTableName); + } + first = false; + if (!first) sb.append(", "); + sb.append("parentTableSchema:"); + if (this.parentTableSchema == null) { + sb.append("null"); + } else { + sb.append(this.parentTableSchema); + } + first = false; + sb.append(")"); + return 
sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class ForeignKeyRelStandardSchemeFactory implements SchemeFactory { + public ForeignKeyRelStandardScheme getScheme() { + return new ForeignKeyRelStandardScheme(); + } + } + + private static class ForeignKeyRelStandardScheme extends StandardScheme<ForeignKeyRel> { + + public void read(org.apache.thrift.protocol.TProtocol iprot, ForeignKeyRel struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // FOREIGN_COLS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); + struct.foreignCols = new ArrayList<FieldSchema>(_list0.size); + FieldSchema _elem1; + for (int _i2 = 0; _i2 < _list0.size; ++_i2) + { + _elem1 = new FieldSchema(); + _elem1.read(iprot); + struct.foreignCols.add(_elem1); + } + iprot.readListEnd(); + } + struct.setForeignColsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // PARENT_COLS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list3 = iprot.readListBegin(); + struct.parentCols = new ArrayList<FieldSchema>(_list3.size); + FieldSchema _elem4; + for (int _i5 = 0; _i5 < _list3.size; ++_i5) + { + _elem4 = new FieldSchema(); + _elem4.read(iprot); + struct.parentCols.add(_elem4); + } + iprot.readListEnd(); + } + struct.setParentColsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // FOREIGN_KEY_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.foreignKeyName = iprot.readString(); + struct.setForeignKeyNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // PARENT_TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.parentTableName = iprot.readString(); + struct.setParentTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // PARENT_TABLE_SCHEMA + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.parentTableSchema = iprot.readString(); + struct.setParentTableSchemaIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void
write(org.apache.thrift.protocol.TProtocol oprot, ForeignKeyRel struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.foreignCols != null) { + oprot.writeFieldBegin(FOREIGN_COLS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignCols.size())); + for (FieldSchema _iter6 : struct.foreignCols) + { + _iter6.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.parentCols != null) { + oprot.writeFieldBegin(PARENT_COLS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parentCols.size())); + for (FieldSchema _iter7 : struct.parentCols) + { + _iter7.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.foreignKeyName != null) { + oprot.writeFieldBegin(FOREIGN_KEY_NAME_FIELD_DESC); + oprot.writeString(struct.foreignKeyName); + oprot.writeFieldEnd(); + } + if (struct.parentTableName != null) { + oprot.writeFieldBegin(PARENT_TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.parentTableName); + oprot.writeFieldEnd(); + } + if (struct.parentTableSchema != null) { + oprot.writeFieldBegin(PARENT_TABLE_SCHEMA_FIELD_DESC); + oprot.writeString(struct.parentTableSchema); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class ForeignKeyRelTupleSchemeFactory implements SchemeFactory { + public ForeignKeyRelTupleScheme getScheme() { + return new ForeignKeyRelTupleScheme(); + } + } + + private static class ForeignKeyRelTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, ForeignKeyRel struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetForeignCols()) { + optionals.set(0); + } + if (struct.isSetParentCols()) { + optionals.set(1); + } + if (struct.isSetForeignKeyName()) { + optionals.set(2); + } + if (struct.isSetParentTableName()) { + optionals.set(3); + } + if (struct.isSetParentTableSchema()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); + if (struct.isSetForeignCols()) { + { + oprot.writeI32(struct.foreignCols.size()); + for (FieldSchema _iter8 : struct.foreignCols) + { + _iter8.write(oprot); + } + } + } + if (struct.isSetParentCols()) { + { + oprot.writeI32(struct.parentCols.size()); + for (FieldSchema _iter9 : struct.parentCols) + { + _iter9.write(oprot); + } + } + } + if (struct.isSetForeignKeyName()) { + oprot.writeString(struct.foreignKeyName); + } + if (struct.isSetParentTableName()) { + oprot.writeString(struct.parentTableName); + } + if (struct.isSetParentTableSchema()) { + oprot.writeString(struct.parentTableSchema); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, ForeignKeyRel struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(5); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list10 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignCols = new ArrayList(_list10.size); + FieldSchema _elem11; + for (int _i12 = 0; _i12 < _list10.size; ++_i12) + { + _elem11 = new FieldSchema(); + _elem11.read(iprot); + struct.foreignCols.add(_elem11); + } + } + struct.setForeignColsIsSet(true); + } + if 
(incoming.get(1)) { + { + org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parentCols = new ArrayList(_list13.size); + FieldSchema _elem14; + for (int _i15 = 0; _i15 < _list13.size; ++_i15) + { + _elem14 = new FieldSchema(); + _elem14.read(iprot); + struct.parentCols.add(_elem14); + } + } + struct.setParentColsIsSet(true); + } + if (incoming.get(2)) { + struct.foreignKeyName = iprot.readString(); + struct.setForeignKeyNameIsSet(true); + } + if (incoming.get(3)) { + struct.parentTableName = iprot.readString(); + struct.setParentTableNameIsSet(true); + } + if (incoming.get(4)) { + struct.parentTableSchema = iprot.readString(); + struct.setParentTableSchemaIsSet(true); + } + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index 5f8ce0d..56a4d30 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -997,14 +997,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th case 8: // RESOURCE_URIS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list420 = iprot.readListBegin(); - struct.resourceUris = new ArrayList(_list420.size); - ResourceUri _elem421; - for (int _i422 = 0; _i422 < _list420.size; ++_i422) + org.apache.thrift.protocol.TList _list452 = iprot.readListBegin(); + struct.resourceUris = new ArrayList(_list452.size); + ResourceUri _elem453; + for (int _i454 = 0; _i454 < _list452.size; ++_i454) { - _elem421 = new ResourceUri(); - _elem421.read(iprot); - struct.resourceUris.add(_elem421); + _elem453 = new ResourceUri(); + _elem453.read(iprot); + struct.resourceUris.add(_elem453); } iprot.readListEnd(); } @@ -1063,9 +1063,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size())); - for (ResourceUri _iter423 : struct.resourceUris) + for (ResourceUri _iter455 : struct.resourceUris) { - _iter423.write(oprot); + _iter455.write(oprot); } oprot.writeListEnd(); } @@ -1138,9 +1138,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { { oprot.writeI32(struct.resourceUris.size()); - for (ResourceUri _iter424 : struct.resourceUris) + for (ResourceUri _iter456 : struct.resourceUris) { - _iter424.write(oprot); + _iter456.write(oprot); } } } @@ -1180,14 +1180,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list425 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourceUris = new ArrayList(_list425.size); - ResourceUri _elem426; - for (int _i427 = 0; _i427 < _list425.size; ++_i427) + org.apache.thrift.protocol.TList _list457 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.resourceUris = new ArrayList(_list457.size); + ResourceUri _elem458; + for (int _i459 = 0; _i459 < _list457.size; ++_i459) { - _elem426 = new ResourceUri(); - 
_elem426.read(iprot); - struct.resourceUris.add(_elem426); + _elem458 = new ResourceUri(); + _elem458.read(iprot); + struct.resourceUris.add(_elem458); } } struct.setResourceUrisIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java index f88e279..5a0ba4b 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetAllFunctionsResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetAllFunctionsResp case 1: // FUNCTIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list584 = iprot.readListBegin(); - struct.functions = new ArrayList(_list584.size); - Function _elem585; - for (int _i586 = 0; _i586 < _list584.size; ++_i586) + org.apache.thrift.protocol.TList _list616 = iprot.readListBegin(); + struct.functions = new ArrayList(_list616.size); + Function _elem617; + for (int _i618 = 0; _i618 < _list616.size; ++_i618) { - _elem585 = new Function(); - _elem585.read(iprot); - struct.functions.add(_elem585); + _elem617 = new Function(); + _elem617.read(iprot); + struct.functions.add(_elem617); } iprot.readListEnd(); } @@ -380,9 +380,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetAllFunctionsRes oprot.writeFieldBegin(FUNCTIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.functions.size())); - for (Function _iter587 : struct.functions) + for (Function _iter619 : struct.functions) { - _iter587.write(oprot); + _iter619.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsResp if (struct.isSetFunctions()) { { oprot.writeI32(struct.functions.size()); - for (Function _iter588 : struct.functions) + for (Function _iter620 : struct.functions) { - _iter588.write(oprot); + _iter620.write(oprot); } } } @@ -428,14 +428,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetAllFunctionsRespo BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list589 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.functions = new ArrayList(_list589.size); - Function _elem590; - for (int _i591 = 0; _i591 < _list589.size; ++_i591) + org.apache.thrift.protocol.TList _list621 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.functions = new ArrayList(_list621.size); + Function _elem622; + for (int _i623 = 0; _i623 < _list621.size; ++_i623) { - _elem590 = new Function(); - _elem590.read(iprot); - struct.functions.add(_elem590); + _elem622 = new Function(); + _elem622.read(iprot); + struct.functions.add(_elem622); } } struct.setFunctionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java index 0236b4a..0916e87 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprRequest.java @@ -619,13 +619,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list534 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list534.size); - long _elem535; - for (int _i536 = 0; _i536 < _list534.size; ++_i536) + org.apache.thrift.protocol.TList _list566 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list566.size); + long _elem567; + for (int _i568 = 0; _i568 < _list566.size; ++_i568) { - _elem535 = iprot.readI64(); - struct.fileIds.add(_elem535); + _elem567 = iprot.readI64(); + struct.fileIds.add(_elem567); } iprot.readListEnd(); } @@ -675,9 +675,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter537 : struct.fileIds) + for (long _iter569 : struct.fileIds) { - oprot.writeI64(_iter537); + oprot.writeI64(_iter569); } oprot.writeListEnd(); } @@ -719,9 +719,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter538 : struct.fileIds) + for (long _iter570 : struct.fileIds) { - oprot.writeI64(_iter538); + oprot.writeI64(_iter570); } } oprot.writeBinary(struct.expr); @@ -745,13 +745,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list539 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list539.size); - long _elem540; - for (int _i541 = 0; _i541 < _list539.size; ++_i541) + org.apache.thrift.protocol.TList _list571 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list571.size); + long _elem572; + for (int _i573 = 0; _i573 < _list571.size; ++_i573) { - _elem540 = iprot.readI64(); - struct.fileIds.add(_elem540); + _elem572 = iprot.readI64(); + struct.fileIds.add(_elem572); } } struct.setFileIdsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java index 89eb819..494f31d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataByExprResult.java @@ -444,16 +444,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataByEx case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map524 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map524.size); - long _key525; - MetadataPpdResult _val526; - for (int _i527 = 0; _i527 < _map524.size; ++_i527) + org.apache.thrift.protocol.TMap _map556 = iprot.readMapBegin(); + 
struct.metadata = new HashMap(2*_map556.size); + long _key557; + MetadataPpdResult _val558; + for (int _i559 = 0; _i559 < _map556.size; ++_i559) { - _key525 = iprot.readI64(); - _val526 = new MetadataPpdResult(); - _val526.read(iprot); - struct.metadata.put(_key525, _val526); + _key557 = iprot.readI64(); + _val558 = new MetadataPpdResult(); + _val558.read(iprot); + struct.metadata.put(_key557, _val558); } iprot.readMapEnd(); } @@ -487,10 +487,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataByE oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, struct.metadata.size())); - for (Map.Entry _iter528 : struct.metadata.entrySet()) + for (Map.Entry _iter560 : struct.metadata.entrySet()) { - oprot.writeI64(_iter528.getKey()); - _iter528.getValue().write(oprot); + oprot.writeI64(_iter560.getKey()); + _iter560.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -518,10 +518,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.metadata.size()); - for (Map.Entry _iter529 : struct.metadata.entrySet()) + for (Map.Entry _iter561 : struct.metadata.entrySet()) { - oprot.writeI64(_iter529.getKey()); - _iter529.getValue().write(oprot); + oprot.writeI64(_iter561.getKey()); + _iter561.getValue().write(oprot); } } oprot.writeBool(struct.isSupported); @@ -531,16 +531,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByEx public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map530 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.metadata = new HashMap(2*_map530.size); - long _key531; - MetadataPpdResult _val532; - for (int _i533 = 0; _i533 < _map530.size; ++_i533) + org.apache.thrift.protocol.TMap _map562 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.metadata = new HashMap(2*_map562.size); + long _key563; + MetadataPpdResult _val564; + for (int _i565 = 0; _i565 < _map562.size; ++_i565) { - _key531 = iprot.readI64(); - _val532 = new MetadataPpdResult(); - _val532.read(iprot); - struct.metadata.put(_key531, _val532); + _key563 = iprot.readI64(); + _val564 = new MetadataPpdResult(); + _val564.read(iprot); + struct.metadata.put(_key563, _val564); } } struct.setMetadataIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java index 2408ad1..163ba87 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataRequest.java @@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list552 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list552.size); - long _elem553; - for (int 
_i554 = 0; _i554 < _list552.size; ++_i554) + org.apache.thrift.protocol.TList _list584 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list584.size); + long _elem585; + for (int _i586 = 0; _i586 < _list584.size; ++_i586) { - _elem553 = iprot.readI64(); - struct.fileIds.add(_elem553); + _elem585 = iprot.readI64(); + struct.fileIds.add(_elem585); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter555 : struct.fileIds) + for (long _iter587 : struct.fileIds) { - oprot.writeI64(_iter555); + oprot.writeI64(_iter587); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter556 : struct.fileIds) + for (long _iter588 : struct.fileIds) { - oprot.writeI64(_iter556); + oprot.writeI64(_iter588); } } } @@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list557 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list557.size); - long _elem558; - for (int _i559 = 0; _i559 < _list557.size; ++_i559) + org.apache.thrift.protocol.TList _list589 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list589.size); + long _elem590; + for (int _i591 = 0; _i591 < _list589.size; ++_i591) { - _elem558 = iprot.readI64(); - struct.fileIds.add(_elem558); + _elem590 = iprot.readI64(); + struct.fileIds.add(_elem590); } } struct.setFileIdsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java index 8946635..4ceee38 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetFileMetadataResult.java @@ -433,15 +433,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetFileMetadataResu case 1: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map542 = iprot.readMapBegin(); - struct.metadata = new HashMap(2*_map542.size); - long _key543; - ByteBuffer _val544; - for (int _i545 = 0; _i545 < _map542.size; ++_i545) + org.apache.thrift.protocol.TMap _map574 = iprot.readMapBegin(); + struct.metadata = new HashMap(2*_map574.size); + long _key575; + ByteBuffer _val576; + for (int _i577 = 0; _i577 < _map574.size; ++_i577) { - _key543 = iprot.readI64(); - _val544 = iprot.readBinary(); - struct.metadata.put(_key543, _val544); + _key575 = iprot.readI64(); + _val576 = iprot.readBinary(); + struct.metadata.put(_key575, _val576); } iprot.readMapEnd(); } @@ -475,10 +475,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetFileMetadataRes oprot.writeFieldBegin(METADATA_FIELD_DESC); { 
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, struct.metadata.size()));
-          for (Map.Entry<Long, ByteBuffer> _iter546 : struct.metadata.entrySet())
+          for (Map.Entry<Long, ByteBuffer> _iter578 : struct.metadata.entrySet())
          {
-            oprot.writeI64(_iter546.getKey());
-            oprot.writeBinary(_iter546.getValue());
+            oprot.writeI64(_iter578.getKey());
+            oprot.writeBinary(_iter578.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -506,10 +506,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.metadata.size());
-        for (Map.Entry<Long, ByteBuffer> _iter547 : struct.metadata.entrySet())
+        for (Map.Entry<Long, ByteBuffer> _iter579 : struct.metadata.entrySet())
        {
-          oprot.writeI64(_iter547.getKey());
-          oprot.writeBinary(_iter547.getValue());
+          oprot.writeI64(_iter579.getKey());
+          oprot.writeBinary(_iter579.getValue());
        }
      }
      oprot.writeBool(struct.isSupported);
@@ -519,15 +519,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResu
    public void read(org.apache.thrift.protocol.TProtocol prot, GetFileMetadataResult struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TMap _map548 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.metadata = new HashMap<Long,ByteBuffer>(2*_map548.size);
-        long _key549;
-        ByteBuffer _val550;
-        for (int _i551 = 0; _i551 < _map548.size; ++_i551)
+        org.apache.thrift.protocol.TMap _map580 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.I64, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.metadata = new HashMap<Long,ByteBuffer>(2*_map580.size);
+        long _key581;
+        ByteBuffer _val582;
+        for (int _i583 = 0; _i583 < _map580.size; ++_i583)
        {
-          _key549 = iprot.readI64();
-          _val550 = iprot.readBinary();
-          struct.metadata.put(_key549, _val550);
+          _key581 = iprot.readI64();
+          _val582 = iprot.readBinary();
+          struct.metadata.put(_key581, _val582);
        }
      }
      struct.setMetadataIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
index 629c042..e68793b 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java
@@ -447,14 +447,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResp
          case 2: // OPEN_TXNS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list428 = iprot.readListBegin();
-                struct.open_txns = new ArrayList<TxnInfo>(_list428.size);
-                TxnInfo _elem429;
-                for (int _i430 = 0; _i430 < _list428.size; ++_i430)
+                org.apache.thrift.protocol.TList _list460 = iprot.readListBegin();
+                struct.open_txns = new ArrayList<TxnInfo>(_list460.size);
+                TxnInfo _elem461;
+                for (int _i462 = 0; _i462 < _list460.size; ++_i462)
                {
-                  _elem429 = new TxnInfo();
-                  _elem429.read(iprot);
-                  struct.open_txns.add(_elem429);
+                  _elem461 = new TxnInfo();
+                  _elem461.read(iprot);
+                  struct.open_txns.add(_elem461);
                }
                iprot.readListEnd();
              }
@@ -483,9 +483,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoRes
        oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size()));
-          for (TxnInfo _iter431 : struct.open_txns)
+          for (TxnInfo _iter463 : struct.open_txns)
          {
-            _iter431.write(oprot);
+            _iter463.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -511,9 +511,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResp
      oprot.writeI64(struct.txn_high_water_mark);
      {
        oprot.writeI32(struct.open_txns.size());
-        for (TxnInfo _iter432 : struct.open_txns)
+        for (TxnInfo _iter464 : struct.open_txns)
        {
-          _iter432.write(oprot);
+          _iter464.write(oprot);
        }
      }
    }
@@ -524,14 +524,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoRespo
      struct.txn_high_water_mark = iprot.readI64();
      struct.setTxn_high_water_markIsSet(true);
      {
-        org.apache.thrift.protocol.TList _list433 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.open_txns = new ArrayList<TxnInfo>(_list433.size);
-        TxnInfo _elem434;
-        for (int _i435 = 0; _i435 < _list433.size; ++_i435)
+        org.apache.thrift.protocol.TList _list465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.open_txns = new ArrayList<TxnInfo>(_list465.size);
+        TxnInfo _elem466;
+        for (int _i467 = 0; _i467 < _list465.size; ++_i467)
        {
-          _elem434 = new TxnInfo();
-          _elem434.read(iprot);
-          struct.open_txns.add(_elem434);
+          _elem466 = new TxnInfo();
+          _elem466.read(iprot);
+          struct.open_txns.add(_elem466);
        }
      }
      struct.setOpen_txnsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
index 9f57a4a..6986fc2 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java
@@ -444,13 +444,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse
          case 2: // OPEN_TXNS
            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
              {
-                org.apache.thrift.protocol.TSet _set436 = iprot.readSetBegin();
-                struct.open_txns = new HashSet<Long>(2*_set436.size);
-                long _elem437;
-                for (int _i438 = 0; _i438 < _set436.size; ++_i438)
+                org.apache.thrift.protocol.TSet _set468 = iprot.readSetBegin();
+                struct.open_txns = new HashSet<Long>(2*_set468.size);
+                long _elem469;
+                for (int _i470 = 0; _i470 < _set468.size; ++_i470)
                {
-                  _elem437 = iprot.readI64();
-                  struct.open_txns.add(_elem437);
+                  _elem469 = iprot.readI64();
+                  struct.open_txns.add(_elem469);
                }
                iprot.readSetEnd();
              }
@@ -479,9 +479,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRespons
        oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC);
        {
          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.open_txns.size()));
-          for (long _iter439 : struct.open_txns)
+          for (long _iter471 : struct.open_txns)
          {
-            oprot.writeI64(_iter439);
+            oprot.writeI64(_iter471);
          }
          oprot.writeSetEnd();
        }
@@ -507,9 +507,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse
      oprot.writeI64(struct.txn_high_water_mark);
      {
        oprot.writeI32(struct.open_txns.size());
-        for (long _iter440 : struct.open_txns)
+        for (long _iter472 : struct.open_txns)
        {
-          oprot.writeI64(_iter440);
+          oprot.writeI64(_iter472);
        }
      }
    }
@@ -520,13 +520,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse
      struct.txn_high_water_mark = iprot.readI64();
      struct.setTxn_high_water_markIsSet(true);
      {
-        org.apache.thrift.protocol.TSet _set441 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.open_txns = new HashSet<Long>(2*_set441.size);
-        long _elem442;
-        for (int _i443 = 0; _i443 < _set441.size; ++_i443)
+        org.apache.thrift.protocol.TSet _set473 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.open_txns = new HashSet<Long>(2*_set473.size);
+        long _elem474;
+        for (int _i475 = 0; _i475 < _set473.size; ++_i475)
        {
-          _elem442 = iprot.readI64();
-          struct.open_txns.add(_elem442);
+          _elem474 = iprot.readI64();
+          struct.open_txns.add(_elem474);
        }
      }
      struct.setOpen_txnsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
index 02c652a..6d0c516 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetPrincipalsInRoleResponse.java
@@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrincipalsInRole
          case 1: // PRINCIPAL_GRANTS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list86 = iprot.readListBegin();
-                struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list86.size);
-                RolePrincipalGrant _elem87;
-                for (int _i88 = 0; _i88 < _list86.size; ++_i88)
+                org.apache.thrift.protocol.TList _list102 = iprot.readListBegin();
+                struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list102.size);
+                RolePrincipalGrant _elem103;
+                for (int _i104 = 0; _i104 < _list102.size; ++_i104)
                {
-                  _elem87 = new RolePrincipalGrant();
-                  _elem87.read(iprot);
-                  struct.principalGrants.add(_elem87);
+                  _elem103 = new RolePrincipalGrant();
+                  _elem103.read(iprot);
+                  struct.principalGrants.add(_elem103);
                }
                iprot.readListEnd();
              }
@@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrincipalsInRol
        oprot.writeFieldBegin(PRINCIPAL_GRANTS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.principalGrants.size()));
-          for (RolePrincipalGrant _iter89 : struct.principalGrants)
+          for (RolePrincipalGrant _iter105 : struct.principalGrants)
          {
-            _iter89.write(oprot);
+            _iter105.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRole
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.principalGrants.size());
-        for (RolePrincipalGrant _iter90 : struct.principalGrants)
+        for (RolePrincipalGrant _iter106 : struct.principalGrants)
        {
-          _iter90.write(oprot);
+          _iter106.write(oprot);
        }
      }
    }
@@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRole
    public void read(org.apache.thrift.protocol.TProtocol prot, GetPrincipalsInRoleResponse struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list91.size);
-        RolePrincipalGrant _elem92;
-        for (int _i93 = 0; _i93 < _list91.size; ++_i93)
+        org.apache.thrift.protocol.TList _list107 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list107.size);
+        RolePrincipalGrant _elem108;
+        for (int _i109 = 0; _i109 < _list107.size; ++_i109)
        {
-          _elem92 = new RolePrincipalGrant();
-          _elem92.read(iprot);
-          struct.principalGrants.add(_elem92);
+          _elem108 = new RolePrincipalGrant();
+          _elem108.read(iprot);
+          struct.principalGrants.add(_elem108);
        }
      }
      struct.setPrincipalGrantsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
index c013e0e..6531770 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetRoleGrantsForPrincipalResponse.java
@@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetRoleGrantsForPri
          case 1: // PRINCIPAL_GRANTS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list78 = iprot.readListBegin();
-                struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list78.size);
-                RolePrincipalGrant _elem79;
-                for (int _i80 = 0; _i80 < _list78.size; ++_i80)
+                org.apache.thrift.protocol.TList _list94 = iprot.readListBegin();
+                struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list94.size);
+                RolePrincipalGrant _elem95;
+                for (int _i96 = 0; _i96 < _list94.size; ++_i96)
                {
-                  _elem79 = new RolePrincipalGrant();
-                  _elem79.read(iprot);
-                  struct.principalGrants.add(_elem79);
+                  _elem95 = new RolePrincipalGrant();
+                  _elem95.read(iprot);
+                  struct.principalGrants.add(_elem95);
                }
                iprot.readListEnd();
              }
@@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetRoleGrantsForPr
        oprot.writeFieldBegin(PRINCIPAL_GRANTS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.principalGrants.size()));
-          for (RolePrincipalGrant _iter81 : struct.principalGrants)
+          for (RolePrincipalGrant _iter97 : struct.principalGrants)
          {
-            _iter81.write(oprot);
+            _iter97.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPri
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.principalGrants.size());
-        for (RolePrincipalGrant _iter82 : struct.principalGrants)
+        for (RolePrincipalGrant _iter98 : struct.principalGrants)
        {
-          _iter82.write(oprot);
+          _iter98.write(oprot);
        }
      }
    }
@@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPri
    public void read(org.apache.thrift.protocol.TProtocol prot, GetRoleGrantsForPrincipalResponse struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list83 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list83.size);
-        RolePrincipalGrant _elem84;
-        for (int _i85 = 0; _i85 < _list83.size; ++_i85)
+        org.apache.thrift.protocol.TList _list99 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.principalGrants = new ArrayList<RolePrincipalGrant>(_list99.size);
+        RolePrincipalGrant _elem100;
+        for (int _i101 = 0; _i101 < _list99.size; ++_i101)
        {
-          _elem84 = new RolePrincipalGrant();
-          _elem84.read(iprot);
-          struct.principalGrants.add(_elem84);
+          _elem100 = new RolePrincipalGrant();
+          _elem100.read(iprot);
+          struct.principalGrants.add(_elem100);
        }
      }
      struct.setPrincipalGrantsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
index b00fb9c..924a4ca 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java
@@ -453,13 +453,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe
          case 1: // ABORTED
            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
              {
-                org.apache.thrift.protocol.TSet _set468 = iprot.readSetBegin();
-                struct.aborted = new HashSet<Long>(2*_set468.size);
-                long _elem469;
-                for (int _i470 = 0; _i470 < _set468.size; ++_i470)
+                org.apache.thrift.protocol.TSet _set500 = iprot.readSetBegin();
+                struct.aborted = new HashSet<Long>(2*_set500.size);
+                long _elem501;
+                for (int _i502 = 0; _i502 < _set500.size; ++_i502)
                {
-                  _elem469 = iprot.readI64();
-                  struct.aborted.add(_elem469);
+                  _elem501 = iprot.readI64();
+                  struct.aborted.add(_elem501);
                }
                iprot.readSetEnd();
              }
@@ -471,13 +471,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe
          case 2: // NOSUCH
            if (schemeField.type == org.apache.thrift.protocol.TType.SET) {
              {
-                org.apache.thrift.protocol.TSet _set471 = iprot.readSetBegin();
-                struct.nosuch = new HashSet<Long>(2*_set471.size);
-                long _elem472;
-                for (int _i473 = 0; _i473 < _set471.size; ++_i473)
+                org.apache.thrift.protocol.TSet _set503 = iprot.readSetBegin();
+                struct.nosuch = new HashSet<Long>(2*_set503.size);
+                long _elem504;
+                for (int _i505 = 0; _i505 < _set503.size; ++_i505)
                {
-                  _elem472 = iprot.readI64();
-                  struct.nosuch.add(_elem472);
+                  _elem504 = iprot.readI64();
+                  struct.nosuch.add(_elem504);
                }
                iprot.readSetEnd();
              }
@@ -503,9 +503,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR
        oprot.writeFieldBegin(ABORTED_FIELD_DESC);
        {
          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size()));
-          for (long _iter474 : struct.aborted)
+          for (long _iter506 : struct.aborted)
          {
-            oprot.writeI64(_iter474);
+            oprot.writeI64(_iter506);
          }
          oprot.writeSetEnd();
        }
@@ -515,9 +515,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR
        oprot.writeFieldBegin(NOSUCH_FIELD_DESC);
        {
          oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size()));
-          for (long _iter475 : struct.nosuch)
+          for (long _iter507 : struct.nosuch)
          {
-            oprot.writeI64(_iter475);
+            oprot.writeI64(_iter507);
          }
          oprot.writeSetEnd();
        }
@@ -542,16 +542,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.aborted.size());
-        for (long _iter476 : struct.aborted)
+        for (long _iter508 : struct.aborted)
        {
-          oprot.writeI64(_iter476);
+          oprot.writeI64(_iter508);
        }
      }
      {
        oprot.writeI32(struct.nosuch.size());
-        for (long _iter477 : struct.nosuch)
+        for (long _iter509 : struct.nosuch)
        {
-          oprot.writeI64(_iter477);
+          oprot.writeI64(_iter509);
        }
      }
    }
@@ -560,24 +560,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe
    public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TSet _set478 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.aborted = new HashSet<Long>(2*_set478.size);
-        long _elem479;
-        for (int _i480 = 0; _i480 < _set478.size; ++_i480)
+        org.apache.thrift.protocol.TSet _set510 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.aborted = new HashSet<Long>(2*_set510.size);
+        long _elem511;
+        for (int _i512 = 0; _i512 < _set510.size; ++_i512)
        {
-          _elem479 = iprot.readI64();
-          struct.aborted.add(_elem479);
+          _elem511 = iprot.readI64();
+          struct.aborted.add(_elem511);
        }
      }
      struct.setAbortedIsSet(true);
      {
-        org.apache.thrift.protocol.TSet _set481 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.nosuch = new HashSet<Long>(2*_set481.size);
-        long _elem482;
-        for (int _i483 = 0; _i483 < _set481.size; ++_i483)
+        org.apache.thrift.protocol.TSet _set513 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.nosuch = new HashSet<Long>(2*_set513.size);
+        long _elem514;
+        for (int _i515 = 0; _i515 < _set513.size; ++_i515)
        {
-          _elem482 = iprot.readI64();
-          struct.nosuch.add(_elem482);
+          _elem514 = iprot.readI64();
+          struct.nosuch.add(_elem514);
        }
      }
      struct.setNosuchIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
index d7dd8ec..b062aed 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HiveObjectRef.java
@@ -707,13 +707,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HiveObjectRef struc
          case 4: // PART_VALUES
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list8 = iprot.readListBegin();
-                struct.partValues = new ArrayList<String>(_list8.size);
-                String _elem9;
-                for (int _i10 = 0; _i10 < _list8.size; ++_i10)
+                org.apache.thrift.protocol.TList _list24 = iprot.readListBegin();
+                struct.partValues = new ArrayList<String>(_list24.size);
+                String _elem25;
+                for (int _i26 = 0; _i26 < _list24.size; ++_i26)
                {
-                  _elem9 = iprot.readString();
-                  struct.partValues.add(_elem9);
+                  _elem25 = iprot.readString();
+                  struct.partValues.add(_elem25);
                }
                iprot.readListEnd();
              }
@@ -762,9 +762,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HiveObjectRef stru
        oprot.writeFieldBegin(PART_VALUES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partValues.size()));
-          for (String _iter11 : struct.partValues)
+          for (String _iter27 : struct.partValues)
          {
-            oprot.writeString(_iter11);
+            oprot.writeString(_iter27);
          }
          oprot.writeListEnd();
        }
@@ -821,9 +821,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struc
      if (struct.isSetPartValues()) {
        {
          oprot.writeI32(struct.partValues.size());
-          for (String _iter12 : struct.partValues)
+          for (String _iter28 : struct.partValues)
          {
-            oprot.writeString(_iter12);
+            oprot.writeString(_iter28);
          }
        }
      }
@@ -850,13 +850,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, HiveObjectRef struct
      }
      if (incoming.get(3)) {
        {
-          org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.partValues = new ArrayList<String>(_list13.size);
-          String _elem14;
-          for (int _i15 = 0; _i15 < _list13.size; ++_i15)
+          org.apache.thrift.protocol.TList _list29 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.partValues = new ArrayList<String>(_list29.size);
+          String _elem30;
+          for (int _i31 = 0; _i31 < _list29.size; ++_i31)
          {
-            _elem14 = iprot.readString();
-            struct.partValues.add(_elem14);
+            _elem30 = iprot.readString();
+            struct.partValues.add(_elem30);
          }
        }
        struct.setPartValuesIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
index 44d99c7..19b2f75 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Index.java
@@ -1133,15 +1133,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Index struct) throw
          case 9: // PARAMETERS
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
-                org.apache.thrift.protocol.TMap _map260 = iprot.readMapBegin();
-                struct.parameters = new HashMap<String,String>(2*_map260.size);
-                String _key261;
-                String _val262;
-                for (int _i263 = 0; _i263 < _map260.size; ++_i263)
+                org.apache.thrift.protocol.TMap _map292 = iprot.readMapBegin();
+                struct.parameters = new HashMap<String,String>(2*_map292.size);
+                String _key293;
+                String _val294;
+                for (int _i295 = 0; _i295 < _map292.size; ++_i295)
                {
-                  _key261 = iprot.readString();
-                  _val262 = iprot.readString();
-                  struct.parameters.put(_key261, _val262);
+                  _key293 = iprot.readString();
+                  _val294 = iprot.readString();
+                  struct.parameters.put(_key293, _val294);
                }
                iprot.readMapEnd();
              }
@@ -1211,10 +1211,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Index struct) thro
        oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
-          for (Map.Entry<String, String> _iter264 : struct.parameters.entrySet())
+          for (Map.Entry<String, String> _iter296 : struct.parameters.entrySet())
          {
-            oprot.writeString(_iter264.getKey());
-            oprot.writeString(_iter264.getValue());
+            oprot.writeString(_iter296.getKey());
+            oprot.writeString(_iter296.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -1299,10 +1299,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Index struct) throw
      if (struct.isSetParameters()) {
        {
          oprot.writeI32(struct.parameters.size());
-          for (Map.Entry<String, String> _iter265 : struct.parameters.entrySet())
+          for (Map.Entry<String, String> _iter297 : struct.parameters.entrySet())
          {
-            oprot.writeString(_iter265.getKey());
-            oprot.writeString(_iter265.getValue());
+            oprot.writeString(_iter297.getKey());
+            oprot.writeString(_iter297.getValue());
          }
        }
      }
@@ -1350,15 +1350,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Index struct) throws
      }
      if (incoming.get(8)) {
        {
-          org.apache.thrift.protocol.TMap _map266 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.parameters = new HashMap<String,String>(2*_map266.size);
-          String _key267;
-          String _val268;
-          for (int _i269 = 0; _i269 < _map266.size; ++_i269)
+          org.apache.thrift.protocol.TMap _map298 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.parameters = new HashMap<String,String>(2*_map298.size);
+          String _key299;
+          String _val300;
+          for (int _i301 = 0; _i301 < _map298.size; ++_i301)
          {
-            _key267 = iprot.readString();
-            _val268 = iprot.readString();
-            struct.parameters.put(_key267, _val268);
+            _key299 = iprot.readString();
+            _val300 = iprot.readString();
+            struct.parameters.put(_key299, _val300);
          }
        }
        struct.setParametersIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
index 488d3a0..8ca081f 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/InsertEventRequestData.java
@@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, InsertEventRequestD
          case 1: // FILES_ADDED
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list508 = iprot.readListBegin();
-                struct.filesAdded = new ArrayList<String>(_list508.size);
-                String _elem509;
-                for (int _i510 = 0; _i510 < _list508.size; ++_i510)
+                org.apache.thrift.protocol.TList _list540 = iprot.readListBegin();
+                struct.filesAdded = new ArrayList<String>(_list540.size);
+                String _elem541;
+                for (int _i542 = 0; _i542 < _list540.size; ++_i542)
                {
-                  _elem509 = iprot.readString();
-                  struct.filesAdded.add(_elem509);
+                  _elem541 = iprot.readString();
+                  struct.filesAdded.add(_elem541);
                }
                iprot.readListEnd();
              }
@@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, InsertEventRequest
        oprot.writeFieldBegin(FILES_ADDED_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.filesAdded.size()));
-          for (String _iter511 : struct.filesAdded)
+          for (String _iter543 : struct.filesAdded)
          {
-            oprot.writeString(_iter511);
+            oprot.writeString(_iter543);
          }
          oprot.writeListEnd();
        }
@@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.filesAdded.size());
-        for (String _iter512 : struct.filesAdded)
+        for (String _iter544 : struct.filesAdded)
        {
-          oprot.writeString(_iter512);
+          oprot.writeString(_iter544);
        }
      }
    }
@@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestD
    public void read(org.apache.thrift.protocol.TProtocol prot, InsertEventRequestData struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-        struct.filesAdded = new ArrayList<String>(_list513.size);
-        String _elem514;
-        for (int _i515 = 0; _i515 < _list513.size; ++_i515)
+        org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+        struct.filesAdded = new ArrayList<String>(_list545.size);
+        String _elem546;
+        for (int _i547 = 0; _i547 < _list545.size; ++_i547)
        {
-          _elem514 = iprot.readString();
-          struct.filesAdded.add(_elem514);
+          _elem546 = iprot.readString();
+          struct.filesAdded.add(_elem546);
        }
      }
      struct.setFilesAddedIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
index f39f582..aa24071 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java
@@ -689,14 +689,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct)
          case 1: // COMPONENT
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list452 = iprot.readListBegin();
-                struct.component = new ArrayList<LockComponent>(_list452.size);
-                LockComponent _elem453;
-                for (int _i454 = 0; _i454 < _list452.size; ++_i454)
+                org.apache.thrift.protocol.TList _list484 = iprot.readListBegin();
+                struct.component = new ArrayList<LockComponent>(_list484.size);
+                LockComponent _elem485;
+                for (int _i486 = 0; _i486 < _list484.size; ++_i486)
                {
-                  _elem453 = new LockComponent();
-                  _elem453.read(iprot);
-                  struct.component.add(_elem453);
+                  _elem485 = new LockComponent();
+                  _elem485.read(iprot);
+                  struct.component.add(_elem485);
                }
                iprot.readListEnd();
              }
@@ -754,9 +754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct
        oprot.writeFieldBegin(COMPONENT_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size()));
-          for (LockComponent _iter455 : struct.component)
+          for (LockComponent _iter487 : struct.component)
          {
-            _iter455.write(oprot);
+            _iter487.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -803,9 +803,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct)
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.component.size());
-        for (LockComponent _iter456 : struct.component)
+        for (LockComponent _iter488 : struct.component)
        {
-          _iter456.write(oprot);
+          _iter488.write(oprot);
        }
      }
      oprot.writeString(struct.user);
@@ -830,14 +830,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct)
    public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list457 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.component = new ArrayList<LockComponent>(_list457.size);
-        LockComponent _elem458;
-        for (int _i459 = 0; _i459 < _list457.size; ++_i459)
+        org.apache.thrift.protocol.TList _list489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.component = new ArrayList<LockComponent>(_list489.size);
+        LockComponent _elem490;
+        for (int _i491 = 0; _i491 < _list489.size; ++_i491)
        {
-          _elem458 = new LockComponent();
-          _elem458.read(iprot);
-          struct.component.add(_elem458);
+          _elem490 = new LockComponent();
+          _elem490.read(iprot);
+          struct.component.add(_elem490);
        }
      }
      struct.setComponentIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
index fcbbd18..bb0a273 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/NotificationEventResponse.java
@@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, NotificationEventRe
          case 1: // EVENTS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list500 = iprot.readListBegin();
-                struct.events = new ArrayList<NotificationEvent>(_list500.size);
-                NotificationEvent _elem501;
-                for (int _i502 = 0; _i502 < _list500.size; ++_i502)
+                org.apache.thrift.protocol.TList _list532 = iprot.readListBegin();
+                struct.events = new ArrayList<NotificationEvent>(_list532.size);
+                NotificationEvent _elem533;
+                for (int _i534 = 0; _i534 < _list532.size; ++_i534)
                {
-                  _elem501 = new NotificationEvent();
-                  _elem501.read(iprot);
-                  struct.events.add(_elem501);
+                  _elem533 = new NotificationEvent();
+                  _elem533.read(iprot);
+                  struct.events.add(_elem533);
                }
                iprot.readListEnd();
              }
@@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, NotificationEventR
        oprot.writeFieldBegin(EVENTS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.events.size()));
-          for (NotificationEvent _iter503 : struct.events)
+          for (NotificationEvent _iter535 : struct.events)
          {
-            _iter503.write(oprot);
+            _iter535.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.events.size());
-        for (NotificationEvent _iter504 : struct.events)
+        for (NotificationEvent _iter536 : struct.events)
        {
-          _iter504.write(oprot);
+          _iter536.write(oprot);
        }
      }
    }
@@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, NotificationEventRe
    public void read(org.apache.thrift.protocol.TProtocol prot, NotificationEventResponse struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.events = new ArrayList<NotificationEvent>(_list505.size);
-        NotificationEvent _elem506;
-        for (int _i507 = 0; _i507 < _list505.size; ++_i507)
+        org.apache.thrift.protocol.TList _list537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.events = new ArrayList<NotificationEvent>(_list537.size);
+        NotificationEvent _elem538;
+        for (int _i539 = 0; _i539 < _list537.size; ++_i539)
        {
-          _elem506 = new NotificationEvent();
-          _elem506.read(iprot);
-          struct.events.add(_elem506);
+          _elem538 = new NotificationEvent();
+          _elem538.read(iprot);
+          struct.events.add(_elem538);
        }
      }
      struct.setEventsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
index e11a2b3..a8af71b 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java
@@ -351,13 +351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse st
          case 1: // TXN_IDS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list444 = iprot.readListBegin();
-                struct.txn_ids = new ArrayList<Long>(_list444.size);
-                long _elem445;
-                for (int _i446 = 0; _i446 < _list444.size; ++_i446)
+                org.apache.thrift.protocol.TList _list476 = iprot.readListBegin();
+                struct.txn_ids = new ArrayList<Long>(_list476.size);
+                long _elem477;
+                for (int _i478 = 0; _i478 < _list476.size; ++_i478)
                {
-                  _elem445 = iprot.readI64();
-                  struct.txn_ids.add(_elem445);
+                  _elem477 = iprot.readI64();
+                  struct.txn_ids.add(_elem477);
                }
                iprot.readListEnd();
              }
@@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse s
        oprot.writeFieldBegin(TXN_IDS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size()));
-          for (long _iter447 : struct.txn_ids)
+          for (long _iter479 : struct.txn_ids)
          {
-            oprot.writeI64(_iter447);
+            oprot.writeI64(_iter479);
          }
          oprot.writeListEnd();
        }
@@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.txn_ids.size());
-        for (long _iter448 : struct.txn_ids)
+        for (long _iter480 : struct.txn_ids)
        {
-          oprot.writeI64(_iter448);
+          oprot.writeI64(_iter480);
        }
      }
    }
@@ -421,13 +421,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st
    public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list449 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
-        struct.txn_ids = new ArrayList<Long>(_list449.size);
-        long _elem450;
-        for (int _i451 = 0; _i451 < _list449.size; ++_i451)
+        org.apache.thrift.protocol.TList _list481 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32());
+        struct.txn_ids = new ArrayList<Long>(_list481.size);
+        long _elem482;
+        for (int _i483 = 0; _i483 < _list481.size; ++_i483)
        {
-          _elem450 = iprot.readI64();
-          struct.txn_ids.add(_elem450);
+          _elem482 = iprot.readI64();
+          struct.txn_ids.add(_elem482);
        }
      }
      struct.setTxn_idsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
index a247221..e058d2b 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Partition.java
@@ -931,13 +931,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t
          case 1: // VALUES
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list208 = iprot.readListBegin();
-                struct.values = new ArrayList<String>(_list208.size);
-                String _elem209;
-                for (int _i210 = 0; _i210 < _list208.size; ++_i210)
+                org.apache.thrift.protocol.TList _list240 = iprot.readListBegin();
+                struct.values = new ArrayList<String>(_list240.size);
+                String _elem241;
+                for (int _i242 = 0; _i242 < _list240.size; ++_i242)
                {
-                  _elem209 = iprot.readString();
-                  struct.values.add(_elem209);
+                  _elem241 = iprot.readString();
+                  struct.values.add(_elem241);
                }
                iprot.readListEnd();
              }
@@ -990,15 +990,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Partition struct) t
          case 7: // PARAMETERS
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
-                org.apache.thrift.protocol.TMap _map211 = iprot.readMapBegin();
-                struct.parameters = new HashMap<String,String>(2*_map211.size);
-                String _key212;
-                String _val213;
-                for (int _i214 = 0; _i214 < _map211.size; ++_i214)
+                org.apache.thrift.protocol.TMap _map243 = iprot.readMapBegin();
+                struct.parameters = new HashMap<String,String>(2*_map243.size);
+                String _key244;
+                String _val245;
+                for (int _i246 = 0; _i246 < _map243.size; ++_i246)
                {
-                  _key212 = iprot.readString();
-                  _val213 = iprot.readString();
-                  struct.parameters.put(_key212, _val213);
+                  _key244 = iprot.readString();
+                  _val245 = iprot.readString();
+                  struct.parameters.put(_key244, _val245);
                }
                iprot.readMapEnd();
              }
@@ -1033,9 +1033,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct)
        oprot.writeFieldBegin(VALUES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size()));
-          for (String _iter215 : struct.values)
+          for (String _iter247 : struct.values)
          {
-            oprot.writeString(_iter215);
+            oprot.writeString(_iter247);
          }
          oprot.writeListEnd();
        }
@@ -1066,10 +1066,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Partition struct)
        oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
-          for (Map.Entry<String, String> _iter216 : struct.parameters.entrySet())
+          for (Map.Entry<String, String> _iter248 : struct.parameters.entrySet())
          {
-            oprot.writeString(_iter216.getKey());
-            oprot.writeString(_iter216.getValue());
+            oprot.writeString(_iter248.getKey());
+            oprot.writeString(_iter248.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -1128,9 +1128,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t
      if (struct.isSetValues()) {
        {
          oprot.writeI32(struct.values.size());
-          for (String _iter217 : struct.values)
+          for (String _iter249 : struct.values)
          {
-            oprot.writeString(_iter217);
+            oprot.writeString(_iter249);
          }
        }
      }
@@ -1152,10 +1152,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Partition struct) t
      if (struct.isSetParameters()) {
        {
          oprot.writeI32(struct.parameters.size());
-          for (Map.Entry<String, String> _iter218 : struct.parameters.entrySet())
+          for (Map.Entry<String, String> _iter250 : struct.parameters.entrySet())
          {
-            oprot.writeString(_iter218.getKey());
-            oprot.writeString(_iter218.getValue());
+            oprot.writeString(_iter250.getKey());
+            oprot.writeString(_iter250.getValue());
          }
        }
      }
@@ -1170,13 +1170,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th
      BitSet incoming = iprot.readBitSet(8);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list219 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.values = new ArrayList<String>(_list219.size);
-          String _elem220;
-          for (int _i221 = 0; _i221 < _list219.size; ++_i221)
+          org.apache.thrift.protocol.TList _list251 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.values = new ArrayList<String>(_list251.size);
+          String _elem252;
+          for (int _i253 = 0; _i253 < _list251.size; ++_i253)
          {
-            _elem220 = iprot.readString();
-            struct.values.add(_elem220);
+            _elem252 = iprot.readString();
+            struct.values.add(_elem252);
          }
        }
        struct.setValuesIsSet(true);
@@ -1204,15 +1204,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Partition struct) th
      }
      if (incoming.get(6)) {
        {
-          org.apache.thrift.protocol.TMap _map222 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.parameters = new HashMap<String,String>(2*_map222.size);
-          String _key223;
-          String _val224;
-          for (int _i225 = 0; _i225 < _map222.size; ++_i225)
+          org.apache.thrift.protocol.TMap _map254 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.parameters = new HashMap<String,String>(2*_map254.size);
+          String _key255;
+          String _val256;
+          for (int _i257 = 0; _i257 < _map254.size; ++_i257)
          {
-            _key223 = iprot.readString();
-            _val224 = iprot.readString();
-            struct.parameters.put(_key223, _val224);
+            _key255 = iprot.readString();
+            _val256 = iprot.readString();
+            struct.parameters.put(_key255, _val256);
          }
        }
        struct.setParametersIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
index e8232b9..85e83cb 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionListComposingSpec.java
@@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionListCompos
          case 1: // PARTITIONS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list252 = iprot.readListBegin();
-                struct.partitions = new ArrayList<Partition>(_list252.size);
-                Partition _elem253;
-                for (int _i254 = 0; _i254 < _list252.size; ++_i254)
+                org.apache.thrift.protocol.TList _list284 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list284.size);
+                Partition _elem285;
+                for (int _i286 = 0; _i286 < _list284.size; ++_i286)
                {
-                  _elem253 = new Partition();
-                  _elem253.read(iprot);
-                  struct.partitions.add(_elem253);
+                  _elem285 = new Partition();
+                  _elem285.read(iprot);
+                  struct.partitions.add(_elem285);
                }
                iprot.readListEnd();
              }
@@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionListCompo
        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
-          for (Partition _iter255 : struct.partitions)
+          for (Partition _iter287 : struct.partitions)
          {
-            _iter255.write(oprot);
+            _iter287.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionListCompos
      if (struct.isSetPartitions()) {
        {
          oprot.writeI32(struct.partitions.size());
-          for (Partition _iter256 : struct.partitions)
+          for (Partition _iter288 : struct.partitions)
          {
-            _iter256.write(oprot);
+            _iter288.write(oprot);
          }
        }
      }
@@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionListComposi
      BitSet incoming = iprot.readBitSet(1);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.partitions = new ArrayList<Partition>(_list257.size);
-          Partition _elem258;
-          for (int _i259 = 0; _i259 < _list257.size; ++_i259)
+          org.apache.thrift.protocol.TList _list289 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitions = new ArrayList<Partition>(_list289.size);
+          Partition _elem290;
+          for (int _i291 = 0; _i291 < _list289.size; ++_i291)
          {
-            _elem258 = new Partition();
-            _elem258.read(iprot);
-            struct.partitions.add(_elem258);
+            _elem290 = new Partition();
+            _elem290.read(iprot);
+            struct.partitions.add(_elem290);
          }
        }
        struct.setPartitionsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
index 2c41912..34fe3bb 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionSpecWithSharedSD.java
@@ -434,14 +434,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionSpecWithSh
          case 1: // PARTITIONS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list244 = iprot.readListBegin();
-                struct.partitions = new ArrayList<PartitionWithoutSD>(_list244.size);
-                PartitionWithoutSD _elem245;
-                for (int _i246 = 0; _i246 < _list244.size; ++_i246)
+                org.apache.thrift.protocol.TList _list276 = iprot.readListBegin();
+                struct.partitions = new ArrayList<PartitionWithoutSD>(_list276.size);
+                PartitionWithoutSD _elem277;
+                for (int _i278 = 0; _i278 < _list276.size; ++_i278)
                {
-                  _elem245 = new PartitionWithoutSD();
-                  _elem245.read(iprot);
-                  struct.partitions.add(_elem245);
+                  _elem277 = new PartitionWithoutSD();
+                  _elem277.read(iprot);
+                  struct.partitions.add(_elem277);
                }
                iprot.readListEnd();
              }
@@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionSpecWithS
        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
-          for (PartitionWithoutSD _iter247 : struct.partitions)
+          for (PartitionWithoutSD _iter279 : struct.partitions)
          {
-            _iter247.write(oprot);
+            _iter279.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -517,9 +517,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSh
      if (struct.isSetPartitions()) {
        {
          oprot.writeI32(struct.partitions.size());
-          for (PartitionWithoutSD _iter248 : struct.partitions)
+          for (PartitionWithoutSD _iter280 : struct.partitions)
          {
-            _iter248.write(oprot);
+            _iter280.write(oprot);
          }
        }
      }
@@ -534,14 +534,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionSpecWithSha
      BitSet incoming = iprot.readBitSet(2);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list249 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-          struct.partitions = new ArrayList<PartitionWithoutSD>(_list249.size);
-          PartitionWithoutSD _elem250;
-          for (int _i251 = 0; _i251 < _list249.size; ++_i251)
+          org.apache.thrift.protocol.TList _list281 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+          struct.partitions = new ArrayList<PartitionWithoutSD>(_list281.size);
+          PartitionWithoutSD _elem282;
+          for (int _i283 = 0; _i283 < _list281.size; ++_i283)
          {
-            _elem250 = new PartitionWithoutSD();
-            _elem250.read(iprot);
-            struct.partitions.add(_elem250);
+            _elem282 = new PartitionWithoutSD();
+            _elem282.read(iprot);
+            struct.partitions.add(_elem282);
          }
        }
        struct.setPartitionsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
index f0c5ce1..f1b7c39 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionWithoutSD.java
@@ -766,13 +766,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD
          case 1: // VALUES
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list226 = iprot.readListBegin();
-                struct.values = new ArrayList<String>(_list226.size);
-                String _elem227;
-                for (int _i228 = 0; _i228 < _list226.size; ++_i228)
+                org.apache.thrift.protocol.TList _list258 = iprot.readListBegin();
+                struct.values = new ArrayList<String>(_list258.size);
+                String _elem259;
+                for (int _i260 = 0; _i260 < _list258.size; ++_i260)
                {
-                  _elem227 = iprot.readString();
-                  struct.values.add(_elem227);
+                  _elem259 = iprot.readString();
+                  struct.values.add(_elem259);
                }
                iprot.readListEnd();
              }
@@ -808,15 +808,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionWithoutSD
          case 5: // PARAMETERS
            if (schemeField.type == org.apache.thrift.protocol.TType.MAP) {
              {
-                org.apache.thrift.protocol.TMap _map229 = iprot.readMapBegin();
-                struct.parameters = new HashMap<String,String>(2*_map229.size);
-                String _key230;
-                String _val231;
-                for (int _i232 = 0; _i232 < _map229.size; ++_i232)
+                org.apache.thrift.protocol.TMap _map261 = iprot.readMapBegin();
+                struct.parameters = new HashMap<String,String>(2*_map261.size);
+                String _key262;
+                String _val263;
+                for (int _i264 = 0; _i264 < _map261.size; ++_i264)
                {
-                  _key230 = iprot.readString();
-                  _val231 = iprot.readString();
-                  struct.parameters.put(_key230, _val231);
+                  _key262 = iprot.readString();
+                  _val263 = iprot.readString();
+                  struct.parameters.put(_key262, _val263);
                }
                iprot.readMapEnd();
              }
@@ -851,9 +851,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD
        oprot.writeFieldBegin(VALUES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.values.size()));
-          for (String _iter233 : struct.values)
+          for (String _iter265 : struct.values)
          {
-            oprot.writeString(_iter233);
+            oprot.writeString(_iter265);
          }
          oprot.writeListEnd();
        }
@@ -874,10 +874,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionWithoutSD
        oprot.writeFieldBegin(PARAMETERS_FIELD_DESC);
        {
          oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size()));
-          for (Map.Entry<String, String> _iter234 : struct.parameters.entrySet())
+          for (Map.Entry<String, String> _iter266 : struct.parameters.entrySet())
          {
-            oprot.writeString(_iter234.getKey());
-            oprot.writeString(_iter234.getValue());
+            oprot.writeString(_iter266.getKey());
+            oprot.writeString(_iter266.getValue());
          }
          oprot.writeMapEnd();
        }
@@ -930,9 +930,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD
      if (struct.isSetValues()) {
        {
          oprot.writeI32(struct.values.size());
-          for (String _iter235 : struct.values)
+          for (String _iter267 : struct.values)
          {
-            oprot.writeString(_iter235);
+            oprot.writeString(_iter267);
          }
        }
      }
@@ -948,10 +948,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD
      if (struct.isSetParameters()) {
        {
          oprot.writeI32(struct.parameters.size());
-          for (Map.Entry<String, String> _iter236 : struct.parameters.entrySet())
+          for (Map.Entry<String, String> _iter268 : struct.parameters.entrySet())
          {
-            oprot.writeString(_iter236.getKey());
-            oprot.writeString(_iter236.getValue());
+            oprot.writeString(_iter268.getKey());
+            oprot.writeString(_iter268.getValue());
          }
        }
      }
@@ -966,13 +966,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD s
      BitSet incoming = iprot.readBitSet(6);
      if (incoming.get(0)) {
        {
-          org.apache.thrift.protocol.TList _list237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.values = new ArrayList<String>(_list237.size);
-          String _elem238;
-          for (int _i239 = 0; _i239 < _list237.size; ++_i239)
+          org.apache.thrift.protocol.TList _list269 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.values = new ArrayList<String>(_list269.size);
+          String _elem270;
+          for (int _i271 = 0; _i271 < _list269.size; ++_i271)
          {
-            _elem238 = iprot.readString();
-            struct.values.add(_elem238);
+            _elem270 = iprot.readString();
+            struct.values.add(_elem270);
          }
        }
        struct.setValuesIsSet(true);
@@ -991,15 +991,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionWithoutSD s
      }
      if (incoming.get(4)) {
        {
-          org.apache.thrift.protocol.TMap _map240 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
-          struct.parameters = new HashMap<String,String>(2*_map240.size);
-          String _key241;
-          String _val242;
-          for (int _i243 = 0; _i243 < _map240.size; ++_i243)
+          org.apache.thrift.protocol.TMap _map272 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32());
+          struct.parameters = new HashMap<String,String>(2*_map272.size);
+          String _key273;
+          String _val274;
+          for (int _i275 = 0; _i275 < _map272.size; ++_i275)
          {
-            _key241 = iprot.readString();
-            _val242 = iprot.readString();
-            struct.parameters.put(_key241, _val242);
+            _key273 = iprot.readString();
+            _val274 = iprot.readString();
+            struct.parameters.put(_key273, _val274);
          }
        }
        struct.setParametersIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
index 12ae66d..0533053 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java
@@ -439,14 +439,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRes
          case 1: // PARTITIONS
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list322 = iprot.readListBegin();
-                struct.partitions = new ArrayList<Partition>(_list322.size);
-                Partition _elem323;
-                for (int _i324 = 0; _i324 < _list322.size; ++_i324)
+                org.apache.thrift.protocol.TList _list354 = iprot.readListBegin();
+                struct.partitions = new ArrayList<Partition>(_list354.size);
+                Partition _elem355;
+                for (int _i356 = 0; _i356 < _list354.size; ++_i356)
                {
-                  _elem323 = new Partition();
-                  _elem323.read(iprot);
-                  struct.partitions.add(_elem323);
+                  _elem355 = new Partition();
+                  _elem355.read(iprot);
+                  struct.partitions.add(_elem355);
                }
                iprot.readListEnd();
              }
@@ -480,9 +480,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe
        oprot.writeFieldBegin(PARTITIONS_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size()));
-          for (Partition _iter325 : struct.partitions)
+          for (Partition _iter357 : struct.partitions)
          {
-            _iter325.write(oprot);
+            _iter357.write(oprot);
          }
          oprot.writeListEnd();
        }
@@ -510,9 +510,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes
      TTupleProtocol oprot = (TTupleProtocol) prot;
      {
        oprot.writeI32(struct.partitions.size());
-        for (Partition _iter326 : struct.partitions)
+        for (Partition _iter358 : struct.partitions)
        {
-          _iter326.write(oprot);
+          _iter358.write(oprot);
        }
      }
      oprot.writeBool(struct.hasUnknownPartitions);
@@ -522,14 +522,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes
    public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException {
      TTupleProtocol iprot = (TTupleProtocol) prot;
      {
-        org.apache.thrift.protocol.TList _list327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
-        struct.partitions = new ArrayList<Partition>(_list327.size);
-        Partition _elem328;
-        for (int _i329 = 0; _i329 < _list327.size; ++_i329)
+        org.apache.thrift.protocol.TList _list359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32());
+        struct.partitions = new ArrayList<Partition>(_list359.size);
+        Partition _elem360;
+        for (int _i361 = 0; _i361 < _list359.size; ++_i361)
        {
-          _elem328 = new Partition();
-          _elem328.read(iprot);
-          struct.partitions.add(_elem328);
+          _elem360 = new Partition();
+          _elem360.read(iprot);
+          struct.partitions.add(_elem360);
        }
      }
      struct.setPartitionsIsSet(true);
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
index 8416369..65b8a54 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java
@@ -639,13 +639,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ
          case 3: // COL_NAMES
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list364 = iprot.readListBegin();
-                struct.colNames = new ArrayList<String>(_list364.size);
-                String _elem365;
-                for (int _i366 = 0; _i366 < _list364.size; ++_i366)
+                org.apache.thrift.protocol.TList _list396 = iprot.readListBegin();
+                struct.colNames = new ArrayList<String>(_list396.size);
+                String _elem397;
+                for (int _i398 = 0; _i398 < _list396.size; ++_i398)
                {
-                  _elem365 = iprot.readString();
-                  struct.colNames.add(_elem365);
+                  _elem397 = iprot.readString();
+                  struct.colNames.add(_elem397);
                }
                iprot.readListEnd();
              }
@@ -657,13 +657,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ
          case 4: // PART_NAMES
            if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
              {
-                org.apache.thrift.protocol.TList _list367 = iprot.readListBegin();
-                struct.partNames = new ArrayList<String>(_list367.size);
-                String _elem368;
-                for (int _i369 = 0; _i369 < _list367.size; ++_i369)
+                org.apache.thrift.protocol.TList _list399 = iprot.readListBegin();
+                struct.partNames = new ArrayList<String>(_list399.size);
+                String _elem400;
+                for (int _i401 = 0; _i401 < _list399.size; ++_i401)
                {
-                  _elem368 = iprot.readString();
-                  struct.partNames.add(_elem368);
+                  _elem400 = iprot.readString();
+                  struct.partNames.add(_elem400);
                }
                iprot.readListEnd();
              }
@@ -699,9 +699,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq
        oprot.writeFieldBegin(COL_NAMES_FIELD_DESC);
        {
          oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size()));
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter370 : struct.colNames) + for (String _iter402 : struct.colNames) { - oprot.writeString(_iter370); + oprot.writeString(_iter402); } oprot.writeListEnd(); } @@ -711,9 +711,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter371 : struct.partNames) + for (String _iter403 : struct.partNames) { - oprot.writeString(_iter371); + oprot.writeString(_iter403); } oprot.writeListEnd(); } @@ -740,16 +740,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter372 : struct.colNames) + for (String _iter404 : struct.colNames) { - oprot.writeString(_iter372); + oprot.writeString(_iter404); } } { oprot.writeI32(struct.partNames.size()); - for (String _iter373 : struct.partNames) + for (String _iter405 : struct.partNames) { - oprot.writeString(_iter373); + oprot.writeString(_iter405); } } } @@ -762,24 +762,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list374 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list374.size); - String _elem375; - for (int _i376 = 0; _i376 < _list374.size; ++_i376) + org.apache.thrift.protocol.TList _list406 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list406.size); + String _elem407; + for (int _i408 = 0; _i408 < _list406.size; ++_i408) { - _elem375 = iprot.readString(); - struct.colNames.add(_elem375); + _elem407 = iprot.readString(); + struct.colNames.add(_elem407); } } struct.setColNamesIsSet(true); { - org.apache.thrift.protocol.TList _list377 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list377.size); - String _elem378; - for (int _i379 = 0; _i379 < _list377.size; ++_i379) + org.apache.thrift.protocol.TList _list409 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list409.size); + String _elem410; + for (int _i411 = 0; _i411 < _list409.size; ++_i411) { - _elem378 = iprot.readString(); - struct.partNames.add(_elem378); + _elem410 = iprot.readString(); + struct.partNames.add(_elem410); } } struct.setPartNamesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index 2e903f1..1d5e6ce 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -363,26 +363,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu case 1: // PART_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map338 = iprot.readMapBegin(); 
- struct.partStats = new HashMap>(2*_map338.size); - String _key339; - List _val340; - for (int _i341 = 0; _i341 < _map338.size; ++_i341) + org.apache.thrift.protocol.TMap _map370 = iprot.readMapBegin(); + struct.partStats = new HashMap>(2*_map370.size); + String _key371; + List _val372; + for (int _i373 = 0; _i373 < _map370.size; ++_i373) { - _key339 = iprot.readString(); + _key371 = iprot.readString(); { - org.apache.thrift.protocol.TList _list342 = iprot.readListBegin(); - _val340 = new ArrayList(_list342.size); - ColumnStatisticsObj _elem343; - for (int _i344 = 0; _i344 < _list342.size; ++_i344) + org.apache.thrift.protocol.TList _list374 = iprot.readListBegin(); + _val372 = new ArrayList(_list374.size); + ColumnStatisticsObj _elem375; + for (int _i376 = 0; _i376 < _list374.size; ++_i376) { - _elem343 = new ColumnStatisticsObj(); - _elem343.read(iprot); - _val340.add(_elem343); + _elem375 = new ColumnStatisticsObj(); + _elem375.read(iprot); + _val372.add(_elem375); } iprot.readListEnd(); } - struct.partStats.put(_key339, _val340); + struct.partStats.put(_key371, _val372); } iprot.readMapEnd(); } @@ -408,14 +408,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes oprot.writeFieldBegin(PART_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size())); - for (Map.Entry> _iter345 : struct.partStats.entrySet()) + for (Map.Entry> _iter377 : struct.partStats.entrySet()) { - oprot.writeString(_iter345.getKey()); + oprot.writeString(_iter377.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter345.getValue().size())); - for (ColumnStatisticsObj _iter346 : _iter345.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter377.getValue().size())); + for (ColumnStatisticsObj _iter378 : _iter377.getValue()) { - _iter346.write(oprot); + _iter378.write(oprot); } oprot.writeListEnd(); } @@ -443,14 +443,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partStats.size()); - for (Map.Entry> _iter347 : struct.partStats.entrySet()) + for (Map.Entry> _iter379 : struct.partStats.entrySet()) { - oprot.writeString(_iter347.getKey()); + oprot.writeString(_iter379.getKey()); { - oprot.writeI32(_iter347.getValue().size()); - for (ColumnStatisticsObj _iter348 : _iter347.getValue()) + oprot.writeI32(_iter379.getValue().size()); + for (ColumnStatisticsObj _iter380 : _iter379.getValue()) { - _iter348.write(oprot); + _iter380.write(oprot); } } } @@ -461,25 +461,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map349 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.partStats = new HashMap>(2*_map349.size); - String _key350; - List _val351; - for (int _i352 = 0; _i352 < _map349.size; ++_i352) + org.apache.thrift.protocol.TMap _map381 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.partStats = new 
HashMap>(2*_map381.size); + String _key382; + List _val383; + for (int _i384 = 0; _i384 < _map381.size; ++_i384) { - _key350 = iprot.readString(); + _key382 = iprot.readString(); { - org.apache.thrift.protocol.TList _list353 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val351 = new ArrayList(_list353.size); - ColumnStatisticsObj _elem354; - for (int _i355 = 0; _i355 < _list353.size; ++_i355) + org.apache.thrift.protocol.TList _list385 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val383 = new ArrayList(_list385.size); + ColumnStatisticsObj _elem386; + for (int _i387 = 0; _i387 < _list385.size; ++_i387) { - _elem354 = new ColumnStatisticsObj(); - _elem354.read(iprot); - _val351.add(_elem354); + _elem386 = new ColumnStatisticsObj(); + _elem386.read(iprot); + _val383.add(_elem386); } } - struct.partStats.put(_key350, _val351); + struct.partStats.put(_key382, _val383); } } struct.setPartStatsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java index e1792bd..6b95d60 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrincipalPrivilegeSet.java @@ -579,26 +579,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrincipalPrivilegeS case 1: // USER_PRIVILEGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map24 = iprot.readMapBegin(); - struct.userPrivileges = new HashMap>(2*_map24.size); - String _key25; - List _val26; - for (int _i27 = 0; _i27 < _map24.size; ++_i27) + org.apache.thrift.protocol.TMap _map40 = iprot.readMapBegin(); + struct.userPrivileges = new HashMap>(2*_map40.size); + String _key41; + List _val42; + for (int _i43 = 0; _i43 < _map40.size; ++_i43) { - _key25 = iprot.readString(); + _key41 = iprot.readString(); { - org.apache.thrift.protocol.TList _list28 = iprot.readListBegin(); - _val26 = new ArrayList(_list28.size); - PrivilegeGrantInfo _elem29; - for (int _i30 = 0; _i30 < _list28.size; ++_i30) + org.apache.thrift.protocol.TList _list44 = iprot.readListBegin(); + _val42 = new ArrayList(_list44.size); + PrivilegeGrantInfo _elem45; + for (int _i46 = 0; _i46 < _list44.size; ++_i46) { - _elem29 = new PrivilegeGrantInfo(); - _elem29.read(iprot); - _val26.add(_elem29); + _elem45 = new PrivilegeGrantInfo(); + _elem45.read(iprot); + _val42.add(_elem45); } iprot.readListEnd(); } - struct.userPrivileges.put(_key25, _val26); + struct.userPrivileges.put(_key41, _val42); } iprot.readMapEnd(); } @@ -610,26 +610,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrincipalPrivilegeS case 2: // GROUP_PRIVILEGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map31 = iprot.readMapBegin(); - struct.groupPrivileges = new HashMap>(2*_map31.size); - String _key32; - List _val33; - for (int _i34 = 0; _i34 < _map31.size; ++_i34) + org.apache.thrift.protocol.TMap _map47 = iprot.readMapBegin(); + struct.groupPrivileges = new HashMap>(2*_map47.size); + String _key48; + List _val49; + for (int _i50 = 0; _i50 < _map47.size; ++_i50) { - _key32 = iprot.readString(); + _key48 = iprot.readString(); { - org.apache.thrift.protocol.TList _list35 = 
iprot.readListBegin(); - _val33 = new ArrayList(_list35.size); - PrivilegeGrantInfo _elem36; - for (int _i37 = 0; _i37 < _list35.size; ++_i37) + org.apache.thrift.protocol.TList _list51 = iprot.readListBegin(); + _val49 = new ArrayList(_list51.size); + PrivilegeGrantInfo _elem52; + for (int _i53 = 0; _i53 < _list51.size; ++_i53) { - _elem36 = new PrivilegeGrantInfo(); - _elem36.read(iprot); - _val33.add(_elem36); + _elem52 = new PrivilegeGrantInfo(); + _elem52.read(iprot); + _val49.add(_elem52); } iprot.readListEnd(); } - struct.groupPrivileges.put(_key32, _val33); + struct.groupPrivileges.put(_key48, _val49); } iprot.readMapEnd(); } @@ -641,26 +641,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrincipalPrivilegeS case 3: // ROLE_PRIVILEGES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map38 = iprot.readMapBegin(); - struct.rolePrivileges = new HashMap>(2*_map38.size); - String _key39; - List _val40; - for (int _i41 = 0; _i41 < _map38.size; ++_i41) + org.apache.thrift.protocol.TMap _map54 = iprot.readMapBegin(); + struct.rolePrivileges = new HashMap>(2*_map54.size); + String _key55; + List _val56; + for (int _i57 = 0; _i57 < _map54.size; ++_i57) { - _key39 = iprot.readString(); + _key55 = iprot.readString(); { - org.apache.thrift.protocol.TList _list42 = iprot.readListBegin(); - _val40 = new ArrayList(_list42.size); - PrivilegeGrantInfo _elem43; - for (int _i44 = 0; _i44 < _list42.size; ++_i44) + org.apache.thrift.protocol.TList _list58 = iprot.readListBegin(); + _val56 = new ArrayList(_list58.size); + PrivilegeGrantInfo _elem59; + for (int _i60 = 0; _i60 < _list58.size; ++_i60) { - _elem43 = new PrivilegeGrantInfo(); - _elem43.read(iprot); - _val40.add(_elem43); + _elem59 = new PrivilegeGrantInfo(); + _elem59.read(iprot); + _val56.add(_elem59); } iprot.readListEnd(); } - struct.rolePrivileges.put(_key39, _val40); + struct.rolePrivileges.put(_key55, _val56); } iprot.readMapEnd(); } @@ -686,14 +686,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrincipalPrivilege oprot.writeFieldBegin(USER_PRIVILEGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.userPrivileges.size())); - for (Map.Entry> _iter45 : struct.userPrivileges.entrySet()) + for (Map.Entry> _iter61 : struct.userPrivileges.entrySet()) { - oprot.writeString(_iter45.getKey()); + oprot.writeString(_iter61.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter45.getValue().size())); - for (PrivilegeGrantInfo _iter46 : _iter45.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter61.getValue().size())); + for (PrivilegeGrantInfo _iter62 : _iter61.getValue()) { - _iter46.write(oprot); + _iter62.write(oprot); } oprot.writeListEnd(); } @@ -706,14 +706,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrincipalPrivilege oprot.writeFieldBegin(GROUP_PRIVILEGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.groupPrivileges.size())); - for (Map.Entry> _iter47 : struct.groupPrivileges.entrySet()) + for (Map.Entry> _iter63 : struct.groupPrivileges.entrySet()) { - oprot.writeString(_iter47.getKey()); + oprot.writeString(_iter63.getKey()); { - oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter47.getValue().size())); - for (PrivilegeGrantInfo _iter48 : _iter47.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter63.getValue().size())); + for (PrivilegeGrantInfo _iter64 : _iter63.getValue()) { - _iter48.write(oprot); + _iter64.write(oprot); } oprot.writeListEnd(); } @@ -726,14 +726,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrincipalPrivilege oprot.writeFieldBegin(ROLE_PRIVILEGES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.rolePrivileges.size())); - for (Map.Entry> _iter49 : struct.rolePrivileges.entrySet()) + for (Map.Entry> _iter65 : struct.rolePrivileges.entrySet()) { - oprot.writeString(_iter49.getKey()); + oprot.writeString(_iter65.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter49.getValue().size())); - for (PrivilegeGrantInfo _iter50 : _iter49.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter65.getValue().size())); + for (PrivilegeGrantInfo _iter66 : _iter65.getValue()) { - _iter50.write(oprot); + _iter66.write(oprot); } oprot.writeListEnd(); } @@ -773,14 +773,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrincipalPrivilegeS if (struct.isSetUserPrivileges()) { { oprot.writeI32(struct.userPrivileges.size()); - for (Map.Entry> _iter51 : struct.userPrivileges.entrySet()) + for (Map.Entry> _iter67 : struct.userPrivileges.entrySet()) { - oprot.writeString(_iter51.getKey()); + oprot.writeString(_iter67.getKey()); { - oprot.writeI32(_iter51.getValue().size()); - for (PrivilegeGrantInfo _iter52 : _iter51.getValue()) + oprot.writeI32(_iter67.getValue().size()); + for (PrivilegeGrantInfo _iter68 : _iter67.getValue()) { - _iter52.write(oprot); + _iter68.write(oprot); } } } @@ -789,14 +789,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrincipalPrivilegeS if (struct.isSetGroupPrivileges()) { { oprot.writeI32(struct.groupPrivileges.size()); - for (Map.Entry> _iter53 : struct.groupPrivileges.entrySet()) + for (Map.Entry> _iter69 : struct.groupPrivileges.entrySet()) { - oprot.writeString(_iter53.getKey()); + oprot.writeString(_iter69.getKey()); { - oprot.writeI32(_iter53.getValue().size()); - for (PrivilegeGrantInfo _iter54 : _iter53.getValue()) + oprot.writeI32(_iter69.getValue().size()); + for (PrivilegeGrantInfo _iter70 : _iter69.getValue()) { - _iter54.write(oprot); + _iter70.write(oprot); } } } @@ -805,14 +805,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrincipalPrivilegeS if (struct.isSetRolePrivileges()) { { oprot.writeI32(struct.rolePrivileges.size()); - for (Map.Entry> _iter55 : struct.rolePrivileges.entrySet()) + for (Map.Entry> _iter71 : struct.rolePrivileges.entrySet()) { - oprot.writeString(_iter55.getKey()); + oprot.writeString(_iter71.getKey()); { - oprot.writeI32(_iter55.getValue().size()); - for (PrivilegeGrantInfo _iter56 : _iter55.getValue()) + oprot.writeI32(_iter71.getValue().size()); + for (PrivilegeGrantInfo _iter72 : _iter71.getValue()) { - _iter56.write(oprot); + _iter72.write(oprot); } } } @@ -826,75 +826,75 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PrincipalPrivilegeSe BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TMap _map57 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.userPrivileges = new HashMap>(2*_map57.size); - String _key58; - List _val59; - for (int _i60 = 0; _i60 < _map57.size; ++_i60) + org.apache.thrift.protocol.TMap _map73 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.userPrivileges = new HashMap>(2*_map73.size); + String _key74; + List _val75; + for (int _i76 = 0; _i76 < _map73.size; ++_i76) { - _key58 = iprot.readString(); + _key74 = iprot.readString(); { - org.apache.thrift.protocol.TList _list61 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val59 = new ArrayList(_list61.size); - PrivilegeGrantInfo _elem62; - for (int _i63 = 0; _i63 < _list61.size; ++_i63) + org.apache.thrift.protocol.TList _list77 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val75 = new ArrayList(_list77.size); + PrivilegeGrantInfo _elem78; + for (int _i79 = 0; _i79 < _list77.size; ++_i79) { - _elem62 = new PrivilegeGrantInfo(); - _elem62.read(iprot); - _val59.add(_elem62); + _elem78 = new PrivilegeGrantInfo(); + _elem78.read(iprot); + _val75.add(_elem78); } } - struct.userPrivileges.put(_key58, _val59); + struct.userPrivileges.put(_key74, _val75); } } struct.setUserPrivilegesIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map64 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.groupPrivileges = new HashMap>(2*_map64.size); - String _key65; - List _val66; - for (int _i67 = 0; _i67 < _map64.size; ++_i67) + org.apache.thrift.protocol.TMap _map80 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.groupPrivileges = new HashMap>(2*_map80.size); + String _key81; + List _val82; + for (int _i83 = 0; _i83 < _map80.size; ++_i83) { - _key65 = iprot.readString(); + _key81 = iprot.readString(); { - org.apache.thrift.protocol.TList _list68 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val66 = new ArrayList(_list68.size); - PrivilegeGrantInfo _elem69; - for (int _i70 = 0; _i70 < _list68.size; ++_i70) + org.apache.thrift.protocol.TList _list84 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val82 = new ArrayList(_list84.size); + PrivilegeGrantInfo _elem85; + for (int _i86 = 0; _i86 < _list84.size; ++_i86) { - _elem69 = new PrivilegeGrantInfo(); - _elem69.read(iprot); - _val66.add(_elem69); + _elem85 = new PrivilegeGrantInfo(); + _elem85.read(iprot); + _val82.add(_elem85); } } - struct.groupPrivileges.put(_key65, _val66); + struct.groupPrivileges.put(_key81, _val82); } } struct.setGroupPrivilegesIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map71 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.rolePrivileges = new HashMap>(2*_map71.size); - String _key72; - List _val73; - for (int _i74 = 0; _i74 < _map71.size; ++_i74) + org.apache.thrift.protocol.TMap _map87 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, 
org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.rolePrivileges = new HashMap>(2*_map87.size); + String _key88; + List _val89; + for (int _i90 = 0; _i90 < _map87.size; ++_i90) { - _key72 = iprot.readString(); + _key88 = iprot.readString(); { - org.apache.thrift.protocol.TList _list75 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val73 = new ArrayList(_list75.size); - PrivilegeGrantInfo _elem76; - for (int _i77 = 0; _i77 < _list75.size; ++_i77) + org.apache.thrift.protocol.TList _list91 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val89 = new ArrayList(_list91.size); + PrivilegeGrantInfo _elem92; + for (int _i93 = 0; _i93 < _list91.size; ++_i93) { - _elem76 = new PrivilegeGrantInfo(); - _elem76.read(iprot); - _val73.add(_elem76); + _elem92 = new PrivilegeGrantInfo(); + _elem92.read(iprot); + _val89.add(_elem92); } } - struct.rolePrivileges.put(_key72, _val73); + struct.rolePrivileges.put(_key88, _val89); } } struct.setRolePrivilegesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java index bba2d40..aeb0dd0 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PrivilegeBag.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PrivilegeBag struct case 1: // PRIVILEGES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list16 = iprot.readListBegin(); - struct.privileges = new ArrayList(_list16.size); - HiveObjectPrivilege _elem17; - for (int _i18 = 0; _i18 < _list16.size; ++_i18) + org.apache.thrift.protocol.TList _list32 = iprot.readListBegin(); + struct.privileges = new ArrayList(_list32.size); + HiveObjectPrivilege _elem33; + for (int _i34 = 0; _i34 < _list32.size; ++_i34) { - _elem17 = new HiveObjectPrivilege(); - _elem17.read(iprot); - struct.privileges.add(_elem17); + _elem33 = new HiveObjectPrivilege(); + _elem33.read(iprot); + struct.privileges.add(_elem33); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PrivilegeBag struc oprot.writeFieldBegin(PRIVILEGES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.privileges.size())); - for (HiveObjectPrivilege _iter19 : struct.privileges) + for (HiveObjectPrivilege _iter35 : struct.privileges) { - _iter19.write(oprot); + _iter35.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PrivilegeBag struct if (struct.isSetPrivileges()) { { oprot.writeI32(struct.privileges.size()); - for (HiveObjectPrivilege _iter20 : struct.privileges) + for (HiveObjectPrivilege _iter36 : struct.privileges) { - _iter20.write(oprot); + _iter36.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PrivilegeBag struct) BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.privileges = new ArrayList(_list21.size); - HiveObjectPrivilege _elem22; - for (int _i23 = 0; _i23 < 
_list21.size; ++_i23) + org.apache.thrift.protocol.TList _list37 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.privileges = new ArrayList(_list37.size); + HiveObjectPrivilege _elem38; + for (int _i39 = 0; _i39 < _list37.size; ++_i39) { - _elem22 = new HiveObjectPrivilege(); - _elem22.read(iprot); - struct.privileges.add(_elem22); + _elem38 = new HiveObjectPrivilege(); + _elem38.read(iprot); + struct.privileges.add(_elem38); } } struct.setPrivilegesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java index ab151b1..69b0cb6 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PutFileMetadataRequest.java @@ -547,13 +547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 1: // FILE_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list560 = iprot.readListBegin(); - struct.fileIds = new ArrayList(_list560.size); - long _elem561; - for (int _i562 = 0; _i562 < _list560.size; ++_i562) + org.apache.thrift.protocol.TList _list592 = iprot.readListBegin(); + struct.fileIds = new ArrayList(_list592.size); + long _elem593; + for (int _i594 = 0; _i594 < _list592.size; ++_i594) { - _elem561 = iprot.readI64(); - struct.fileIds.add(_elem561); + _elem593 = iprot.readI64(); + struct.fileIds.add(_elem593); } iprot.readListEnd(); } @@ -565,13 +565,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PutFileMetadataRequ case 2: // METADATA if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list563 = iprot.readListBegin(); - struct.metadata = new ArrayList(_list563.size); - ByteBuffer _elem564; - for (int _i565 = 0; _i565 < _list563.size; ++_i565) + org.apache.thrift.protocol.TList _list595 = iprot.readListBegin(); + struct.metadata = new ArrayList(_list595.size); + ByteBuffer _elem596; + for (int _i597 = 0; _i597 < _list595.size; ++_i597) { - _elem564 = iprot.readBinary(); - struct.metadata.add(_elem564); + _elem596 = iprot.readBinary(); + struct.metadata.add(_elem596); } iprot.readListEnd(); } @@ -605,9 +605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(FILE_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.fileIds.size())); - for (long _iter566 : struct.fileIds) + for (long _iter598 : struct.fileIds) { - oprot.writeI64(_iter566); + oprot.writeI64(_iter598); } oprot.writeListEnd(); } @@ -617,9 +617,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PutFileMetadataReq oprot.writeFieldBegin(METADATA_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.metadata.size())); - for (ByteBuffer _iter567 : struct.metadata) + for (ByteBuffer _iter599 : struct.metadata) { - oprot.writeBinary(_iter567); + oprot.writeBinary(_iter599); } oprot.writeListEnd(); } @@ -651,16 +651,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.fileIds.size()); - for (long _iter568 : 
struct.fileIds) + for (long _iter600 : struct.fileIds) { - oprot.writeI64(_iter568); + oprot.writeI64(_iter600); } } { oprot.writeI32(struct.metadata.size()); - for (ByteBuffer _iter569 : struct.metadata) + for (ByteBuffer _iter601 : struct.metadata) { - oprot.writeBinary(_iter569); + oprot.writeBinary(_iter601); } } BitSet optionals = new BitSet(); @@ -677,24 +677,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequ public void read(org.apache.thrift.protocol.TProtocol prot, PutFileMetadataRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list570 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.fileIds = new ArrayList(_list570.size); - long _elem571; - for (int _i572 = 0; _i572 < _list570.size; ++_i572) + org.apache.thrift.protocol.TList _list602 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.fileIds = new ArrayList(_list602.size); + long _elem603; + for (int _i604 = 0; _i604 < _list602.size; ++_i604) { - _elem571 = iprot.readI64(); - struct.fileIds.add(_elem571); + _elem603 = iprot.readI64(); + struct.fileIds.add(_elem603); } } struct.setFileIdsIsSet(true); { - org.apache.thrift.protocol.TList _list573 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.metadata = new ArrayList(_list573.size); - ByteBuffer _elem574; - for (int _i575 = 0; _i575 < _list573.size; ++_i575) + org.apache.thrift.protocol.TList _list605 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.metadata = new ArrayList(_list605.size); + ByteBuffer _elem606; + for (int _i607 = 0; _i607 < _list605.size; ++_i607) { - _elem574 = iprot.readBinary(); - struct.metadata.add(_elem574); + _elem606 = iprot.readBinary(); + struct.metadata.add(_elem606); } } struct.setMetadataIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java index 7f8a044..7368e9a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java @@ -168,13 +168,13 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == NAMES_FIELD_DESC.type) { List names; { - org.apache.thrift.protocol.TList _list404 = iprot.readListBegin(); - names = new ArrayList(_list404.size); - String _elem405; - for (int _i406 = 0; _i406 < _list404.size; ++_i406) + org.apache.thrift.protocol.TList _list436 = iprot.readListBegin(); + names = new ArrayList(_list436.size); + String _elem437; + for (int _i438 = 0; _i438 < _list436.size; ++_i438) { - _elem405 = iprot.readString(); - names.add(_elem405); + _elem437 = iprot.readString(); + names.add(_elem437); } iprot.readListEnd(); } @@ -187,14 +187,14 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == EXPRS_FIELD_DESC.type) { List exprs; { - org.apache.thrift.protocol.TList _list407 = iprot.readListBegin(); - exprs = new ArrayList(_list407.size); - DropPartitionsExpr _elem408; - for (int _i409 = 0; _i409 < _list407.size; ++_i409) + org.apache.thrift.protocol.TList _list439 = iprot.readListBegin(); + 
exprs = new ArrayList(_list439.size); + DropPartitionsExpr _elem440; + for (int _i441 = 0; _i441 < _list439.size; ++_i441) { - _elem408 = new DropPartitionsExpr(); - _elem408.read(iprot); - exprs.add(_elem408); + _elem440 = new DropPartitionsExpr(); + _elem440.read(iprot); + exprs.add(_elem440); } iprot.readListEnd(); } @@ -219,9 +219,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter410 : names) + for (String _iter442 : names) { - oprot.writeString(_iter410); + oprot.writeString(_iter442); } oprot.writeListEnd(); } @@ -230,9 +230,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter411 : exprs) + for (DropPartitionsExpr _iter443 : exprs) { - _iter411.write(oprot); + _iter443.write(oprot); } oprot.writeListEnd(); } @@ -250,13 +250,13 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case NAMES: List names; { - org.apache.thrift.protocol.TList _list412 = iprot.readListBegin(); - names = new ArrayList(_list412.size); - String _elem413; - for (int _i414 = 0; _i414 < _list412.size; ++_i414) + org.apache.thrift.protocol.TList _list444 = iprot.readListBegin(); + names = new ArrayList(_list444.size); + String _elem445; + for (int _i446 = 0; _i446 < _list444.size; ++_i446) { - _elem413 = iprot.readString(); - names.add(_elem413); + _elem445 = iprot.readString(); + names.add(_elem445); } iprot.readListEnd(); } @@ -264,14 +264,14 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case EXPRS: List exprs; { - org.apache.thrift.protocol.TList _list415 = iprot.readListBegin(); - exprs = new ArrayList(_list415.size); - DropPartitionsExpr _elem416; - for (int _i417 = 0; _i417 < _list415.size; ++_i417) + org.apache.thrift.protocol.TList _list447 = iprot.readListBegin(); + exprs = new ArrayList(_list447.size); + DropPartitionsExpr _elem448; + for (int _i449 = 0; _i449 < _list447.size; ++_i449) { - _elem416 = new DropPartitionsExpr(); - _elem416.read(iprot); - exprs.add(_elem416); + _elem448 = new DropPartitionsExpr(); + _elem448.read(iprot); + exprs.add(_elem448); } iprot.readListEnd(); } @@ -291,9 +291,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter418 : names) + for (String _iter450 : names) { - oprot.writeString(_iter418); + oprot.writeString(_iter450); } oprot.writeListEnd(); } @@ -302,9 +302,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter419 : exprs) + for (DropPartitionsExpr _iter451 : exprs) { - _iter419.write(oprot); + _iter451.write(oprot); } oprot.writeListEnd(); } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java new file mode 100644 index 0000000..0646f3e --- /dev/null +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLForeignKey.java @@ -0,0 +1,1421 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class SQLForeignKey implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLForeignKey"); + + private static final org.apache.thrift.protocol.TField PKTABLE_SCHEM_FIELD_DESC = new org.apache.thrift.protocol.TField("pktable_schem", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField PKTABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("pktable_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PKCOLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("pkcolumn_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField FKTABLE_SCHEM_FIELD_DESC = new org.apache.thrift.protocol.TField("fktable_schem", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField FKTABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fktable_name", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField FKCOLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fkcolumn_name", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)7); + private static final org.apache.thrift.protocol.TField UPDATE_RULE_FIELD_DESC = new org.apache.thrift.protocol.TField("update_rule", org.apache.thrift.protocol.TType.I32, (short)8); + private static final org.apache.thrift.protocol.TField DELETE_RULE_FIELD_DESC = new org.apache.thrift.protocol.TField("delete_rule", org.apache.thrift.protocol.TType.I32, (short)9); + private static final org.apache.thrift.protocol.TField FK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("fk_name", org.apache.thrift.protocol.TType.STRING, (short)10); + private static final org.apache.thrift.protocol.TField PK_NAME_FIELD_DESC = new 
org.apache.thrift.protocol.TField("pk_name", org.apache.thrift.protocol.TType.STRING, (short)11); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new SQLForeignKeyStandardSchemeFactory()); + schemes.put(TupleScheme.class, new SQLForeignKeyTupleSchemeFactory()); + } + + private String pktable_schem; // required + private String pktable_name; // required + private String pkcolumn_name; // required + private String fktable_schem; // required + private String fktable_name; // required + private String fkcolumn_name; // required + private int key_seq; // required + private int update_rule; // required + private int delete_rule; // required + private String fk_name; // required + private String pk_name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + PKTABLE_SCHEM((short)1, "pktable_schem"), + PKTABLE_NAME((short)2, "pktable_name"), + PKCOLUMN_NAME((short)3, "pkcolumn_name"), + FKTABLE_SCHEM((short)4, "fktable_schem"), + FKTABLE_NAME((short)5, "fktable_name"), + FKCOLUMN_NAME((short)6, "fkcolumn_name"), + KEY_SEQ((short)7, "key_seq"), + UPDATE_RULE((short)8, "update_rule"), + DELETE_RULE((short)9, "delete_rule"), + FK_NAME((short)10, "fk_name"), + PK_NAME((short)11, "pk_name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // PKTABLE_SCHEM + return PKTABLE_SCHEM; + case 2: // PKTABLE_NAME + return PKTABLE_NAME; + case 3: // PKCOLUMN_NAME + return PKCOLUMN_NAME; + case 4: // FKTABLE_SCHEM + return FKTABLE_SCHEM; + case 5: // FKTABLE_NAME + return FKTABLE_NAME; + case 6: // FKCOLUMN_NAME + return FKCOLUMN_NAME; + case 7: // KEY_SEQ + return KEY_SEQ; + case 8: // UPDATE_RULE + return UPDATE_RULE; + case 9: // DELETE_RULE + return DELETE_RULE; + case 10: // FK_NAME + return FK_NAME; + case 11: // PK_NAME + return PK_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __KEY_SEQ_ISSET_ID = 0; + private static final int __UPDATE_RULE_ISSET_ID = 1; + private static final int __DELETE_RULE_ISSET_ID = 2; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.PKTABLE_SCHEM, new org.apache.thrift.meta_data.FieldMetaData("pktable_schem", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PKTABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("pktable_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PKCOLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("pkcolumn_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FKTABLE_SCHEM, new org.apache.thrift.meta_data.FieldMetaData("fktable_schem", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FKTABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("fktable_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FKCOLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("fkcolumn_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.KEY_SEQ, new org.apache.thrift.meta_data.FieldMetaData("key_seq", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.UPDATE_RULE, new org.apache.thrift.meta_data.FieldMetaData("update_rule", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.DELETE_RULE, new org.apache.thrift.meta_data.FieldMetaData("delete_rule", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.FK_NAME, new org.apache.thrift.meta_data.FieldMetaData("fk_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.PK_NAME, new org.apache.thrift.meta_data.FieldMetaData("pk_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + 
org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLForeignKey.class, metaDataMap); + } + + public SQLForeignKey() { + } + + public SQLForeignKey( + String pktable_schem, + String pktable_name, + String pkcolumn_name, + String fktable_schem, + String fktable_name, + String fkcolumn_name, + int key_seq, + int update_rule, + int delete_rule, + String fk_name, + String pk_name) + { + this(); + this.pktable_schem = pktable_schem; + this.pktable_name = pktable_name; + this.pkcolumn_name = pkcolumn_name; + this.fktable_schem = fktable_schem; + this.fktable_name = fktable_name; + this.fkcolumn_name = fkcolumn_name; + this.key_seq = key_seq; + setKey_seqIsSet(true); + this.update_rule = update_rule; + setUpdate_ruleIsSet(true); + this.delete_rule = delete_rule; + setDelete_ruleIsSet(true); + this.fk_name = fk_name; + this.pk_name = pk_name; + } + + /** + * Performs a deep copy on other. + */ + public SQLForeignKey(SQLForeignKey other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetPktable_schem()) { + this.pktable_schem = other.pktable_schem; + } + if (other.isSetPktable_name()) { + this.pktable_name = other.pktable_name; + } + if (other.isSetPkcolumn_name()) { + this.pkcolumn_name = other.pkcolumn_name; + } + if (other.isSetFktable_schem()) { + this.fktable_schem = other.fktable_schem; + } + if (other.isSetFktable_name()) { + this.fktable_name = other.fktable_name; + } + if (other.isSetFkcolumn_name()) { + this.fkcolumn_name = other.fkcolumn_name; + } + this.key_seq = other.key_seq; + this.update_rule = other.update_rule; + this.delete_rule = other.delete_rule; + if (other.isSetFk_name()) { + this.fk_name = other.fk_name; + } + if (other.isSetPk_name()) { + this.pk_name = other.pk_name; + } + } + + public SQLForeignKey deepCopy() { + return new SQLForeignKey(this); + } + + @Override + public void clear() { + this.pktable_schem = null; + this.pktable_name = null; + this.pkcolumn_name = null; + this.fktable_schem = null; + this.fktable_name = null; + this.fkcolumn_name = null; + setKey_seqIsSet(false); + this.key_seq = 0; + setUpdate_ruleIsSet(false); + this.update_rule = 0; + setDelete_ruleIsSet(false); + this.delete_rule = 0; + this.fk_name = null; + this.pk_name = null; + } + + public String getPktable_schem() { + return this.pktable_schem; + } + + public void setPktable_schem(String pktable_schem) { + this.pktable_schem = pktable_schem; + } + + public void unsetPktable_schem() { + this.pktable_schem = null; + } + + /** Returns true if field pktable_schem is set (has been assigned a value) and false otherwise */ + public boolean isSetPktable_schem() { + return this.pktable_schem != null; + } + + public void setPktable_schemIsSet(boolean value) { + if (!value) { + this.pktable_schem = null; + } + } + + public String getPktable_name() { + return this.pktable_name; + } + + public void setPktable_name(String pktable_name) { + this.pktable_name = pktable_name; + } + + public void unsetPktable_name() { + this.pktable_name = null; + } + + /** Returns true if field pktable_name is set (has been assigned a value) and false otherwise */ + public boolean isSetPktable_name() { + return this.pktable_name != null; + } + + public void setPktable_nameIsSet(boolean value) { + if (!value) { + this.pktable_name = null; + } + } + + public String getPkcolumn_name() { + return this.pkcolumn_name; + } + + public void setPkcolumn_name(String pkcolumn_name) { + this.pkcolumn_name = pkcolumn_name; + } + + public void unsetPkcolumn_name() { + this.pkcolumn_name = null; + } + + /** Returns 
true if field pkcolumn_name is set (has been assigned a value) and false otherwise */ + public boolean isSetPkcolumn_name() { + return this.pkcolumn_name != null; + } + + public void setPkcolumn_nameIsSet(boolean value) { + if (!value) { + this.pkcolumn_name = null; + } + } + + public String getFktable_schem() { + return this.fktable_schem; + } + + public void setFktable_schem(String fktable_schem) { + this.fktable_schem = fktable_schem; + } + + public void unsetFktable_schem() { + this.fktable_schem = null; + } + + /** Returns true if field fktable_schem is set (has been assigned a value) and false otherwise */ + public boolean isSetFktable_schem() { + return this.fktable_schem != null; + } + + public void setFktable_schemIsSet(boolean value) { + if (!value) { + this.fktable_schem = null; + } + } + + public String getFktable_name() { + return this.fktable_name; + } + + public void setFktable_name(String fktable_name) { + this.fktable_name = fktable_name; + } + + public void unsetFktable_name() { + this.fktable_name = null; + } + + /** Returns true if field fktable_name is set (has been assigned a value) and false otherwise */ + public boolean isSetFktable_name() { + return this.fktable_name != null; + } + + public void setFktable_nameIsSet(boolean value) { + if (!value) { + this.fktable_name = null; + } + } + + public String getFkcolumn_name() { + return this.fkcolumn_name; + } + + public void setFkcolumn_name(String fkcolumn_name) { + this.fkcolumn_name = fkcolumn_name; + } + + public void unsetFkcolumn_name() { + this.fkcolumn_name = null; + } + + /** Returns true if field fkcolumn_name is set (has been assigned a value) and false otherwise */ + public boolean isSetFkcolumn_name() { + return this.fkcolumn_name != null; + } + + public void setFkcolumn_nameIsSet(boolean value) { + if (!value) { + this.fkcolumn_name = null; + } + } + + public int getKey_seq() { + return this.key_seq; + } + + public void setKey_seq(int key_seq) { + this.key_seq = key_seq; + setKey_seqIsSet(true); + } + + public void unsetKey_seq() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEY_SEQ_ISSET_ID); + } + + /** Returns true if field key_seq is set (has been assigned a value) and false otherwise */ + public boolean isSetKey_seq() { + return EncodingUtils.testBit(__isset_bitfield, __KEY_SEQ_ISSET_ID); + } + + public void setKey_seqIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEY_SEQ_ISSET_ID, value); + } + + public int getUpdate_rule() { + return this.update_rule; + } + + public void setUpdate_rule(int update_rule) { + this.update_rule = update_rule; + setUpdate_ruleIsSet(true); + } + + public void unsetUpdate_rule() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __UPDATE_RULE_ISSET_ID); + } + + /** Returns true if field update_rule is set (has been assigned a value) and false otherwise */ + public boolean isSetUpdate_rule() { + return EncodingUtils.testBit(__isset_bitfield, __UPDATE_RULE_ISSET_ID); + } + + public void setUpdate_ruleIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __UPDATE_RULE_ISSET_ID, value); + } + + public int getDelete_rule() { + return this.delete_rule; + } + + public void setDelete_rule(int delete_rule) { + this.delete_rule = delete_rule; + setDelete_ruleIsSet(true); + } + + public void unsetDelete_rule() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETE_RULE_ISSET_ID); + } + + /** Returns true if field delete_rule is set (has been assigned a value) and false 
otherwise */ + public boolean isSetDelete_rule() { + return EncodingUtils.testBit(__isset_bitfield, __DELETE_RULE_ISSET_ID); + } + + public void setDelete_ruleIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETE_RULE_ISSET_ID, value); + } + + public String getFk_name() { + return this.fk_name; + } + + public void setFk_name(String fk_name) { + this.fk_name = fk_name; + } + + public void unsetFk_name() { + this.fk_name = null; + } + + /** Returns true if field fk_name is set (has been assigned a value) and false otherwise */ + public boolean isSetFk_name() { + return this.fk_name != null; + } + + public void setFk_nameIsSet(boolean value) { + if (!value) { + this.fk_name = null; + } + } + + public String getPk_name() { + return this.pk_name; + } + + public void setPk_name(String pk_name) { + this.pk_name = pk_name; + } + + public void unsetPk_name() { + this.pk_name = null; + } + + /** Returns true if field pk_name is set (has been assigned a value) and false otherwise */ + public boolean isSetPk_name() { + return this.pk_name != null; + } + + public void setPk_nameIsSet(boolean value) { + if (!value) { + this.pk_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case PKTABLE_SCHEM: + if (value == null) { + unsetPktable_schem(); + } else { + setPktable_schem((String)value); + } + break; + + case PKTABLE_NAME: + if (value == null) { + unsetPktable_name(); + } else { + setPktable_name((String)value); + } + break; + + case PKCOLUMN_NAME: + if (value == null) { + unsetPkcolumn_name(); + } else { + setPkcolumn_name((String)value); + } + break; + + case FKTABLE_SCHEM: + if (value == null) { + unsetFktable_schem(); + } else { + setFktable_schem((String)value); + } + break; + + case FKTABLE_NAME: + if (value == null) { + unsetFktable_name(); + } else { + setFktable_name((String)value); + } + break; + + case FKCOLUMN_NAME: + if (value == null) { + unsetFkcolumn_name(); + } else { + setFkcolumn_name((String)value); + } + break; + + case KEY_SEQ: + if (value == null) { + unsetKey_seq(); + } else { + setKey_seq((Integer)value); + } + break; + + case UPDATE_RULE: + if (value == null) { + unsetUpdate_rule(); + } else { + setUpdate_rule((Integer)value); + } + break; + + case DELETE_RULE: + if (value == null) { + unsetDelete_rule(); + } else { + setDelete_rule((Integer)value); + } + break; + + case FK_NAME: + if (value == null) { + unsetFk_name(); + } else { + setFk_name((String)value); + } + break; + + case PK_NAME: + if (value == null) { + unsetPk_name(); + } else { + setPk_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case PKTABLE_SCHEM: + return getPktable_schem(); + + case PKTABLE_NAME: + return getPktable_name(); + + case PKCOLUMN_NAME: + return getPkcolumn_name(); + + case FKTABLE_SCHEM: + return getFktable_schem(); + + case FKTABLE_NAME: + return getFktable_name(); + + case FKCOLUMN_NAME: + return getFkcolumn_name(); + + case KEY_SEQ: + return getKey_seq(); + + case UPDATE_RULE: + return getUpdate_rule(); + + case DELETE_RULE: + return getDelete_rule(); + + case FK_NAME: + return getFk_name(); + + case PK_NAME: + return getPk_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case PKTABLE_SCHEM: + 
return isSetPktable_schem(); + case PKTABLE_NAME: + return isSetPktable_name(); + case PKCOLUMN_NAME: + return isSetPkcolumn_name(); + case FKTABLE_SCHEM: + return isSetFktable_schem(); + case FKTABLE_NAME: + return isSetFktable_name(); + case FKCOLUMN_NAME: + return isSetFkcolumn_name(); + case KEY_SEQ: + return isSetKey_seq(); + case UPDATE_RULE: + return isSetUpdate_rule(); + case DELETE_RULE: + return isSetDelete_rule(); + case FK_NAME: + return isSetFk_name(); + case PK_NAME: + return isSetPk_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof SQLForeignKey) + return this.equals((SQLForeignKey)that); + return false; + } + + public boolean equals(SQLForeignKey that) { + if (that == null) + return false; + + boolean this_present_pktable_schem = true && this.isSetPktable_schem(); + boolean that_present_pktable_schem = true && that.isSetPktable_schem(); + if (this_present_pktable_schem || that_present_pktable_schem) { + if (!(this_present_pktable_schem && that_present_pktable_schem)) + return false; + if (!this.pktable_schem.equals(that.pktable_schem)) + return false; + } + + boolean this_present_pktable_name = true && this.isSetPktable_name(); + boolean that_present_pktable_name = true && that.isSetPktable_name(); + if (this_present_pktable_name || that_present_pktable_name) { + if (!(this_present_pktable_name && that_present_pktable_name)) + return false; + if (!this.pktable_name.equals(that.pktable_name)) + return false; + } + + boolean this_present_pkcolumn_name = true && this.isSetPkcolumn_name(); + boolean that_present_pkcolumn_name = true && that.isSetPkcolumn_name(); + if (this_present_pkcolumn_name || that_present_pkcolumn_name) { + if (!(this_present_pkcolumn_name && that_present_pkcolumn_name)) + return false; + if (!this.pkcolumn_name.equals(that.pkcolumn_name)) + return false; + } + + boolean this_present_fktable_schem = true && this.isSetFktable_schem(); + boolean that_present_fktable_schem = true && that.isSetFktable_schem(); + if (this_present_fktable_schem || that_present_fktable_schem) { + if (!(this_present_fktable_schem && that_present_fktable_schem)) + return false; + if (!this.fktable_schem.equals(that.fktable_schem)) + return false; + } + + boolean this_present_fktable_name = true && this.isSetFktable_name(); + boolean that_present_fktable_name = true && that.isSetFktable_name(); + if (this_present_fktable_name || that_present_fktable_name) { + if (!(this_present_fktable_name && that_present_fktable_name)) + return false; + if (!this.fktable_name.equals(that.fktable_name)) + return false; + } + + boolean this_present_fkcolumn_name = true && this.isSetFkcolumn_name(); + boolean that_present_fkcolumn_name = true && that.isSetFkcolumn_name(); + if (this_present_fkcolumn_name || that_present_fkcolumn_name) { + if (!(this_present_fkcolumn_name && that_present_fkcolumn_name)) + return false; + if (!this.fkcolumn_name.equals(that.fkcolumn_name)) + return false; + } + + boolean this_present_key_seq = true; + boolean that_present_key_seq = true; + if (this_present_key_seq || that_present_key_seq) { + if (!(this_present_key_seq && that_present_key_seq)) + return false; + if (this.key_seq != that.key_seq) + return false; + } + + boolean this_present_update_rule = true; + boolean that_present_update_rule = true; + if (this_present_update_rule || that_present_update_rule) { + if (!(this_present_update_rule && that_present_update_rule)) + return false; + if 
(this.update_rule != that.update_rule) + return false; + } + + boolean this_present_delete_rule = true; + boolean that_present_delete_rule = true; + if (this_present_delete_rule || that_present_delete_rule) { + if (!(this_present_delete_rule && that_present_delete_rule)) + return false; + if (this.delete_rule != that.delete_rule) + return false; + } + + boolean this_present_fk_name = true && this.isSetFk_name(); + boolean that_present_fk_name = true && that.isSetFk_name(); + if (this_present_fk_name || that_present_fk_name) { + if (!(this_present_fk_name && that_present_fk_name)) + return false; + if (!this.fk_name.equals(that.fk_name)) + return false; + } + + boolean this_present_pk_name = true && this.isSetPk_name(); + boolean that_present_pk_name = true && that.isSetPk_name(); + if (this_present_pk_name || that_present_pk_name) { + if (!(this_present_pk_name && that_present_pk_name)) + return false; + if (!this.pk_name.equals(that.pk_name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_pktable_schem = true && (isSetPktable_schem()); + list.add(present_pktable_schem); + if (present_pktable_schem) + list.add(pktable_schem); + + boolean present_pktable_name = true && (isSetPktable_name()); + list.add(present_pktable_name); + if (present_pktable_name) + list.add(pktable_name); + + boolean present_pkcolumn_name = true && (isSetPkcolumn_name()); + list.add(present_pkcolumn_name); + if (present_pkcolumn_name) + list.add(pkcolumn_name); + + boolean present_fktable_schem = true && (isSetFktable_schem()); + list.add(present_fktable_schem); + if (present_fktable_schem) + list.add(fktable_schem); + + boolean present_fktable_name = true && (isSetFktable_name()); + list.add(present_fktable_name); + if (present_fktable_name) + list.add(fktable_name); + + boolean present_fkcolumn_name = true && (isSetFkcolumn_name()); + list.add(present_fkcolumn_name); + if (present_fkcolumn_name) + list.add(fkcolumn_name); + + boolean present_key_seq = true; + list.add(present_key_seq); + if (present_key_seq) + list.add(key_seq); + + boolean present_update_rule = true; + list.add(present_update_rule); + if (present_update_rule) + list.add(update_rule); + + boolean present_delete_rule = true; + list.add(present_delete_rule); + if (present_delete_rule) + list.add(delete_rule); + + boolean present_fk_name = true && (isSetFk_name()); + list.add(present_fk_name); + if (present_fk_name) + list.add(fk_name); + + boolean present_pk_name = true && (isSetPk_name()); + list.add(present_pk_name); + if (present_pk_name) + list.add(pk_name); + + return list.hashCode(); + } + + @Override + public int compareTo(SQLForeignKey other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetPktable_schem()).compareTo(other.isSetPktable_schem()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPktable_schem()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pktable_schem, other.pktable_schem); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPktable_name()).compareTo(other.isSetPktable_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPktable_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pktable_name, other.pktable_name); + if (lastComparison != 0) { + return lastComparison; + 
} + } + lastComparison = Boolean.valueOf(isSetPkcolumn_name()).compareTo(other.isSetPkcolumn_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPkcolumn_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pkcolumn_name, other.pkcolumn_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFktable_schem()).compareTo(other.isSetFktable_schem()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFktable_schem()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fktable_schem, other.fktable_schem); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFktable_name()).compareTo(other.isSetFktable_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFktable_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fktable_name, other.fktable_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFkcolumn_name()).compareTo(other.isSetFkcolumn_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFkcolumn_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fkcolumn_name, other.fkcolumn_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetKey_seq()).compareTo(other.isSetKey_seq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey_seq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_seq, other.key_seq); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetUpdate_rule()).compareTo(other.isSetUpdate_rule()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetUpdate_rule()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.update_rule, other.update_rule); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDelete_rule()).compareTo(other.isSetDelete_rule()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDelete_rule()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.delete_rule, other.delete_rule); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetFk_name()).compareTo(other.isSetFk_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetFk_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.fk_name, other.fk_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPk_name()).compareTo(other.isSetPk_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPk_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pk_name, other.pk_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("SQLForeignKey("); + boolean first = true; + + sb.append("pktable_schem:"); + if (this.pktable_schem == null) { + sb.append("null"); + } else { + sb.append(this.pktable_schem); + } + first = false; + if (!first) sb.append(", "); + sb.append("pktable_name:"); + if (this.pktable_name == null) { + sb.append("null"); + } else { + sb.append(this.pktable_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("pkcolumn_name:"); + if (this.pkcolumn_name == null) { + sb.append("null"); + } else { + sb.append(this.pkcolumn_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("fktable_schem:"); + if (this.fktable_schem == null) { + sb.append("null"); + } else { + sb.append(this.fktable_schem); + } + first = false; + if (!first) sb.append(", "); + sb.append("fktable_name:"); + if (this.fktable_name == null) { + sb.append("null"); + } else { + sb.append(this.fktable_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("fkcolumn_name:"); + if (this.fkcolumn_name == null) { + sb.append("null"); + } else { + sb.append(this.fkcolumn_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("key_seq:"); + sb.append(this.key_seq); + first = false; + if (!first) sb.append(", "); + sb.append("update_rule:"); + sb.append(this.update_rule); + first = false; + if (!first) sb.append(", "); + sb.append("delete_rule:"); + sb.append(this.delete_rule); + first = false; + if (!first) sb.append(", "); + sb.append("fk_name:"); + if (this.fk_name == null) { + sb.append("null"); + } else { + sb.append(this.fk_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("pk_name:"); + if (this.pk_name == null) { + sb.append("null"); + } else { + sb.append(this.pk_name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
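+      // Because deserialization bypasses the no-arg constructor, the explicit reset
+      // below guarantees the primitive-field tracking bits start clean before read()
+      // repopulates this struct from the compact-protocol stream.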
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class SQLForeignKeyStandardSchemeFactory implements SchemeFactory { + public SQLForeignKeyStandardScheme getScheme() { + return new SQLForeignKeyStandardScheme(); + } + } + + private static class SQLForeignKeyStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, SQLForeignKey struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // PKTABLE_SCHEM + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.pktable_schem = iprot.readString(); + struct.setPktable_schemIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // PKTABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.pktable_name = iprot.readString(); + struct.setPktable_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PKCOLUMN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.pkcolumn_name = iprot.readString(); + struct.setPkcolumn_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // FKTABLE_SCHEM + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.fktable_schem = iprot.readString(); + struct.setFktable_schemIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // FKTABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.fktable_name = iprot.readString(); + struct.setFktable_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // FKCOLUMN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.fkcolumn_name = iprot.readString(); + struct.setFkcolumn_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // KEY_SEQ + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.key_seq = iprot.readI32(); + struct.setKey_seqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 8: // UPDATE_RULE + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.update_rule = iprot.readI32(); + struct.setUpdate_ruleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 9: // DELETE_RULE + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.delete_rule = iprot.readI32(); + struct.setDelete_ruleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 10: // FK_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.fk_name = iprot.readString(); + struct.setFk_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } 
+ break; + case 11: // PK_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.pk_name = iprot.readString(); + struct.setPk_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, SQLForeignKey struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.pktable_schem != null) { + oprot.writeFieldBegin(PKTABLE_SCHEM_FIELD_DESC); + oprot.writeString(struct.pktable_schem); + oprot.writeFieldEnd(); + } + if (struct.pktable_name != null) { + oprot.writeFieldBegin(PKTABLE_NAME_FIELD_DESC); + oprot.writeString(struct.pktable_name); + oprot.writeFieldEnd(); + } + if (struct.pkcolumn_name != null) { + oprot.writeFieldBegin(PKCOLUMN_NAME_FIELD_DESC); + oprot.writeString(struct.pkcolumn_name); + oprot.writeFieldEnd(); + } + if (struct.fktable_schem != null) { + oprot.writeFieldBegin(FKTABLE_SCHEM_FIELD_DESC); + oprot.writeString(struct.fktable_schem); + oprot.writeFieldEnd(); + } + if (struct.fktable_name != null) { + oprot.writeFieldBegin(FKTABLE_NAME_FIELD_DESC); + oprot.writeString(struct.fktable_name); + oprot.writeFieldEnd(); + } + if (struct.fkcolumn_name != null) { + oprot.writeFieldBegin(FKCOLUMN_NAME_FIELD_DESC); + oprot.writeString(struct.fkcolumn_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(KEY_SEQ_FIELD_DESC); + oprot.writeI32(struct.key_seq); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(UPDATE_RULE_FIELD_DESC); + oprot.writeI32(struct.update_rule); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(DELETE_RULE_FIELD_DESC); + oprot.writeI32(struct.delete_rule); + oprot.writeFieldEnd(); + if (struct.fk_name != null) { + oprot.writeFieldBegin(FK_NAME_FIELD_DESC); + oprot.writeString(struct.fk_name); + oprot.writeFieldEnd(); + } + if (struct.pk_name != null) { + oprot.writeFieldBegin(PK_NAME_FIELD_DESC); + oprot.writeString(struct.pk_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class SQLForeignKeyTupleSchemeFactory implements SchemeFactory { + public SQLForeignKeyTupleScheme getScheme() { + return new SQLForeignKeyTupleScheme(); + } + } + + private static class SQLForeignKeyTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetPktable_schem()) { + optionals.set(0); + } + if (struct.isSetPktable_name()) { + optionals.set(1); + } + if (struct.isSetPkcolumn_name()) { + optionals.set(2); + } + if (struct.isSetFktable_schem()) { + optionals.set(3); + } + if (struct.isSetFktable_name()) { + optionals.set(4); + } + if (struct.isSetFkcolumn_name()) { + optionals.set(5); + } + if (struct.isSetKey_seq()) { + optionals.set(6); + } + if (struct.isSetUpdate_rule()) { + optionals.set(7); + } + if (struct.isSetDelete_rule()) { + optionals.set(8); + } + if (struct.isSetFk_name()) { + optionals.set(9); + } + if (struct.isSetPk_name()) { + optionals.set(10); + } + oprot.writeBitSet(optionals, 11); + if (struct.isSetPktable_schem()) { + oprot.writeString(struct.pktable_schem); + } + if (struct.isSetPktable_name()) { + 
oprot.writeString(struct.pktable_name); + } + if (struct.isSetPkcolumn_name()) { + oprot.writeString(struct.pkcolumn_name); + } + if (struct.isSetFktable_schem()) { + oprot.writeString(struct.fktable_schem); + } + if (struct.isSetFktable_name()) { + oprot.writeString(struct.fktable_name); + } + if (struct.isSetFkcolumn_name()) { + oprot.writeString(struct.fkcolumn_name); + } + if (struct.isSetKey_seq()) { + oprot.writeI32(struct.key_seq); + } + if (struct.isSetUpdate_rule()) { + oprot.writeI32(struct.update_rule); + } + if (struct.isSetDelete_rule()) { + oprot.writeI32(struct.delete_rule); + } + if (struct.isSetFk_name()) { + oprot.writeString(struct.fk_name); + } + if (struct.isSetPk_name()) { + oprot.writeString(struct.pk_name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, SQLForeignKey struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(11); + if (incoming.get(0)) { + struct.pktable_schem = iprot.readString(); + struct.setPktable_schemIsSet(true); + } + if (incoming.get(1)) { + struct.pktable_name = iprot.readString(); + struct.setPktable_nameIsSet(true); + } + if (incoming.get(2)) { + struct.pkcolumn_name = iprot.readString(); + struct.setPkcolumn_nameIsSet(true); + } + if (incoming.get(3)) { + struct.fktable_schem = iprot.readString(); + struct.setFktable_schemIsSet(true); + } + if (incoming.get(4)) { + struct.fktable_name = iprot.readString(); + struct.setFktable_nameIsSet(true); + } + if (incoming.get(5)) { + struct.fkcolumn_name = iprot.readString(); + struct.setFkcolumn_nameIsSet(true); + } + if (incoming.get(6)) { + struct.key_seq = iprot.readI32(); + struct.setKey_seqIsSet(true); + } + if (incoming.get(7)) { + struct.update_rule = iprot.readI32(); + struct.setUpdate_ruleIsSet(true); + } + if (incoming.get(8)) { + struct.delete_rule = iprot.readI32(); + struct.setDelete_ruleIsSet(true); + } + if (incoming.get(9)) { + struct.fk_name = iprot.readString(); + struct.setFk_nameIsSet(true); + } + if (incoming.get(10)) { + struct.pk_name = iprot.readString(); + struct.setPk_nameIsSet(true); + } + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java new file mode 100644 index 0000000..58977af --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SQLPrimaryKey.java @@ -0,0 +1,809 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import 
javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class SQLPrimaryKey implements org.apache.thrift.TBase<SQLPrimaryKey, SQLPrimaryKey._Fields>, java.io.Serializable, Cloneable, Comparable<SQLPrimaryKey> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SQLPrimaryKey");
+
+  private static final org.apache.thrift.protocol.TField TABLE_SCHEM_FIELD_DESC = new org.apache.thrift.protocol.TField("table_schem", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("table_name", org.apache.thrift.protocol.TType.STRING, (short)2);
+  private static final org.apache.thrift.protocol.TField COLUMN_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("column_name", org.apache.thrift.protocol.TType.STRING, (short)3);
+  private static final org.apache.thrift.protocol.TField KEY_SEQ_FIELD_DESC = new org.apache.thrift.protocol.TField("key_seq", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField PK_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("pk_name", org.apache.thrift.protocol.TType.STRING, (short)5);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new SQLPrimaryKeyStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new SQLPrimaryKeyTupleSchemeFactory());
+  }
+
+  private String table_schem; // required
+  private String table_name; // required
+  private String column_name; // required
+  private int key_seq; // required
+  private String pk_name; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    TABLE_SCHEM((short)1, "table_schem"),
+    TABLE_NAME((short)2, "table_name"),
+    COLUMN_NAME((short)3, "column_name"),
+    KEY_SEQ((short)4, "key_seq"),
+    PK_NAME((short)5, "pk_name");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // TABLE_SCHEM
+          return TABLE_SCHEM;
+        case 2: // TABLE_NAME
+          return TABLE_NAME;
+        case 3: // COLUMN_NAME
+          return COLUMN_NAME;
+        case 4: // KEY_SEQ
+          return KEY_SEQ;
+        case 5: // PK_NAME
+          return PK_NAME;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found.
+     */
+    public static _Fields findByThriftIdOrThrow(int fieldId) {
+      _Fields fields = findByThriftId(fieldId);
+      if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+      return fields;
+    }
+
+    /**
+     * Find the _Fields constant that matches name, or null if its not found.
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __KEY_SEQ_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TABLE_SCHEM, new org.apache.thrift.meta_data.FieldMetaData("table_schem", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("table_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.COLUMN_NAME, new org.apache.thrift.meta_data.FieldMetaData("column_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.KEY_SEQ, new org.apache.thrift.meta_data.FieldMetaData("key_seq", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.PK_NAME, new org.apache.thrift.meta_data.FieldMetaData("pk_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SQLPrimaryKey.class, metaDataMap); + } + + public SQLPrimaryKey() { + } + + public SQLPrimaryKey( + String table_schem, + String table_name, + String column_name, + int key_seq, + String pk_name) + { + this(); + this.table_schem = table_schem; + this.table_name = table_name; + this.column_name = column_name; + this.key_seq = key_seq; + setKey_seqIsSet(true); + this.pk_name = pk_name; + } + + /** + * Performs a deep copy on other. 
+ */ + public SQLPrimaryKey(SQLPrimaryKey other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetTable_schem()) { + this.table_schem = other.table_schem; + } + if (other.isSetTable_name()) { + this.table_name = other.table_name; + } + if (other.isSetColumn_name()) { + this.column_name = other.column_name; + } + this.key_seq = other.key_seq; + if (other.isSetPk_name()) { + this.pk_name = other.pk_name; + } + } + + public SQLPrimaryKey deepCopy() { + return new SQLPrimaryKey(this); + } + + @Override + public void clear() { + this.table_schem = null; + this.table_name = null; + this.column_name = null; + setKey_seqIsSet(false); + this.key_seq = 0; + this.pk_name = null; + } + + public String getTable_schem() { + return this.table_schem; + } + + public void setTable_schem(String table_schem) { + this.table_schem = table_schem; + } + + public void unsetTable_schem() { + this.table_schem = null; + } + + /** Returns true if field table_schem is set (has been assigned a value) and false otherwise */ + public boolean isSetTable_schem() { + return this.table_schem != null; + } + + public void setTable_schemIsSet(boolean value) { + if (!value) { + this.table_schem = null; + } + } + + public String getTable_name() { + return this.table_name; + } + + public void setTable_name(String table_name) { + this.table_name = table_name; + } + + public void unsetTable_name() { + this.table_name = null; + } + + /** Returns true if field table_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTable_name() { + return this.table_name != null; + } + + public void setTable_nameIsSet(boolean value) { + if (!value) { + this.table_name = null; + } + } + + public String getColumn_name() { + return this.column_name; + } + + public void setColumn_name(String column_name) { + this.column_name = column_name; + } + + public void unsetColumn_name() { + this.column_name = null; + } + + /** Returns true if field column_name is set (has been assigned a value) and false otherwise */ + public boolean isSetColumn_name() { + return this.column_name != null; + } + + public void setColumn_nameIsSet(boolean value) { + if (!value) { + this.column_name = null; + } + } + + public int getKey_seq() { + return this.key_seq; + } + + public void setKey_seq(int key_seq) { + this.key_seq = key_seq; + setKey_seqIsSet(true); + } + + public void unsetKey_seq() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __KEY_SEQ_ISSET_ID); + } + + /** Returns true if field key_seq is set (has been assigned a value) and false otherwise */ + public boolean isSetKey_seq() { + return EncodingUtils.testBit(__isset_bitfield, __KEY_SEQ_ISSET_ID); + } + + public void setKey_seqIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __KEY_SEQ_ISSET_ID, value); + } + + public String getPk_name() { + return this.pk_name; + } + + public void setPk_name(String pk_name) { + this.pk_name = pk_name; + } + + public void unsetPk_name() { + this.pk_name = null; + } + + /** Returns true if field pk_name is set (has been assigned a value) and false otherwise */ + public boolean isSetPk_name() { + return this.pk_name != null; + } + + public void setPk_nameIsSet(boolean value) { + if (!value) { + this.pk_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TABLE_SCHEM: + if (value == null) { + unsetTable_schem(); + } else { + setTable_schem((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTable_name(); + } 
else { + setTable_name((String)value); + } + break; + + case COLUMN_NAME: + if (value == null) { + unsetColumn_name(); + } else { + setColumn_name((String)value); + } + break; + + case KEY_SEQ: + if (value == null) { + unsetKey_seq(); + } else { + setKey_seq((Integer)value); + } + break; + + case PK_NAME: + if (value == null) { + unsetPk_name(); + } else { + setPk_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TABLE_SCHEM: + return getTable_schem(); + + case TABLE_NAME: + return getTable_name(); + + case COLUMN_NAME: + return getColumn_name(); + + case KEY_SEQ: + return getKey_seq(); + + case PK_NAME: + return getPk_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TABLE_SCHEM: + return isSetTable_schem(); + case TABLE_NAME: + return isSetTable_name(); + case COLUMN_NAME: + return isSetColumn_name(); + case KEY_SEQ: + return isSetKey_seq(); + case PK_NAME: + return isSetPk_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof SQLPrimaryKey) + return this.equals((SQLPrimaryKey)that); + return false; + } + + public boolean equals(SQLPrimaryKey that) { + if (that == null) + return false; + + boolean this_present_table_schem = true && this.isSetTable_schem(); + boolean that_present_table_schem = true && that.isSetTable_schem(); + if (this_present_table_schem || that_present_table_schem) { + if (!(this_present_table_schem && that_present_table_schem)) + return false; + if (!this.table_schem.equals(that.table_schem)) + return false; + } + + boolean this_present_table_name = true && this.isSetTable_name(); + boolean that_present_table_name = true && that.isSetTable_name(); + if (this_present_table_name || that_present_table_name) { + if (!(this_present_table_name && that_present_table_name)) + return false; + if (!this.table_name.equals(that.table_name)) + return false; + } + + boolean this_present_column_name = true && this.isSetColumn_name(); + boolean that_present_column_name = true && that.isSetColumn_name(); + if (this_present_column_name || that_present_column_name) { + if (!(this_present_column_name && that_present_column_name)) + return false; + if (!this.column_name.equals(that.column_name)) + return false; + } + + boolean this_present_key_seq = true; + boolean that_present_key_seq = true; + if (this_present_key_seq || that_present_key_seq) { + if (!(this_present_key_seq && that_present_key_seq)) + return false; + if (this.key_seq != that.key_seq) + return false; + } + + boolean this_present_pk_name = true && this.isSetPk_name(); + boolean that_present_pk_name = true && that.isSetPk_name(); + if (this_present_pk_name || that_present_pk_name) { + if (!(this_present_pk_name && that_present_pk_name)) + return false; + if (!this.pk_name.equals(that.pk_name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_table_schem = true && (isSetTable_schem()); + list.add(present_table_schem); + if (present_table_schem) + list.add(table_schem); + + boolean present_table_name = true && (isSetTable_name()); + list.add(present_table_name); + if (present_table_name) + list.add(table_name); + + 
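+    // hashCode() mirrors equals(): each field contributes a presence flag and, only
+    // when present, its value, so unset fields hash consistently across instances.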
boolean present_column_name = true && (isSetColumn_name()); + list.add(present_column_name); + if (present_column_name) + list.add(column_name); + + boolean present_key_seq = true; + list.add(present_key_seq); + if (present_key_seq) + list.add(key_seq); + + boolean present_pk_name = true && (isSetPk_name()); + list.add(present_pk_name); + if (present_pk_name) + list.add(pk_name); + + return list.hashCode(); + } + + @Override + public int compareTo(SQLPrimaryKey other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTable_schem()).compareTo(other.isSetTable_schem()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTable_schem()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_schem, other.table_schem); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTable_name()).compareTo(other.isSetTable_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTable_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.table_name, other.table_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetColumn_name()).compareTo(other.isSetColumn_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetColumn_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.column_name, other.column_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetKey_seq()).compareTo(other.isSetKey_seq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetKey_seq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.key_seq, other.key_seq); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetPk_name()).compareTo(other.isSetPk_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPk_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.pk_name, other.pk_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("SQLPrimaryKey("); + boolean first = true; + + sb.append("table_schem:"); + if (this.table_schem == null) { + sb.append("null"); + } else { + sb.append(this.table_schem); + } + first = false; + if (!first) sb.append(", "); + sb.append("table_name:"); + if (this.table_name == null) { + sb.append("null"); + } else { + sb.append(this.table_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("column_name:"); + if (this.column_name == null) { + sb.append("null"); + } else { + sb.append(this.column_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("key_seq:"); + sb.append(this.key_seq); + first = false; + if (!first) sb.append(", "); + sb.append("pk_name:"); + if (this.pk_name == null) { + sb.append("null"); + } 
else { + sb.append(this.pk_name); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class SQLPrimaryKeyStandardSchemeFactory implements SchemeFactory { + public SQLPrimaryKeyStandardScheme getScheme() { + return new SQLPrimaryKeyStandardScheme(); + } + } + + private static class SQLPrimaryKeyStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, SQLPrimaryKey struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TABLE_SCHEM + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.table_schem = iprot.readString(); + struct.setTable_schemIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.table_name = iprot.readString(); + struct.setTable_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // COLUMN_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.column_name = iprot.readString(); + struct.setColumn_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // KEY_SEQ + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.key_seq = iprot.readI32(); + struct.setKey_seqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // PK_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.pk_name = iprot.readString(); + struct.setPk_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, SQLPrimaryKey struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.table_schem != null) { + oprot.writeFieldBegin(TABLE_SCHEM_FIELD_DESC); + oprot.writeString(struct.table_schem); + oprot.writeFieldEnd(); + } + if (struct.table_name != null) { + 
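+        // table_name, like the other string fields here, is only written when non-null;
+        // absent fields are omitted from the struct rather than encoded as null.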
oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC);
+        oprot.writeString(struct.table_name);
+        oprot.writeFieldEnd();
+      }
+      if (struct.column_name != null) {
+        oprot.writeFieldBegin(COLUMN_NAME_FIELD_DESC);
+        oprot.writeString(struct.column_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldBegin(KEY_SEQ_FIELD_DESC);
+      oprot.writeI32(struct.key_seq);
+      oprot.writeFieldEnd();
+      if (struct.pk_name != null) {
+        oprot.writeFieldBegin(PK_NAME_FIELD_DESC);
+        oprot.writeString(struct.pk_name);
+        oprot.writeFieldEnd();
+      }
+      oprot.writeFieldStop();
+      oprot.writeStructEnd();
+    }
+
+  }
+
+  private static class SQLPrimaryKeyTupleSchemeFactory implements SchemeFactory {
+    public SQLPrimaryKeyTupleScheme getScheme() {
+      return new SQLPrimaryKeyTupleScheme();
+    }
+  }
+
+  private static class SQLPrimaryKeyTupleScheme extends TupleScheme<SQLPrimaryKey> {
+
+    @Override
+    public void write(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struct) throws org.apache.thrift.TException {
+      TTupleProtocol oprot = (TTupleProtocol) prot;
+      BitSet optionals = new BitSet();
+      if (struct.isSetTable_schem()) {
+        optionals.set(0);
+      }
+      if (struct.isSetTable_name()) {
+        optionals.set(1);
+      }
+      if (struct.isSetColumn_name()) {
+        optionals.set(2);
+      }
+      if (struct.isSetKey_seq()) {
+        optionals.set(3);
+      }
+      if (struct.isSetPk_name()) {
+        optionals.set(4);
+      }
+      oprot.writeBitSet(optionals, 5);
+      if (struct.isSetTable_schem()) {
+        oprot.writeString(struct.table_schem);
+      }
+      if (struct.isSetTable_name()) {
+        oprot.writeString(struct.table_name);
+      }
+      if (struct.isSetColumn_name()) {
+        oprot.writeString(struct.column_name);
+      }
+      if (struct.isSetKey_seq()) {
+        oprot.writeI32(struct.key_seq);
+      }
+      if (struct.isSetPk_name()) {
+        oprot.writeString(struct.pk_name);
+      }
+    }
+
+    @Override
+    public void read(org.apache.thrift.protocol.TProtocol prot, SQLPrimaryKey struct) throws org.apache.thrift.TException {
+      TTupleProtocol iprot = (TTupleProtocol) prot;
+      BitSet incoming = iprot.readBitSet(5);
+      if (incoming.get(0)) {
+        struct.table_schem = iprot.readString();
+        struct.setTable_schemIsSet(true);
+      }
+      if (incoming.get(1)) {
+        struct.table_name = iprot.readString();
+        struct.setTable_nameIsSet(true);
+      }
+      if (incoming.get(2)) {
+        struct.column_name = iprot.readString();
+        struct.setColumn_nameIsSet(true);
+      }
+      if (incoming.get(3)) {
+        struct.key_seq = iprot.readI32();
+        struct.setKey_seqIsSet(true);
+      }
+      if (incoming.get(4)) {
+        struct.pk_name = iprot.readString();
+        struct.setPk_nameIsSet(true);
+      }
+    }
+  }
+
+}
+
diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
index 75d0ebb..0bb76b3 100644
--- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
+++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java
@@ -445,14 +445,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro
           case 1: // FIELD_SCHEMAS
             if (schemeField.type == org.apache.thrift.protocol.TType.LIST) {
               {
-                org.apache.thrift.protocol.TList _list294 = iprot.readListBegin();
-                struct.fieldSchemas = new ArrayList<FieldSchema>(_list294.size);
-                FieldSchema _elem295;
-                for (int _i296 = 0; _i296 < _list294.size; ++_i296)
+                org.apache.thrift.protocol.TList _list326 = iprot.readListBegin();
+                struct.fieldSchemas = new ArrayList<FieldSchema>(_list326.size);
+                FieldSchema _elem327;
+                for (int _i328 = 0; _i328 < _list326.size; ++_i328)
                 {
-                  _elem295 = new FieldSchema();
-
_elem295.read(iprot); - struct.fieldSchemas.add(_elem295); + _elem327 = new FieldSchema(); + _elem327.read(iprot); + struct.fieldSchemas.add(_elem327); } iprot.readListEnd(); } @@ -464,15 +464,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 2: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map297 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map297.size); - String _key298; - String _val299; - for (int _i300 = 0; _i300 < _map297.size; ++_i300) + org.apache.thrift.protocol.TMap _map329 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map329.size); + String _key330; + String _val331; + for (int _i332 = 0; _i332 < _map329.size; ++_i332) { - _key298 = iprot.readString(); - _val299 = iprot.readString(); - struct.properties.put(_key298, _val299); + _key330 = iprot.readString(); + _val331 = iprot.readString(); + struct.properties.put(_key330, _val331); } iprot.readMapEnd(); } @@ -498,9 +498,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(FIELD_SCHEMAS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fieldSchemas.size())); - for (FieldSchema _iter301 : struct.fieldSchemas) + for (FieldSchema _iter333 : struct.fieldSchemas) { - _iter301.write(oprot); + _iter333.write(oprot); } oprot.writeListEnd(); } @@ -510,10 +510,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter302 : struct.properties.entrySet()) + for (Map.Entry _iter334 : struct.properties.entrySet()) { - oprot.writeString(_iter302.getKey()); - oprot.writeString(_iter302.getValue()); + oprot.writeString(_iter334.getKey()); + oprot.writeString(_iter334.getValue()); } oprot.writeMapEnd(); } @@ -547,19 +547,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Schema struct) thro if (struct.isSetFieldSchemas()) { { oprot.writeI32(struct.fieldSchemas.size()); - for (FieldSchema _iter303 : struct.fieldSchemas) + for (FieldSchema _iter335 : struct.fieldSchemas) { - _iter303.write(oprot); + _iter335.write(oprot); } } } if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter304 : struct.properties.entrySet()) + for (Map.Entry _iter336 : struct.properties.entrySet()) { - oprot.writeString(_iter304.getKey()); - oprot.writeString(_iter304.getValue()); + oprot.writeString(_iter336.getKey()); + oprot.writeString(_iter336.getValue()); } } } @@ -571,29 +571,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Schema struct) throw BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list305 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.fieldSchemas = new ArrayList(_list305.size); - FieldSchema _elem306; - for (int _i307 = 0; _i307 < _list305.size; ++_i307) + org.apache.thrift.protocol.TList _list337 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.fieldSchemas = new ArrayList(_list337.size); + FieldSchema _elem338; + for (int _i339 = 0; _i339 < _list337.size; ++_i339) { - _elem306 = new 
FieldSchema(); - _elem306.read(iprot); - struct.fieldSchemas.add(_elem306); + _elem338 = new FieldSchema(); + _elem338.read(iprot); + struct.fieldSchemas.add(_elem338); } } struct.setFieldSchemasIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map308 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map308.size); - String _key309; - String _val310; - for (int _i311 = 0; _i311 < _map308.size; ++_i311) + org.apache.thrift.protocol.TMap _map340 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map340.size); + String _key341; + String _val342; + for (int _i343 = 0; _i343 < _map340.size; ++_i343) { - _key309 = iprot.readString(); - _val310 = iprot.readString(); - struct.properties.put(_key309, _val310); + _key341 = iprot.readString(); + _val342 = iprot.readString(); + struct.properties.put(_key341, _val342); } } struct.setPropertiesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java index d5a8d99..9331400 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SerDeInfo.java @@ -522,15 +522,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SerDeInfo struct) t case 3: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map104 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map104.size); - String _key105; - String _val106; - for (int _i107 = 0; _i107 < _map104.size; ++_i107) + org.apache.thrift.protocol.TMap _map120 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map120.size); + String _key121; + String _val122; + for (int _i123 = 0; _i123 < _map120.size; ++_i123) { - _key105 = iprot.readString(); - _val106 = iprot.readString(); - struct.parameters.put(_key105, _val106); + _key121 = iprot.readString(); + _val122 = iprot.readString(); + struct.parameters.put(_key121, _val122); } iprot.readMapEnd(); } @@ -566,10 +566,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SerDeInfo struct) oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter108 : struct.parameters.entrySet()) + for (Map.Entry _iter124 : struct.parameters.entrySet()) { - oprot.writeString(_iter108.getKey()); - oprot.writeString(_iter108.getValue()); + oprot.writeString(_iter124.getKey()); + oprot.writeString(_iter124.getValue()); } oprot.writeMapEnd(); } @@ -612,10 +612,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SerDeInfo struct) t if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter109 : struct.parameters.entrySet()) + for (Map.Entry _iter125 : struct.parameters.entrySet()) { - oprot.writeString(_iter109.getKey()); - oprot.writeString(_iter109.getValue()); + oprot.writeString(_iter125.getKey()); + oprot.writeString(_iter125.getValue()); } } } @@ -635,15 +635,15 @@ public void read(org.apache.thrift.protocol.TProtocol 
prot, SerDeInfo struct) th } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map110 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map110.size); - String _key111; - String _val112; - for (int _i113 = 0; _i113 < _map110.size; ++_i113) + org.apache.thrift.protocol.TMap _map126 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map126.size); + String _key127; + String _val128; + for (int _i129 = 0; _i129 < _map126.size; ++_i129) { - _key111 = iprot.readString(); - _val112 = iprot.readString(); - struct.parameters.put(_key111, _val112); + _key127 = iprot.readString(); + _val128 = iprot.readString(); + struct.parameters.put(_key127, _val128); } } struct.setParametersIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java index 6e334f6..b599ce2 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsR case 1: // COL_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list286 = iprot.readListBegin(); - struct.colStats = new ArrayList(_list286.size); - ColumnStatistics _elem287; - for (int _i288 = 0; _i288 < _list286.size; ++_i288) + org.apache.thrift.protocol.TList _list318 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list318.size); + ColumnStatistics _elem319; + for (int _i320 = 0; _i320 < _list318.size; ++_i320) { - _elem287 = new ColumnStatistics(); - _elem287.read(iprot); - struct.colStats.add(_elem287); + _elem319 = new ColumnStatistics(); + _elem319.read(iprot); + struct.colStats.add(_elem319); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStats oprot.writeFieldBegin(COL_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); - for (ColumnStatistics _iter289 : struct.colStats) + for (ColumnStatistics _iter321 : struct.colStats) { - _iter289.write(oprot); + _iter321.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.colStats.size()); - for (ColumnStatistics _iter290 : struct.colStats) + for (ColumnStatistics _iter322 : struct.colStats) { - _iter290.write(oprot); + _iter322.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsR public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list291 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.colStats = new ArrayList(_list291.size); - ColumnStatistics _elem292; - for 
(int _i293 = 0; _i293 < _list291.size; ++_i293) + org.apache.thrift.protocol.TList _list323 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list323.size); + ColumnStatistics _elem324; + for (int _i325 = 0; _i325 < _list323.size; ++_i325) { - _elem292 = new ColumnStatistics(); - _elem292.read(iprot); - struct.colStats.add(_elem292); + _elem324 = new ColumnStatistics(); + _elem324.read(iprot); + struct.colStats.add(_elem324); } } struct.setColStatsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index afa832c..50e1924 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list484 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list484.size); - ShowCompactResponseElement _elem485; - for (int _i486 = 0; _i486 < _list484.size; ++_i486) + org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list516.size); + ShowCompactResponseElement _elem517; + for (int _i518 = 0; _i518 < _list516.size; ++_i518) { - _elem485 = new ShowCompactResponseElement(); - _elem485.read(iprot); - struct.compacts.add(_elem485); + _elem517 = new ShowCompactResponseElement(); + _elem517.read(iprot); + struct.compacts.add(_elem517); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter487 : struct.compacts) + for (ShowCompactResponseElement _iter519 : struct.compacts) { - _iter487.write(oprot); + _iter519.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter488 : struct.compacts) + for (ShowCompactResponseElement _iter520 : struct.compacts) { - _iter488.write(oprot); + _iter520.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list489.size); - ShowCompactResponseElement _elem490; - for (int _i491 = 0; _i491 < _list489.size; ++_i491) + org.apache.thrift.protocol.TList _list521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list521.size); + ShowCompactResponseElement _elem522; + for (int _i523 = 0; _i523 < _list521.size; ++_i523) { - 
_elem490 = new ShowCompactResponseElement(); - _elem490.read(iprot); - struct.compacts.add(_elem490); + _elem522 = new ShowCompactResponseElement(); + _elem522.read(iprot); + struct.compacts.add(_elem522); } } struct.setCompactsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index b9b7f3c..d7b316d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list460 = iprot.readListBegin(); - struct.locks = new ArrayList(_list460.size); - ShowLocksResponseElement _elem461; - for (int _i462 = 0; _i462 < _list460.size; ++_i462) + org.apache.thrift.protocol.TList _list492 = iprot.readListBegin(); + struct.locks = new ArrayList(_list492.size); + ShowLocksResponseElement _elem493; + for (int _i494 = 0; _i494 < _list492.size; ++_i494) { - _elem461 = new ShowLocksResponseElement(); - _elem461.read(iprot); - struct.locks.add(_elem461); + _elem493 = new ShowLocksResponseElement(); + _elem493.read(iprot); + struct.locks.add(_elem493); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter463 : struct.locks) + for (ShowLocksResponseElement _iter495 : struct.locks) { - _iter463.write(oprot); + _iter495.write(oprot); } oprot.writeListEnd(); } @@ -416,9 +416,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter464 : struct.locks) + for (ShowLocksResponseElement _iter496 : struct.locks) { - _iter464.write(oprot); + _iter496.write(oprot); } } } @@ -430,14 +430,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list465 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list465.size); - ShowLocksResponseElement _elem466; - for (int _i467 = 0; _i467 < _list465.size; ++_i467) + org.apache.thrift.protocol.TList _list497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list497.size); + ShowLocksResponseElement _elem498; + for (int _i499 = 0; _i499 < _list497.size; ++_i499) { - _elem466 = new ShowLocksResponseElement(); - _elem466.read(iprot); - struct.locks.add(_elem466); + _elem498 = new ShowLocksResponseElement(); + _elem498.read(iprot); + struct.locks.add(_elem498); } } struct.setLocksIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java index c32f50c..0263c2c 100644 --- 
a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SkewedInfo.java @@ -557,13 +557,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SkewedInfo struct) case 1: // SKEWED_COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list114 = iprot.readListBegin(); - struct.skewedColNames = new ArrayList(_list114.size); - String _elem115; - for (int _i116 = 0; _i116 < _list114.size; ++_i116) + org.apache.thrift.protocol.TList _list130 = iprot.readListBegin(); + struct.skewedColNames = new ArrayList(_list130.size); + String _elem131; + for (int _i132 = 0; _i132 < _list130.size; ++_i132) { - _elem115 = iprot.readString(); - struct.skewedColNames.add(_elem115); + _elem131 = iprot.readString(); + struct.skewedColNames.add(_elem131); } iprot.readListEnd(); } @@ -575,23 +575,23 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SkewedInfo struct) case 2: // SKEWED_COL_VALUES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list117 = iprot.readListBegin(); - struct.skewedColValues = new ArrayList>(_list117.size); - List _elem118; - for (int _i119 = 0; _i119 < _list117.size; ++_i119) + org.apache.thrift.protocol.TList _list133 = iprot.readListBegin(); + struct.skewedColValues = new ArrayList>(_list133.size); + List _elem134; + for (int _i135 = 0; _i135 < _list133.size; ++_i135) { { - org.apache.thrift.protocol.TList _list120 = iprot.readListBegin(); - _elem118 = new ArrayList(_list120.size); - String _elem121; - for (int _i122 = 0; _i122 < _list120.size; ++_i122) + org.apache.thrift.protocol.TList _list136 = iprot.readListBegin(); + _elem134 = new ArrayList(_list136.size); + String _elem137; + for (int _i138 = 0; _i138 < _list136.size; ++_i138) { - _elem121 = iprot.readString(); - _elem118.add(_elem121); + _elem137 = iprot.readString(); + _elem134.add(_elem137); } iprot.readListEnd(); } - struct.skewedColValues.add(_elem118); + struct.skewedColValues.add(_elem134); } iprot.readListEnd(); } @@ -603,25 +603,25 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, SkewedInfo struct) case 3: // SKEWED_COL_VALUE_LOCATION_MAPS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map123 = iprot.readMapBegin(); - struct.skewedColValueLocationMaps = new HashMap,String>(2*_map123.size); - List _key124; - String _val125; - for (int _i126 = 0; _i126 < _map123.size; ++_i126) + org.apache.thrift.protocol.TMap _map139 = iprot.readMapBegin(); + struct.skewedColValueLocationMaps = new HashMap,String>(2*_map139.size); + List _key140; + String _val141; + for (int _i142 = 0; _i142 < _map139.size; ++_i142) { { - org.apache.thrift.protocol.TList _list127 = iprot.readListBegin(); - _key124 = new ArrayList(_list127.size); - String _elem128; - for (int _i129 = 0; _i129 < _list127.size; ++_i129) + org.apache.thrift.protocol.TList _list143 = iprot.readListBegin(); + _key140 = new ArrayList(_list143.size); + String _elem144; + for (int _i145 = 0; _i145 < _list143.size; ++_i145) { - _elem128 = iprot.readString(); - _key124.add(_elem128); + _elem144 = iprot.readString(); + _key140.add(_elem144); } iprot.readListEnd(); } - _val125 = iprot.readString(); - struct.skewedColValueLocationMaps.put(_key124, _val125); + _val141 = iprot.readString(); + struct.skewedColValueLocationMaps.put(_key140, _val141); } 
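Note on the SkewedInfo hunks above and below: they only rename compiler-generated temporaries (_map123 becomes _map139, _elem118 becomes _elem134, and so on); adding new types upstream in the IDL shifts the generator's temporary counter, so every later identifier is renumbered without any change in logic. For readers unfamiliar with the pattern being diffed, here is a minimal standalone sketch (not Hive code) of how the generated standard scheme reads a nested Map<List<String>, String> such as skewedColValueLocationMaps. It assumes only libthrift on the classpath and a connected org.apache.thrift.protocol.TProtocol named iprot; the 2 * size HashMap presizing mirrors what the generator emits.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TMap;
import org.apache.thrift.protocol.TProtocol;

public final class NestedMapReadSketch {
  // Reads a map whose keys are themselves lists of strings, exactly as the
  // generated SkewedInfo standard scheme does for skewedColValueLocationMaps.
  static Map<List<String>, String> readLocationMap(TProtocol iprot) throws TException {
    TMap map = iprot.readMapBegin();                    // carries key/value types and size
    Map<List<String>, String> out = new HashMap<>(2 * map.size);
    for (int i = 0; i < map.size; ++i) {
      TList keyList = iprot.readListBegin();            // each key is a counted string list
      List<String> key = new ArrayList<>(keyList.size);
      for (int j = 0; j < keyList.size; ++j) {
        key.add(iprot.readString());
      }
      iprot.readListEnd();
      out.put(key, iprot.readString());                 // value is a plain string
    }
    iprot.readMapEnd();
    return out;
  }
}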
iprot.readMapEnd(); } @@ -647,9 +647,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SkewedInfo struct) oprot.writeFieldBegin(SKEWED_COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.skewedColNames.size())); - for (String _iter130 : struct.skewedColNames) + for (String _iter146 : struct.skewedColNames) { - oprot.writeString(_iter130); + oprot.writeString(_iter146); } oprot.writeListEnd(); } @@ -659,13 +659,13 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SkewedInfo struct) oprot.writeFieldBegin(SKEWED_COL_VALUES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, struct.skewedColValues.size())); - for (List _iter131 : struct.skewedColValues) + for (List _iter147 : struct.skewedColValues) { { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter131.size())); - for (String _iter132 : _iter131) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter147.size())); + for (String _iter148 : _iter147) { - oprot.writeString(_iter132); + oprot.writeString(_iter148); } oprot.writeListEnd(); } @@ -678,17 +678,17 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, SkewedInfo struct) oprot.writeFieldBegin(SKEWED_COL_VALUE_LOCATION_MAPS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRING, struct.skewedColValueLocationMaps.size())); - for (Map.Entry, String> _iter133 : struct.skewedColValueLocationMaps.entrySet()) + for (Map.Entry, String> _iter149 : struct.skewedColValueLocationMaps.entrySet()) { { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter133.getKey().size())); - for (String _iter134 : _iter133.getKey()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, _iter149.getKey().size())); + for (String _iter150 : _iter149.getKey()) { - oprot.writeString(_iter134); + oprot.writeString(_iter150); } oprot.writeListEnd(); } - oprot.writeString(_iter133.getValue()); + oprot.writeString(_iter149.getValue()); } oprot.writeMapEnd(); } @@ -725,22 +725,22 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SkewedInfo struct) if (struct.isSetSkewedColNames()) { { oprot.writeI32(struct.skewedColNames.size()); - for (String _iter135 : struct.skewedColNames) + for (String _iter151 : struct.skewedColNames) { - oprot.writeString(_iter135); + oprot.writeString(_iter151); } } } if (struct.isSetSkewedColValues()) { { oprot.writeI32(struct.skewedColValues.size()); - for (List _iter136 : struct.skewedColValues) + for (List _iter152 : struct.skewedColValues) { { - oprot.writeI32(_iter136.size()); - for (String _iter137 : _iter136) + oprot.writeI32(_iter152.size()); + for (String _iter153 : _iter152) { - oprot.writeString(_iter137); + oprot.writeString(_iter153); } } } @@ -749,16 +749,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, SkewedInfo struct) if (struct.isSetSkewedColValueLocationMaps()) { { oprot.writeI32(struct.skewedColValueLocationMaps.size()); - for (Map.Entry, String> _iter138 : struct.skewedColValueLocationMaps.entrySet()) + for (Map.Entry, String> _iter154 : struct.skewedColValueLocationMaps.entrySet()) { { - oprot.writeI32(_iter138.getKey().size()); - for (String _iter139 : 
_iter138.getKey()) + oprot.writeI32(_iter154.getKey().size()); + for (String _iter155 : _iter154.getKey()) { - oprot.writeString(_iter139); + oprot.writeString(_iter155); } } - oprot.writeString(_iter138.getValue()); + oprot.writeString(_iter154.getValue()); } } } @@ -770,59 +770,59 @@ public void read(org.apache.thrift.protocol.TProtocol prot, SkewedInfo struct) t BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list140 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.skewedColNames = new ArrayList(_list140.size); - String _elem141; - for (int _i142 = 0; _i142 < _list140.size; ++_i142) + org.apache.thrift.protocol.TList _list156 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.skewedColNames = new ArrayList(_list156.size); + String _elem157; + for (int _i158 = 0; _i158 < _list156.size; ++_i158) { - _elem141 = iprot.readString(); - struct.skewedColNames.add(_elem141); + _elem157 = iprot.readString(); + struct.skewedColNames.add(_elem157); } } struct.setSkewedColNamesIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list143 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.skewedColValues = new ArrayList>(_list143.size); - List _elem144; - for (int _i145 = 0; _i145 < _list143.size; ++_i145) + org.apache.thrift.protocol.TList _list159 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.skewedColValues = new ArrayList>(_list159.size); + List _elem160; + for (int _i161 = 0; _i161 < _list159.size; ++_i161) { { - org.apache.thrift.protocol.TList _list146 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - _elem144 = new ArrayList(_list146.size); - String _elem147; - for (int _i148 = 0; _i148 < _list146.size; ++_i148) + org.apache.thrift.protocol.TList _list162 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _elem160 = new ArrayList(_list162.size); + String _elem163; + for (int _i164 = 0; _i164 < _list162.size; ++_i164) { - _elem147 = iprot.readString(); - _elem144.add(_elem147); + _elem163 = iprot.readString(); + _elem160.add(_elem163); } } - struct.skewedColValues.add(_elem144); + struct.skewedColValues.add(_elem160); } } struct.setSkewedColValuesIsSet(true); } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map149 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.skewedColValueLocationMaps = new HashMap,String>(2*_map149.size); - List _key150; - String _val151; - for (int _i152 = 0; _i152 < _map149.size; ++_i152) + org.apache.thrift.protocol.TMap _map165 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.LIST, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.skewedColValueLocationMaps = new HashMap,String>(2*_map165.size); + List _key166; + String _val167; + for (int _i168 = 0; _i168 < _map165.size; ++_i168) { { - org.apache.thrift.protocol.TList _list153 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - _key150 = new ArrayList(_list153.size); - String _elem154; - for (int _i155 = 0; _i155 < _list153.size; ++_i155) + org.apache.thrift.protocol.TList _list169 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + _key166 = new ArrayList(_list169.size); + String _elem170; + for (int _i171 = 0; _i171 < _list169.size; ++_i171) { - _elem154 = iprot.readString(); - _key150.add(_elem154); + _elem170 = iprot.readString(); + _key166.add(_elem170); } } - _val151 = iprot.readString(); - struct.skewedColValueLocationMaps.put(_key150, _val151); + _val167 = iprot.readString(); + struct.skewedColValueLocationMaps.put(_key166, _val167); } } struct.setSkewedColValueLocationMapsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java index 938f06b..29250ef 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/StorageDescriptor.java @@ -1290,14 +1290,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 1: // COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list156 = iprot.readListBegin(); - struct.cols = new ArrayList(_list156.size); - FieldSchema _elem157; - for (int _i158 = 0; _i158 < _list156.size; ++_i158) + org.apache.thrift.protocol.TList _list172 = iprot.readListBegin(); + struct.cols = new ArrayList(_list172.size); + FieldSchema _elem173; + for (int _i174 = 0; _i174 < _list172.size; ++_i174) { - _elem157 = new FieldSchema(); - _elem157.read(iprot); - struct.cols.add(_elem157); + _elem173 = new FieldSchema(); + _elem173.read(iprot); + struct.cols.add(_elem173); } iprot.readListEnd(); } @@ -1358,13 +1358,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 8: // BUCKET_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list159 = iprot.readListBegin(); - struct.bucketCols = new ArrayList(_list159.size); - String _elem160; - for (int _i161 = 0; _i161 < _list159.size; ++_i161) + org.apache.thrift.protocol.TList _list175 = iprot.readListBegin(); + struct.bucketCols = new ArrayList(_list175.size); + String _elem176; + for (int _i177 = 0; _i177 < _list175.size; ++_i177) { - _elem160 = iprot.readString(); - struct.bucketCols.add(_elem160); + _elem176 = iprot.readString(); + struct.bucketCols.add(_elem176); } iprot.readListEnd(); } @@ -1376,14 +1376,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 9: // SORT_COLS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list162 = iprot.readListBegin(); - struct.sortCols = new ArrayList(_list162.size); - Order _elem163; - for (int _i164 = 0; _i164 < _list162.size; ++_i164) + org.apache.thrift.protocol.TList _list178 = iprot.readListBegin(); + struct.sortCols = new ArrayList(_list178.size); + Order _elem179; + for (int _i180 = 0; _i180 < _list178.size; ++_i180) { - _elem163 = new Order(); - _elem163.read(iprot); - struct.sortCols.add(_elem163); + _elem179 = new Order(); + _elem179.read(iprot); + struct.sortCols.add(_elem179); } iprot.readListEnd(); } @@ -1395,15 +1395,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, StorageDescriptor s case 10: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map165 = 
iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map165.size); - String _key166; - String _val167; - for (int _i168 = 0; _i168 < _map165.size; ++_i168) + org.apache.thrift.protocol.TMap _map181 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map181.size); + String _key182; + String _val183; + for (int _i184 = 0; _i184 < _map181.size; ++_i184) { - _key166 = iprot.readString(); - _val167 = iprot.readString(); - struct.parameters.put(_key166, _val167); + _key182 = iprot.readString(); + _val183 = iprot.readString(); + struct.parameters.put(_key182, _val183); } iprot.readMapEnd(); } @@ -1446,9 +1446,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.cols.size())); - for (FieldSchema _iter169 : struct.cols) + for (FieldSchema _iter185 : struct.cols) { - _iter169.write(oprot); + _iter185.write(oprot); } oprot.writeListEnd(); } @@ -1484,9 +1484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(BUCKET_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.bucketCols.size())); - for (String _iter170 : struct.bucketCols) + for (String _iter186 : struct.bucketCols) { - oprot.writeString(_iter170); + oprot.writeString(_iter186); } oprot.writeListEnd(); } @@ -1496,9 +1496,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(SORT_COLS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.sortCols.size())); - for (Order _iter171 : struct.sortCols) + for (Order _iter187 : struct.sortCols) { - _iter171.write(oprot); + _iter187.write(oprot); } oprot.writeListEnd(); } @@ -1508,10 +1508,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, StorageDescriptor oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter172 : struct.parameters.entrySet()) + for (Map.Entry _iter188 : struct.parameters.entrySet()) { - oprot.writeString(_iter172.getKey()); - oprot.writeString(_iter172.getValue()); + oprot.writeString(_iter188.getKey()); + oprot.writeString(_iter188.getValue()); } oprot.writeMapEnd(); } @@ -1587,9 +1587,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor s if (struct.isSetCols()) { { oprot.writeI32(struct.cols.size()); - for (FieldSchema _iter173 : struct.cols) + for (FieldSchema _iter189 : struct.cols) { - _iter173.write(oprot); + _iter189.write(oprot); } } } @@ -1614,28 +1614,28 @@ public void write(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor s if (struct.isSetBucketCols()) { { oprot.writeI32(struct.bucketCols.size()); - for (String _iter174 : struct.bucketCols) + for (String _iter190 : struct.bucketCols) { - oprot.writeString(_iter174); + oprot.writeString(_iter190); } } } if (struct.isSetSortCols()) { { oprot.writeI32(struct.sortCols.size()); - for (Order _iter175 : struct.sortCols) + for (Order _iter191 : struct.sortCols) { - _iter175.write(oprot); + _iter191.write(oprot); } } } if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter176 : struct.parameters.entrySet()) + for 
(Map.Entry _iter192 : struct.parameters.entrySet()) { - oprot.writeString(_iter176.getKey()); - oprot.writeString(_iter176.getValue()); + oprot.writeString(_iter192.getKey()); + oprot.writeString(_iter192.getValue()); } } } @@ -1653,14 +1653,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor st BitSet incoming = iprot.readBitSet(12); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list177 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.cols = new ArrayList(_list177.size); - FieldSchema _elem178; - for (int _i179 = 0; _i179 < _list177.size; ++_i179) + org.apache.thrift.protocol.TList _list193 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.cols = new ArrayList(_list193.size); + FieldSchema _elem194; + for (int _i195 = 0; _i195 < _list193.size; ++_i195) { - _elem178 = new FieldSchema(); - _elem178.read(iprot); - struct.cols.add(_elem178); + _elem194 = new FieldSchema(); + _elem194.read(iprot); + struct.cols.add(_elem194); } } struct.setColsIsSet(true); @@ -1692,42 +1692,42 @@ public void read(org.apache.thrift.protocol.TProtocol prot, StorageDescriptor st } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list180 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.bucketCols = new ArrayList(_list180.size); - String _elem181; - for (int _i182 = 0; _i182 < _list180.size; ++_i182) + org.apache.thrift.protocol.TList _list196 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.bucketCols = new ArrayList(_list196.size); + String _elem197; + for (int _i198 = 0; _i198 < _list196.size; ++_i198) { - _elem181 = iprot.readString(); - struct.bucketCols.add(_elem181); + _elem197 = iprot.readString(); + struct.bucketCols.add(_elem197); } } struct.setBucketColsIsSet(true); } if (incoming.get(8)) { { - org.apache.thrift.protocol.TList _list183 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.sortCols = new ArrayList(_list183.size); - Order _elem184; - for (int _i185 = 0; _i185 < _list183.size; ++_i185) + org.apache.thrift.protocol.TList _list199 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.sortCols = new ArrayList(_list199.size); + Order _elem200; + for (int _i201 = 0; _i201 < _list199.size; ++_i201) { - _elem184 = new Order(); - _elem184.read(iprot); - struct.sortCols.add(_elem184); + _elem200 = new Order(); + _elem200.read(iprot); + struct.sortCols.add(_elem200); } } struct.setSortColsIsSet(true); } if (incoming.get(9)) { { - org.apache.thrift.protocol.TMap _map186 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map186.size); - String _key187; - String _val188; - for (int _i189 = 0; _i189 < _map186.size; ++_i189) + org.apache.thrift.protocol.TMap _map202 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map202.size); + String _key203; + String _val204; + for (int _i205 = 0; _i205 < _map202.size; ++_i205) { - _key187 = iprot.readString(); - _val188 = iprot.readString(); - struct.parameters.put(_key187, _val188); + _key203 = iprot.readString(); + _val204 = 
iprot.readString(); + struct.parameters.put(_key203, _val204); } } struct.setParametersIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java index 5d683fb..a7f603e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Table.java @@ -52,6 +52,8 @@ private static final org.apache.thrift.protocol.TField TABLE_TYPE_FIELD_DESC = new org.apache.thrift.protocol.TField("tableType", org.apache.thrift.protocol.TType.STRING, (short)12); private static final org.apache.thrift.protocol.TField PRIVILEGES_FIELD_DESC = new org.apache.thrift.protocol.TField("privileges", org.apache.thrift.protocol.TType.STRUCT, (short)13); private static final org.apache.thrift.protocol.TField TEMPORARY_FIELD_DESC = new org.apache.thrift.protocol.TField("temporary", org.apache.thrift.protocol.TType.BOOL, (short)14); + private static final org.apache.thrift.protocol.TField PRIMARY_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("primaryKeys", org.apache.thrift.protocol.TType.LIST, (short)15); + private static final org.apache.thrift.protocol.TField FOREIGN_KEYS_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignKeys", org.apache.thrift.protocol.TType.LIST, (short)16); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { @@ -73,6 +75,8 @@ private String tableType; // required private PrincipalPrivilegeSet privileges; // optional private boolean temporary; // optional + private List primaryKeys; // required + private List foreignKeys; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
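The Table.java hunks that follow add two new list fields, primaryKeys (field id 15, FieldSchema elements) and foreignKeys (field id 16, ForeignKeyRel elements), with the usual generated helpers. Below is a hedged sketch of populating them through the addTo* helpers this patch introduces; FieldSchema(name, type, comment) is the existing metastore bean, while ForeignKeyRel's setters are not shown in these hunks, so only its no-arg constructor (used by the generated read path above) is assumed here.

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.metastore.api.ForeignKeyRel;
import org.apache.hadoop.hive.metastore.api.Table;

public final class TableConstraintsSketch {
  // Attaches constraint metadata using the helpers added by this patch.
  static void attachConstraints(Table table) {
    table.addToPrimaryKeys(new FieldSchema("id", "int", "primary key column"));
    table.addToForeignKeys(new ForeignKeyRel()); // field values would be set elsewhere
    // Both fields are nullable: isSetPrimaryKeys()/isSetForeignKeys() report whether
    // they were assigned, the standard scheme writes them whenever non-null, and the
    // tuple scheme guards them with the two new bits in the widened 16-bit bitset.
  }
}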
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -89,7 +93,9 @@ VIEW_EXPANDED_TEXT((short)11, "viewExpandedText"), TABLE_TYPE((short)12, "tableType"), PRIVILEGES((short)13, "privileges"), - TEMPORARY((short)14, "temporary"); + TEMPORARY((short)14, "temporary"), + PRIMARY_KEYS((short)15, "primaryKeys"), + FOREIGN_KEYS((short)16, "foreignKeys"); private static final Map byName = new HashMap(); @@ -132,6 +138,10 @@ public static _Fields findByThriftId(int fieldId) { return PRIVILEGES; case 14: // TEMPORARY return TEMPORARY; + case 15: // PRIMARY_KEYS + return PRIMARY_KEYS; + case 16: // FOREIGN_KEYS + return FOREIGN_KEYS; default: return null; } @@ -212,6 +222,12 @@ public String getFieldName() { new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PrincipalPrivilegeSet.class))); tmpMap.put(_Fields.TEMPORARY, new org.apache.thrift.meta_data.FieldMetaData("temporary", org.apache.thrift.TFieldRequirementType.OPTIONAL, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.PRIMARY_KEYS, new org.apache.thrift.meta_data.FieldMetaData("primaryKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, FieldSchema.class)))); + tmpMap.put(_Fields.FOREIGN_KEYS, new org.apache.thrift.meta_data.FieldMetaData("foreignKeys", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ForeignKeyRel.class)))); metaDataMap = Collections.unmodifiableMap(tmpMap); org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(Table.class, metaDataMap); } @@ -233,7 +249,9 @@ public Table( Map parameters, String viewOriginalText, String viewExpandedText, - String tableType) + String tableType, + List primaryKeys, + List foreignKeys) { this(); this.tableName = tableName; @@ -251,6 +269,8 @@ public Table( this.viewOriginalText = viewOriginalText; this.viewExpandedText = viewExpandedText; this.tableType = tableType; + this.primaryKeys = primaryKeys; + this.foreignKeys = foreignKeys; } /** @@ -297,6 +317,20 @@ public Table(Table other) { this.privileges = new PrincipalPrivilegeSet(other.privileges); } this.temporary = other.temporary; + if (other.isSetPrimaryKeys()) { + List __this__primaryKeys = new ArrayList(other.primaryKeys.size()); + for (FieldSchema other_element : other.primaryKeys) { + __this__primaryKeys.add(new FieldSchema(other_element)); + } + this.primaryKeys = __this__primaryKeys; + } + if (other.isSetForeignKeys()) { + List __this__foreignKeys = new ArrayList(other.foreignKeys.size()); + for (ForeignKeyRel other_element : other.foreignKeys) { + __this__foreignKeys.add(new ForeignKeyRel(other_element)); + } + this.foreignKeys = __this__foreignKeys; + } } public Table deepCopy() { @@ -323,6 +357,8 @@ public void clear() { this.privileges = null; this.temporary = false; + this.primaryKeys = null; + this.foreignKeys = null; } public String getTableName() { @@ -669,6 +705,82 @@ public void setTemporaryIsSet(boolean value) { __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TEMPORARY_ISSET_ID, value); } + public int getPrimaryKeysSize() { + return (this.primaryKeys == null) ? 
0 : this.primaryKeys.size(); + } + + public java.util.Iterator getPrimaryKeysIterator() { + return (this.primaryKeys == null) ? null : this.primaryKeys.iterator(); + } + + public void addToPrimaryKeys(FieldSchema elem) { + if (this.primaryKeys == null) { + this.primaryKeys = new ArrayList(); + } + this.primaryKeys.add(elem); + } + + public List getPrimaryKeys() { + return this.primaryKeys; + } + + public void setPrimaryKeys(List primaryKeys) { + this.primaryKeys = primaryKeys; + } + + public void unsetPrimaryKeys() { + this.primaryKeys = null; + } + + /** Returns true if field primaryKeys is set (has been assigned a value) and false otherwise */ + public boolean isSetPrimaryKeys() { + return this.primaryKeys != null; + } + + public void setPrimaryKeysIsSet(boolean value) { + if (!value) { + this.primaryKeys = null; + } + } + + public int getForeignKeysSize() { + return (this.foreignKeys == null) ? 0 : this.foreignKeys.size(); + } + + public java.util.Iterator getForeignKeysIterator() { + return (this.foreignKeys == null) ? null : this.foreignKeys.iterator(); + } + + public void addToForeignKeys(ForeignKeyRel elem) { + if (this.foreignKeys == null) { + this.foreignKeys = new ArrayList(); + } + this.foreignKeys.add(elem); + } + + public List getForeignKeys() { + return this.foreignKeys; + } + + public void setForeignKeys(List foreignKeys) { + this.foreignKeys = foreignKeys; + } + + public void unsetForeignKeys() { + this.foreignKeys = null; + } + + /** Returns true if field foreignKeys is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignKeys() { + return this.foreignKeys != null; + } + + public void setForeignKeysIsSet(boolean value) { + if (!value) { + this.foreignKeys = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case TABLE_NAME: @@ -783,6 +895,22 @@ public void setFieldValue(_Fields field, Object value) { } break; + case PRIMARY_KEYS: + if (value == null) { + unsetPrimaryKeys(); + } else { + setPrimaryKeys((List)value); + } + break; + + case FOREIGN_KEYS: + if (value == null) { + unsetForeignKeys(); + } else { + setForeignKeys((List)value); + } + break; + } } @@ -830,6 +958,12 @@ public Object getFieldValue(_Fields field) { case TEMPORARY: return isTemporary(); + case PRIMARY_KEYS: + return getPrimaryKeys(); + + case FOREIGN_KEYS: + return getForeignKeys(); + } throw new IllegalStateException(); } @@ -869,6 +1003,10 @@ public boolean isSet(_Fields field) { return isSetPrivileges(); case TEMPORARY: return isSetTemporary(); + case PRIMARY_KEYS: + return isSetPrimaryKeys(); + case FOREIGN_KEYS: + return isSetForeignKeys(); } throw new IllegalStateException(); } @@ -1012,6 +1150,24 @@ public boolean equals(Table that) { return false; } + boolean this_present_primaryKeys = true && this.isSetPrimaryKeys(); + boolean that_present_primaryKeys = true && that.isSetPrimaryKeys(); + if (this_present_primaryKeys || that_present_primaryKeys) { + if (!(this_present_primaryKeys && that_present_primaryKeys)) + return false; + if (!this.primaryKeys.equals(that.primaryKeys)) + return false; + } + + boolean this_present_foreignKeys = true && this.isSetForeignKeys(); + boolean that_present_foreignKeys = true && that.isSetForeignKeys(); + if (this_present_foreignKeys || that_present_foreignKeys) { + if (!(this_present_foreignKeys && that_present_foreignKeys)) + return false; + if (!this.foreignKeys.equals(that.foreignKeys)) + return false; + } + return true; } @@ -1089,6 +1245,16 @@ public int hashCode() { if (present_temporary) 
list.add(temporary); + boolean present_primaryKeys = true && (isSetPrimaryKeys()); + list.add(present_primaryKeys); + if (present_primaryKeys) + list.add(primaryKeys); + + boolean present_foreignKeys = true && (isSetForeignKeys()); + list.add(present_foreignKeys); + if (present_foreignKeys) + list.add(foreignKeys); + return list.hashCode(); } @@ -1240,6 +1406,26 @@ public int compareTo(Table other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetPrimaryKeys()).compareTo(other.isSetPrimaryKeys()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetPrimaryKeys()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.primaryKeys, other.primaryKeys); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeignKeys()).compareTo(other.isSetForeignKeys()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignKeys()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignKeys, other.foreignKeys); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -1359,6 +1545,22 @@ public String toString() { sb.append(this.temporary); first = false; } + if (!first) sb.append(", "); + sb.append("primaryKeys:"); + if (this.primaryKeys == null) { + sb.append("null"); + } else { + sb.append(this.primaryKeys); + } + first = false; + if (!first) sb.append(", "); + sb.append("foreignKeys:"); + if (this.foreignKeys == null) { + sb.append("null"); + } else { + sb.append(this.foreignKeys); + } + first = false; sb.append(")"); return sb.toString(); } @@ -1470,14 +1672,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw case 8: // PARTITION_KEYS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list190 = iprot.readListBegin(); - struct.partitionKeys = new ArrayList(_list190.size); - FieldSchema _elem191; - for (int _i192 = 0; _i192 < _list190.size; ++_i192) + org.apache.thrift.protocol.TList _list206 = iprot.readListBegin(); + struct.partitionKeys = new ArrayList(_list206.size); + FieldSchema _elem207; + for (int _i208 = 0; _i208 < _list206.size; ++_i208) { - _elem191 = new FieldSchema(); - _elem191.read(iprot); - struct.partitionKeys.add(_elem191); + _elem207 = new FieldSchema(); + _elem207.read(iprot); + struct.partitionKeys.add(_elem207); } iprot.readListEnd(); } @@ -1489,15 +1691,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw case 9: // PARAMETERS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map193 = iprot.readMapBegin(); - struct.parameters = new HashMap(2*_map193.size); - String _key194; - String _val195; - for (int _i196 = 0; _i196 < _map193.size; ++_i196) + org.apache.thrift.protocol.TMap _map209 = iprot.readMapBegin(); + struct.parameters = new HashMap(2*_map209.size); + String _key210; + String _val211; + for (int _i212 = 0; _i212 < _map209.size; ++_i212) { - _key194 = iprot.readString(); - _val195 = iprot.readString(); - struct.parameters.put(_key194, _val195); + _key210 = iprot.readString(); + _val211 = iprot.readString(); + struct.parameters.put(_key210, _val211); } iprot.readMapEnd(); } @@ -1547,6 +1749,44 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Table struct) throw org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 15: // PRIMARY_KEYS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + 
org.apache.thrift.protocol.TList _list213 = iprot.readListBegin(); + struct.primaryKeys = new ArrayList(_list213.size); + FieldSchema _elem214; + for (int _i215 = 0; _i215 < _list213.size; ++_i215) + { + _elem214 = new FieldSchema(); + _elem214.read(iprot); + struct.primaryKeys.add(_elem214); + } + iprot.readListEnd(); + } + struct.setPrimaryKeysIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 16: // FOREIGN_KEYS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list216 = iprot.readListBegin(); + struct.foreignKeys = new ArrayList(_list216.size); + ForeignKeyRel _elem217; + for (int _i218 = 0; _i218 < _list216.size; ++_i218) + { + _elem217 = new ForeignKeyRel(); + _elem217.read(iprot); + struct.foreignKeys.add(_elem217); + } + iprot.readListEnd(); + } + struct.setForeignKeysIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -1593,9 +1833,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldBegin(PARTITION_KEYS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitionKeys.size())); - for (FieldSchema _iter197 : struct.partitionKeys) + for (FieldSchema _iter219 : struct.partitionKeys) { - _iter197.write(oprot); + _iter219.write(oprot); } oprot.writeListEnd(); } @@ -1605,10 +1845,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeFieldBegin(PARAMETERS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.parameters.size())); - for (Map.Entry _iter198 : struct.parameters.entrySet()) + for (Map.Entry _iter220 : struct.parameters.entrySet()) { - oprot.writeString(_iter198.getKey()); - oprot.writeString(_iter198.getValue()); + oprot.writeString(_iter220.getKey()); + oprot.writeString(_iter220.getValue()); } oprot.writeMapEnd(); } @@ -1641,6 +1881,30 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Table struct) thro oprot.writeBool(struct.temporary); oprot.writeFieldEnd(); } + if (struct.primaryKeys != null) { + oprot.writeFieldBegin(PRIMARY_KEYS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.primaryKeys.size())); + for (FieldSchema _iter221 : struct.primaryKeys) + { + _iter221.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.foreignKeys != null) { + oprot.writeFieldBegin(FOREIGN_KEYS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.foreignKeys.size())); + for (ForeignKeyRel _iter222 : struct.foreignKeys) + { + _iter222.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } @@ -1701,7 +1965,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTemporary()) { optionals.set(13); } - oprot.writeBitSet(optionals, 14); + if (struct.isSetPrimaryKeys()) { + optionals.set(14); + } + if (struct.isSetForeignKeys()) { + optionals.set(15); + } + oprot.writeBitSet(optionals, 16); if (struct.isSetTableName()) { oprot.writeString(struct.tableName); } @@ 
-1726,19 +1996,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetPartitionKeys()) { { oprot.writeI32(struct.partitionKeys.size()); - for (FieldSchema _iter199 : struct.partitionKeys) + for (FieldSchema _iter223 : struct.partitionKeys) { - _iter199.write(oprot); + _iter223.write(oprot); } } } if (struct.isSetParameters()) { { oprot.writeI32(struct.parameters.size()); - for (Map.Entry _iter200 : struct.parameters.entrySet()) + for (Map.Entry _iter224 : struct.parameters.entrySet()) { - oprot.writeString(_iter200.getKey()); - oprot.writeString(_iter200.getValue()); + oprot.writeString(_iter224.getKey()); + oprot.writeString(_iter224.getValue()); } } } @@ -1757,12 +2027,30 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Table struct) throw if (struct.isSetTemporary()) { oprot.writeBool(struct.temporary); } + if (struct.isSetPrimaryKeys()) { + { + oprot.writeI32(struct.primaryKeys.size()); + for (FieldSchema _iter225 : struct.primaryKeys) + { + _iter225.write(oprot); + } + } + } + if (struct.isSetForeignKeys()) { + { + oprot.writeI32(struct.foreignKeys.size()); + for (ForeignKeyRel _iter226 : struct.foreignKeys) + { + _iter226.write(oprot); + } + } + } } @Override public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(14); + BitSet incoming = iprot.readBitSet(16); if (incoming.get(0)) { struct.tableName = iprot.readString(); struct.setTableNameIsSet(true); @@ -1794,29 +2082,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list201 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitionKeys = new ArrayList(_list201.size); - FieldSchema _elem202; - for (int _i203 = 0; _i203 < _list201.size; ++_i203) + org.apache.thrift.protocol.TList _list227 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitionKeys = new ArrayList(_list227.size); + FieldSchema _elem228; + for (int _i229 = 0; _i229 < _list227.size; ++_i229) { - _elem202 = new FieldSchema(); - _elem202.read(iprot); - struct.partitionKeys.add(_elem202); + _elem228 = new FieldSchema(); + _elem228.read(iprot); + struct.partitionKeys.add(_elem228); } } struct.setPartitionKeysIsSet(true); } if (incoming.get(8)) { { - org.apache.thrift.protocol.TMap _map204 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.parameters = new HashMap(2*_map204.size); - String _key205; - String _val206; - for (int _i207 = 0; _i207 < _map204.size; ++_i207) + org.apache.thrift.protocol.TMap _map230 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.parameters = new HashMap(2*_map230.size); + String _key231; + String _val232; + for (int _i233 = 0; _i233 < _map230.size; ++_i233) { - _key205 = iprot.readString(); - _val206 = iprot.readString(); - struct.parameters.put(_key205, _val206); + _key231 = iprot.readString(); + _val232 = iprot.readString(); + struct.parameters.put(_key231, _val232); } } struct.setParametersIsSet(true); @@ -1842,6 +2130,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Table struct) throws struct.temporary = 
iprot.readBool(); struct.setTemporaryIsSet(true); } + if (incoming.get(14)) { + { + org.apache.thrift.protocol.TList _list234 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.primaryKeys = new ArrayList(_list234.size); + FieldSchema _elem235; + for (int _i236 = 0; _i236 < _list234.size; ++_i236) + { + _elem235 = new FieldSchema(); + _elem235.read(iprot); + struct.primaryKeys.add(_elem235); + } + } + struct.setPrimaryKeysIsSet(true); + } + if (incoming.get(15)) { + { + org.apache.thrift.protocol.TList _list237 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.foreignKeys = new ArrayList(_list237.size); + ForeignKeyRel _elem238; + for (int _i239 = 0; _i239 < _list237.size; ++_i239) + { + _elem238 = new ForeignKeyRel(); + _elem238.read(iprot); + struct.foreignKeys.add(_elem238); + } + } + struct.setForeignKeysIsSet(true); + } } } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index d0daee5..c8c762a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -537,13 +537,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list356 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list356.size); - String _elem357; - for (int _i358 = 0; _i358 < _list356.size; ++_i358) + org.apache.thrift.protocol.TList _list388 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list388.size); + String _elem389; + for (int _i390 = 0; _i390 < _list388.size; ++_i390) { - _elem357 = iprot.readString(); - struct.colNames.add(_elem357); + _elem389 = iprot.readString(); + struct.colNames.add(_elem389); } iprot.readListEnd(); } @@ -579,9 +579,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter359 : struct.colNames) + for (String _iter391 : struct.colNames) { - oprot.writeString(_iter359); + oprot.writeString(_iter391); } oprot.writeListEnd(); } @@ -608,9 +608,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter360 : struct.colNames) + for (String _iter392 : struct.colNames) { - oprot.writeString(_iter360); + oprot.writeString(_iter392); } } } @@ -623,13 +623,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list361 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list361.size); - String _elem362; - for (int _i363 = 0; _i363 < _list361.size; ++_i363) + org.apache.thrift.protocol.TList _list393 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list393.size); + String 
_elem394; + for (int _i395 = 0; _i395 < _list393.size; ++_i395) { - _elem362 = iprot.readString(); - struct.colNames.add(_elem362); + _elem394 = iprot.readString(); + struct.colNames.add(_elem394); } } struct.setColNamesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index 78d4250..49da294 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -354,14 +354,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st case 1: // TABLE_STATS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list330 = iprot.readListBegin(); - struct.tableStats = new ArrayList(_list330.size); - ColumnStatisticsObj _elem331; - for (int _i332 = 0; _i332 < _list330.size; ++_i332) + org.apache.thrift.protocol.TList _list362 = iprot.readListBegin(); + struct.tableStats = new ArrayList(_list362.size); + ColumnStatisticsObj _elem363; + for (int _i364 = 0; _i364 < _list362.size; ++_i364) { - _elem331 = new ColumnStatisticsObj(); - _elem331.read(iprot); - struct.tableStats.add(_elem331); + _elem363 = new ColumnStatisticsObj(); + _elem363.read(iprot); + struct.tableStats.add(_elem363); } iprot.readListEnd(); } @@ -387,9 +387,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableStats.size())); - for (ColumnStatisticsObj _iter333 : struct.tableStats) + for (ColumnStatisticsObj _iter365 : struct.tableStats) { - _iter333.write(oprot); + _iter365.write(oprot); } oprot.writeListEnd(); } @@ -414,9 +414,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tableStats.size()); - for (ColumnStatisticsObj _iter334 : struct.tableStats) + for (ColumnStatisticsObj _iter366 : struct.tableStats) { - _iter334.write(oprot); + _iter366.write(oprot); } } } @@ -425,14 +425,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tableStats = new ArrayList(_list335.size); - ColumnStatisticsObj _elem336; - for (int _i337 = 0; _i337 < _list335.size; ++_i337) + org.apache.thrift.protocol.TList _list367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableStats = new ArrayList(_list367.size); + ColumnStatisticsObj _elem368; + for (int _i369 = 0; _i369 < _list367.size; ++_i369) { - _elem336 = new ColumnStatisticsObj(); - _elem336.read(iprot); - struct.tableStats.add(_elem336); + _elem368 = new ColumnStatisticsObj(); + _elem368.read(iprot); + struct.tableStats.add(_elem368); } } struct.setTableStatsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index 13e30db..36ac7d8 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -196,6 +196,10 @@ public List get_index_names(String db_name, String tbl_name, short max_indexes) throws MetaException, org.apache.thrift.TException; + public List get_primary_keys(String db_name, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + + public List get_foreign_keys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException; + public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; public boolean update_partition_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; @@ -486,6 +490,10 @@ public void get_index_names(String db_name, String tbl_name, short max_indexes, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void get_primary_keys(String db_name, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void get_foreign_keys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void update_partition_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -3018,6 +3026,68 @@ public void send_get_index_names(String db_name, String tbl_name, short max_inde throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_index_names failed: unknown result"); } + public List get_primary_keys(String db_name, String tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + send_get_primary_keys(db_name, tbl_name); + return recv_get_primary_keys(); + } + + public void send_get_primary_keys(String db_name, String tbl_name) throws org.apache.thrift.TException + { + get_primary_keys_args args = new get_primary_keys_args(); + args.setDb_name(db_name); + args.setTbl_name(tbl_name); + sendBase("get_primary_keys", args); + } + + public List recv_get_primary_keys() throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + get_primary_keys_result result = new get_primary_keys_result(); + receiveBase(result, "get_primary_keys"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_primary_keys failed: unknown result"); + } + + public List get_foreign_keys(String parent_db_name, String parent_tbl_name, String 
foreign_db_name, String foreign_tbl_name) throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + send_get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name); + return recv_get_foreign_keys(); + } + + public void send_get_foreign_keys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws org.apache.thrift.TException + { + get_foreign_keys_args args = new get_foreign_keys_args(); + args.setParent_db_name(parent_db_name); + args.setParent_tbl_name(parent_tbl_name); + args.setForeign_db_name(foreign_db_name); + args.setForeign_tbl_name(foreign_tbl_name); + sendBase("get_foreign_keys", args); + } + + public List recv_get_foreign_keys() throws MetaException, NoSuchObjectException, org.apache.thrift.TException + { + get_foreign_keys_result result = new get_foreign_keys_result(); + receiveBase(result, "get_foreign_keys"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_foreign_keys failed: unknown result"); + } + public boolean update_table_column_statistics(ColumnStatistics stats_obj) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { send_update_table_column_statistics(stats_obj); @@ -7659,6 +7729,82 @@ public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apa } } + public void get_primary_keys(String db_name, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_primary_keys_call method_call = new get_primary_keys_call(db_name, tbl_name, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_primary_keys_call extends org.apache.thrift.async.TAsyncMethodCall { + private String db_name; + private String tbl_name; + public get_primary_keys_call(String db_name, String tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.db_name = db_name; + this.tbl_name = tbl_name; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_primary_keys", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_primary_keys_args args = new get_primary_keys_args(); + args.setDb_name(db_name); + args.setTbl_name(tbl_name); + args.write(prot); + prot.writeMessageEnd(); + } + + public List getResult() throws MetaException, NoSuchObjectException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new 
Client(prot)).recv_get_primary_keys(); + } + } + + public void get_foreign_keys(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + get_foreign_keys_call method_call = new get_foreign_keys_call(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class get_foreign_keys_call extends org.apache.thrift.async.TAsyncMethodCall { + private String parent_db_name; + private String parent_tbl_name; + private String foreign_db_name; + private String foreign_tbl_name; + public get_foreign_keys_call(String parent_db_name, String parent_tbl_name, String foreign_db_name, String foreign_tbl_name, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.parent_db_name = parent_db_name; + this.parent_tbl_name = parent_tbl_name; + this.foreign_db_name = foreign_db_name; + this.foreign_tbl_name = foreign_tbl_name; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("get_foreign_keys", org.apache.thrift.protocol.TMessageType.CALL, 0)); + get_foreign_keys_args args = new get_foreign_keys_args(); + args.setParent_db_name(parent_db_name); + args.setParent_tbl_name(parent_tbl_name); + args.setForeign_db_name(foreign_db_name); + args.setForeign_tbl_name(foreign_tbl_name); + args.write(prot); + prot.writeMessageEnd(); + } + + public List getResult() throws MetaException, NoSuchObjectException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_get_foreign_keys(); + } + } + public void update_table_column_statistics(ColumnStatistics stats_obj, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); update_table_column_statistics_call method_call = new update_table_column_statistics_call(stats_obj, resultHandler, this, ___protocolFactory, ___transport); @@ -9930,6 +10076,8 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public get_primary_keys() { + super("get_primary_keys"); + } + + public get_primary_keys_args getEmptyArgsInstance() { + return new get_primary_keys_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_primary_keys_result getResult(I iface, get_primary_keys_args args) throws org.apache.thrift.TException { + get_primary_keys_result result = new get_primary_keys_result(); + try { + result.success = iface.get_primary_keys(args.db_name, args.tbl_name); + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } + 
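+ /*
+  * The asynchronous client never blocks the calling thread: each get_foreign_keys_call
+  * captures its arguments, write_args frames them onto the TNonblockingTransport, and the
+  * shared TAsyncClientManager fires the supplied callback once the response frame has been
+  * read; getResult() then replays the buffered frame through a throwaway blocking Client so
+  * the recv_ parsing above is reused. A hedged usage sketch (exception handling elided; the
+  * callback's type parameter varies across libthrift versions):
+  *
+  *   TAsyncClientManager manager = new TAsyncClientManager();
+  *   TNonblockingTransport sock = new TNonblockingSocket("localhost", 9083);
+  *   ThriftHiveMetastore.AsyncClient client = new ThriftHiveMetastore.AsyncClient(
+  *       new TBinaryProtocol.Factory(), manager, sock);
+  *   client.get_foreign_keys("sales", "orders", "sales", "line_items",
+  *       new AsyncMethodCallback<AsyncClient.get_foreign_keys_call>() {
+  *         public void onComplete(AsyncClient.get_foreign_keys_call call) {
+  *           try { List<SQLForeignKey> fks = call.getResult(); }
+  *           catch (Exception e) { onError(e); }
+  *         }
+  *         public void onError(Exception e) { e.printStackTrace(); }
+  *       });
+  */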
return result; + } + } + + public static class get_foreign_keys extends org.apache.thrift.ProcessFunction { + public get_foreign_keys() { + super("get_foreign_keys"); + } + + public get_foreign_keys_args getEmptyArgsInstance() { + return new get_foreign_keys_args(); + } + + protected boolean isOneway() { + return false; + } + + public get_foreign_keys_result getResult(I iface, get_foreign_keys_args args) throws org.apache.thrift.TException { + get_foreign_keys_result result = new get_foreign_keys_result(); + try { + result.success = iface.get_foreign_keys(args.parent_db_name, args.parent_tbl_name, args.foreign_db_name, args.foreign_tbl_name); + } catch (MetaException o1) { + result.o1 = o1; + } catch (NoSuchObjectException o2) { + result.o2 = o2; + } + return result; + } + } + public static class update_table_column_statistics extends org.apache.thrift.ProcessFunction { public update_table_column_statistics() { super("update_table_column_statistics"); @@ -13731,6 +13931,8 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction> { + public get_primary_keys() { + super("get_primary_keys"); + } + + public get_primary_keys_args getEmptyArgsInstance() { + return new get_primary_keys_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_primary_keys_result result = new get_primary_keys_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_primary_keys_result result = new get_primary_keys_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_primary_keys_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_primary_keys(args.db_name, args.tbl_name,resultHandler); + } + } + + public static class get_foreign_keys extends org.apache.thrift.AsyncProcessFunction> { + public get_foreign_keys() { + super("get_foreign_keys"); + } + + public get_foreign_keys_args getEmptyArgsInstance() { + return new get_foreign_keys_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + get_foreign_keys_result result = new get_foreign_keys_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, 
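+ /*
+  * Server side, both dispatch tables gain matching entries: Processor routes an incoming
+  * "get_primary_keys" / "get_foreign_keys" message to these ProcessFunctions, which
+  * deserialize the args, invoke the handler, and fold MetaException /
+  * NoSuchObjectException into the o1/o2 result fields rather than letting them surface as
+  * transport errors; the AsyncProcessFunction twins do the same folding inside onError.
+  * The handler is whatever implements Iface (in Hive proper, HiveMetaStore's HMSHandler).
+  * A stub sketch, with a hypothetical class name and lookup helper:
+  *
+  *   class KeyAwareHandler implements ThriftHiveMetastore.Iface {
+  *     public List<SQLForeignKey> get_foreign_keys(String parentDb, String parentTbl,
+  *         String foreignDb, String foreignTbl)
+  *         throws MetaException, NoSuchObjectException {
+  *       // hypothetical: consult the backing metastore RDBMS for matching constraints
+  *       return lookupForeignKeys(parentDb, parentTbl, foreignDb, foreignTbl);
+  *     }
+  *     // ...every other Iface method...
+  *   }
+  *   TProcessor processor = new ThriftHiveMetastore.Processor<>(new KeyAwareHandler());
+  */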
org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + get_foreign_keys_result result = new get_foreign_keys_result(); + if (e instanceof MetaException) { + result.o1 = (MetaException) e; + result.setO1IsSet(true); + msg = result; + } + else if (e instanceof NoSuchObjectException) { + result.o2 = (NoSuchObjectException) e; + result.setO2IsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, get_foreign_keys_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.get_foreign_keys(args.parent_db_name, args.parent_tbl_name, args.foreign_db_name, args.foreign_tbl_name,resultHandler); + } + } + public static class update_table_column_statistics extends org.apache.thrift.AsyncProcessFunction { public update_table_column_statistics() { super("update_table_column_statistics"); @@ -27891,13 +28217,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list592 = iprot.readListBegin(); - struct.success = new ArrayList(_list592.size); - String _elem593; - for (int _i594 = 0; _i594 < _list592.size; ++_i594) + org.apache.thrift.protocol.TList _list624 = iprot.readListBegin(); + struct.success = new ArrayList(_list624.size); + String _elem625; + for (int _i626 = 0; _i626 < _list624.size; ++_i626) { - _elem593 = iprot.readString(); - struct.success.add(_elem593); + _elem625 = iprot.readString(); + struct.success.add(_elem625); } iprot.readListEnd(); } @@ -27932,9 +28258,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter595 : struct.success) + for (String _iter627 : struct.success) { - oprot.writeString(_iter595); + oprot.writeString(_iter627); } oprot.writeListEnd(); } @@ -27973,9 +28299,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter596 : struct.success) + for (String _iter628 : struct.success) { - oprot.writeString(_iter596); + oprot.writeString(_iter628); } } } @@ -27990,13 +28316,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list597 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list597.size); - String _elem598; - for (int _i599 = 0; _i599 < _list597.size; ++_i599) + org.apache.thrift.protocol.TList _list629 = new 
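+ /*
+  * From get_databases_result onward, this patch is purely mechanical churn: the Thrift
+  * compiler numbers its scratch variables (_listN, _elemN, _iterN, _mapN, _keyN, _valN,
+  * _iN) globally across the generated file, so inserting the two new methods shifts every
+  * later identifier by 32 (e.g. _list592 becomes _list624) with no behavioral change. The
+  * code being renumbered is the stock Thrift list codec, roughly:
+  *
+  *   TList header = iprot.readListBegin();            // element type and count
+  *   List<String> out = new ArrayList<>(header.size); // pre-sized from the header
+  *   for (int i = 0; i < header.size; ++i) {
+  *     out.add(iprot.readString());                   // or new T(); t.read(iprot) for struct elements
+  *   }
+  *   iprot.readListEnd();
+  */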
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list629.size); + String _elem630; + for (int _i631 = 0; _i631 < _list629.size; ++_i631) { - _elem598 = iprot.readString(); - struct.success.add(_elem598); + _elem630 = iprot.readString(); + struct.success.add(_elem630); } } struct.setSuccessIsSet(true); @@ -28650,13 +28976,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list600 = iprot.readListBegin(); - struct.success = new ArrayList(_list600.size); - String _elem601; - for (int _i602 = 0; _i602 < _list600.size; ++_i602) + org.apache.thrift.protocol.TList _list632 = iprot.readListBegin(); + struct.success = new ArrayList(_list632.size); + String _elem633; + for (int _i634 = 0; _i634 < _list632.size; ++_i634) { - _elem601 = iprot.readString(); - struct.success.add(_elem601); + _elem633 = iprot.readString(); + struct.success.add(_elem633); } iprot.readListEnd(); } @@ -28691,9 +29017,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter603 : struct.success) + for (String _iter635 : struct.success) { - oprot.writeString(_iter603); + oprot.writeString(_iter635); } oprot.writeListEnd(); } @@ -28732,9 +29058,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter604 : struct.success) + for (String _iter636 : struct.success) { - oprot.writeString(_iter604); + oprot.writeString(_iter636); } } } @@ -28749,13 +29075,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list605 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list605.size); - String _elem606; - for (int _i607 = 0; _i607 < _list605.size; ++_i607) + org.apache.thrift.protocol.TList _list637 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list637.size); + String _elem638; + for (int _i639 = 0; _i639 < _list637.size; ++_i639) { - _elem606 = iprot.readString(); - struct.success.add(_elem606); + _elem638 = iprot.readString(); + struct.success.add(_elem638); } } struct.setSuccessIsSet(true); @@ -33362,16 +33688,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map608 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map608.size); - String _key609; - Type _val610; - for (int _i611 = 0; _i611 < _map608.size; ++_i611) + org.apache.thrift.protocol.TMap _map640 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map640.size); + String _key641; + Type _val642; + for (int _i643 = 0; _i643 < _map640.size; ++_i643) { - _key609 = iprot.readString(); - _val610 = new Type(); - _val610.read(iprot); - struct.success.put(_key609, _val610); + _key641 = iprot.readString(); + _val642 = new Type(); + _val642.read(iprot); + 
struct.success.put(_key641, _val642); } iprot.readMapEnd(); } @@ -33406,10 +33732,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter612 : struct.success.entrySet()) + for (Map.Entry _iter644 : struct.success.entrySet()) { - oprot.writeString(_iter612.getKey()); - _iter612.getValue().write(oprot); + oprot.writeString(_iter644.getKey()); + _iter644.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -33448,10 +33774,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter613 : struct.success.entrySet()) + for (Map.Entry _iter645 : struct.success.entrySet()) { - oprot.writeString(_iter613.getKey()); - _iter613.getValue().write(oprot); + oprot.writeString(_iter645.getKey()); + _iter645.getValue().write(oprot); } } } @@ -33466,16 +33792,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map614 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map614.size); - String _key615; - Type _val616; - for (int _i617 = 0; _i617 < _map614.size; ++_i617) + org.apache.thrift.protocol.TMap _map646 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map646.size); + String _key647; + Type _val648; + for (int _i649 = 0; _i649 < _map646.size; ++_i649) { - _key615 = iprot.readString(); - _val616 = new Type(); - _val616.read(iprot); - struct.success.put(_key615, _val616); + _key647 = iprot.readString(); + _val648 = new Type(); + _val648.read(iprot); + struct.success.put(_key647, _val648); } } struct.setSuccessIsSet(true); @@ -34510,14 +34836,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list618 = iprot.readListBegin(); - struct.success = new ArrayList(_list618.size); - FieldSchema _elem619; - for (int _i620 = 0; _i620 < _list618.size; ++_i620) + org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); + struct.success = new ArrayList(_list650.size); + FieldSchema _elem651; + for (int _i652 = 0; _i652 < _list650.size; ++_i652) { - _elem619 = new FieldSchema(); - _elem619.read(iprot); - struct.success.add(_elem619); + _elem651 = new FieldSchema(); + _elem651.read(iprot); + struct.success.add(_elem651); } iprot.readListEnd(); } @@ -34570,9 +34896,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter621 : struct.success) + for (FieldSchema _iter653 : struct.success) { - _iter621.write(oprot); + _iter653.write(oprot); } oprot.writeListEnd(); } @@ -34627,9 +34953,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { 
oprot.writeI32(struct.success.size()); - for (FieldSchema _iter622 : struct.success) + for (FieldSchema _iter654 : struct.success) { - _iter622.write(oprot); + _iter654.write(oprot); } } } @@ -34650,14 +34976,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list623 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list623.size); - FieldSchema _elem624; - for (int _i625 = 0; _i625 < _list623.size; ++_i625) + org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list655.size); + FieldSchema _elem656; + for (int _i657 = 0; _i657 < _list655.size; ++_i657) { - _elem624 = new FieldSchema(); - _elem624.read(iprot); - struct.success.add(_elem624); + _elem656 = new FieldSchema(); + _elem656.read(iprot); + struct.success.add(_elem656); } } struct.setSuccessIsSet(true); @@ -35811,14 +36137,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list626 = iprot.readListBegin(); - struct.success = new ArrayList(_list626.size); - FieldSchema _elem627; - for (int _i628 = 0; _i628 < _list626.size; ++_i628) + org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); + struct.success = new ArrayList(_list658.size); + FieldSchema _elem659; + for (int _i660 = 0; _i660 < _list658.size; ++_i660) { - _elem627 = new FieldSchema(); - _elem627.read(iprot); - struct.success.add(_elem627); + _elem659 = new FieldSchema(); + _elem659.read(iprot); + struct.success.add(_elem659); } iprot.readListEnd(); } @@ -35871,9 +36197,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter629 : struct.success) + for (FieldSchema _iter661 : struct.success) { - _iter629.write(oprot); + _iter661.write(oprot); } oprot.writeListEnd(); } @@ -35928,9 +36254,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter630 : struct.success) + for (FieldSchema _iter662 : struct.success) { - _iter630.write(oprot); + _iter662.write(oprot); } } } @@ -35951,14 +36277,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list631 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list631.size); - FieldSchema _elem632; - for (int _i633 = 0; _i633 < _list631.size; ++_i633) + org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list663.size); + FieldSchema _elem664; + for (int _i665 = 0; _i665 < _list663.size; ++_i665) { - _elem632 = new FieldSchema(); - _elem632.read(iprot); - struct.success.add(_elem632); + _elem664 = new FieldSchema(); + _elem664.read(iprot); + struct.success.add(_elem664); 
} } struct.setSuccessIsSet(true); @@ -37003,14 +37329,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list634 = iprot.readListBegin(); - struct.success = new ArrayList(_list634.size); - FieldSchema _elem635; - for (int _i636 = 0; _i636 < _list634.size; ++_i636) + org.apache.thrift.protocol.TList _list666 = iprot.readListBegin(); + struct.success = new ArrayList(_list666.size); + FieldSchema _elem667; + for (int _i668 = 0; _i668 < _list666.size; ++_i668) { - _elem635 = new FieldSchema(); - _elem635.read(iprot); - struct.success.add(_elem635); + _elem667 = new FieldSchema(); + _elem667.read(iprot); + struct.success.add(_elem667); } iprot.readListEnd(); } @@ -37063,9 +37389,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter637 : struct.success) + for (FieldSchema _iter669 : struct.success) { - _iter637.write(oprot); + _iter669.write(oprot); } oprot.writeListEnd(); } @@ -37120,9 +37446,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter638 : struct.success) + for (FieldSchema _iter670 : struct.success) { - _iter638.write(oprot); + _iter670.write(oprot); } } } @@ -37143,14 +37469,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list639 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list639.size); - FieldSchema _elem640; - for (int _i641 = 0; _i641 < _list639.size; ++_i641) + org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list671.size); + FieldSchema _elem672; + for (int _i673 = 0; _i673 < _list671.size; ++_i673) { - _elem640 = new FieldSchema(); - _elem640.read(iprot); - struct.success.add(_elem640); + _elem672 = new FieldSchema(); + _elem672.read(iprot); + struct.success.add(_elem672); } } struct.setSuccessIsSet(true); @@ -38304,14 +38630,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_with_env case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list642 = iprot.readListBegin(); - struct.success = new ArrayList(_list642.size); - FieldSchema _elem643; - for (int _i644 = 0; _i644 < _list642.size; ++_i644) + org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); + struct.success = new ArrayList(_list674.size); + FieldSchema _elem675; + for (int _i676 = 0; _i676 < _list674.size; ++_i676) { - _elem643 = new FieldSchema(); - _elem643.read(iprot); - struct.success.add(_elem643); + _elem675 = new FieldSchema(); + _elem675.read(iprot); + struct.success.add(_elem675); } iprot.readListEnd(); } @@ -38364,9 +38690,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_with_en oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
struct.success.size())); - for (FieldSchema _iter645 : struct.success) + for (FieldSchema _iter677 : struct.success) { - _iter645.write(oprot); + _iter677.write(oprot); } oprot.writeListEnd(); } @@ -38421,9 +38747,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_with_env if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter646 : struct.success) + for (FieldSchema _iter678 : struct.success) { - _iter646.write(oprot); + _iter678.write(oprot); } } } @@ -38444,14 +38770,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_with_envi BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list647 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list647.size); - FieldSchema _elem648; - for (int _i649 = 0; _i649 < _list647.size; ++_i649) + org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list679.size); + FieldSchema _elem680; + for (int _i681 = 0; _i681 < _list679.size; ++_i681) { - _elem648 = new FieldSchema(); - _elem648.read(iprot); - struct.success.add(_elem648); + _elem680 = new FieldSchema(); + _elem680.read(iprot); + struct.success.add(_elem680); } } struct.setSuccessIsSet(true); @@ -43691,13 +44017,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list650 = iprot.readListBegin(); - struct.success = new ArrayList(_list650.size); - String _elem651; - for (int _i652 = 0; _i652 < _list650.size; ++_i652) + org.apache.thrift.protocol.TList _list682 = iprot.readListBegin(); + struct.success = new ArrayList(_list682.size); + String _elem683; + for (int _i684 = 0; _i684 < _list682.size; ++_i684) { - _elem651 = iprot.readString(); - struct.success.add(_elem651); + _elem683 = iprot.readString(); + struct.success.add(_elem683); } iprot.readListEnd(); } @@ -43732,9 +44058,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter653 : struct.success) + for (String _iter685 : struct.success) { - oprot.writeString(_iter653); + oprot.writeString(_iter685); } oprot.writeListEnd(); } @@ -43773,9 +44099,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter654 : struct.success) + for (String _iter686 : struct.success) { - oprot.writeString(_iter654); + oprot.writeString(_iter686); } } } @@ -43790,13 +44116,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list655 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list655.size); - String _elem656; - for (int _i657 = 0; _i657 < _list655.size; ++_i657) + org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new 
ArrayList(_list687.size); + String _elem688; + for (int _i689 = 0; _i689 < _list687.size; ++_i689) { - _elem656 = iprot.readString(); - struct.success.add(_elem656); + _elem688 = iprot.readString(); + struct.success.add(_elem688); } } struct.setSuccessIsSet(true); @@ -44301,13 +44627,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_args case 3: // TBL_TYPES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list658 = iprot.readListBegin(); - struct.tbl_types = new ArrayList(_list658.size); - String _elem659; - for (int _i660 = 0; _i660 < _list658.size; ++_i660) + org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); + struct.tbl_types = new ArrayList(_list690.size); + String _elem691; + for (int _i692 = 0; _i692 < _list690.size; ++_i692) { - _elem659 = iprot.readString(); - struct.tbl_types.add(_elem659); + _elem691 = iprot.readString(); + struct.tbl_types.add(_elem691); } iprot.readListEnd(); } @@ -44343,9 +44669,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_arg oprot.writeFieldBegin(TBL_TYPES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_types.size())); - for (String _iter661 : struct.tbl_types) + for (String _iter693 : struct.tbl_types) { - oprot.writeString(_iter661); + oprot.writeString(_iter693); } oprot.writeListEnd(); } @@ -44388,9 +44714,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args if (struct.isSetTbl_types()) { { oprot.writeI32(struct.tbl_types.size()); - for (String _iter662 : struct.tbl_types) + for (String _iter694 : struct.tbl_types) { - oprot.writeString(_iter662); + oprot.writeString(_iter694); } } } @@ -44410,13 +44736,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list663 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_types = new ArrayList(_list663.size); - String _elem664; - for (int _i665 = 0; _i665 < _list663.size; ++_i665) + org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_types = new ArrayList(_list695.size); + String _elem696; + for (int _i697 = 0; _i697 < _list695.size; ++_i697) { - _elem664 = iprot.readString(); - struct.tbl_types.add(_elem664); + _elem696 = iprot.readString(); + struct.tbl_types.add(_elem696); } } struct.setTbl_typesIsSet(true); @@ -44822,14 +45148,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_meta_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list666 = iprot.readListBegin(); - struct.success = new ArrayList(_list666.size); - TableMeta _elem667; - for (int _i668 = 0; _i668 < _list666.size; ++_i668) + org.apache.thrift.protocol.TList _list698 = iprot.readListBegin(); + struct.success = new ArrayList(_list698.size); + TableMeta _elem699; + for (int _i700 = 0; _i700 < _list698.size; ++_i700) { - _elem667 = new TableMeta(); - _elem667.read(iprot); - struct.success.add(_elem667); + _elem699 = new TableMeta(); + _elem699.read(iprot); + struct.success.add(_elem699); } iprot.readListEnd(); } @@ -44864,9 +45190,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_meta_res 
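+ /*
+  * Each generated struct carries two codecs. The StandardScheme (the readListBegin /
+  * writeListBegin paths above) emits a full field header per field; the TupleScheme (the
+  * read/write overloads taking a TTupleProtocol-backed prot) instead sends a leading
+  * BitSet with one bit per trackable field, followed only by the fields whose bits are
+  * set. That is what the recurring readBitSet pattern decodes, conceptually:
+  *
+  *   TTupleProtocol iprot = (TTupleProtocol) prot;
+  *   BitSet incoming = iprot.readBitSet(2);   // bit 0: success, bit 1: first exception, ...
+  *   if (incoming.get(0)) {
+  *     // success was serialized; read it exactly as in the list codec above
+  *   }
+  */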
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (TableMeta _iter669 : struct.success) + for (TableMeta _iter701 : struct.success) { - _iter669.write(oprot); + _iter701.write(oprot); } oprot.writeListEnd(); } @@ -44905,9 +45231,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (TableMeta _iter670 : struct.success) + for (TableMeta _iter702 : struct.success) { - _iter670.write(oprot); + _iter702.write(oprot); } } } @@ -44922,14 +45248,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_meta_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list671 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list671.size); - TableMeta _elem672; - for (int _i673 = 0; _i673 < _list671.size; ++_i673) + org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list703.size); + TableMeta _elem704; + for (int _i705 = 0; _i705 < _list703.size; ++_i705) { - _elem672 = new TableMeta(); - _elem672.read(iprot); - struct.success.add(_elem672); + _elem704 = new TableMeta(); + _elem704.read(iprot); + struct.success.add(_elem704); } } struct.setSuccessIsSet(true); @@ -45695,13 +46021,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list674 = iprot.readListBegin(); - struct.success = new ArrayList(_list674.size); - String _elem675; - for (int _i676 = 0; _i676 < _list674.size; ++_i676) + org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); + struct.success = new ArrayList(_list706.size); + String _elem707; + for (int _i708 = 0; _i708 < _list706.size; ++_i708) { - _elem675 = iprot.readString(); - struct.success.add(_elem675); + _elem707 = iprot.readString(); + struct.success.add(_elem707); } iprot.readListEnd(); } @@ -45736,9 +46062,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter677 : struct.success) + for (String _iter709 : struct.success) { - oprot.writeString(_iter677); + oprot.writeString(_iter709); } oprot.writeListEnd(); } @@ -45777,9 +46103,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter678 : struct.success) + for (String _iter710 : struct.success) { - oprot.writeString(_iter678); + oprot.writeString(_iter710); } } } @@ -45794,13 +46120,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list679 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list679.size); - String _elem680; - for (int _i681 = 0; _i681 < _list679.size; ++_i681) + org.apache.thrift.protocol.TList _list711 = 
new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list711.size); + String _elem712; + for (int _i713 = 0; _i713 < _list711.size; ++_i713) { - _elem680 = iprot.readString(); - struct.success.add(_elem680); + _elem712 = iprot.readString(); + struct.success.add(_elem712); } } struct.setSuccessIsSet(true); @@ -47253,13 +47579,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list682 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list682.size); - String _elem683; - for (int _i684 = 0; _i684 < _list682.size; ++_i684) + org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); + struct.tbl_names = new ArrayList(_list714.size); + String _elem715; + for (int _i716 = 0; _i716 < _list714.size; ++_i716) { - _elem683 = iprot.readString(); - struct.tbl_names.add(_elem683); + _elem715 = iprot.readString(); + struct.tbl_names.add(_elem715); } iprot.readListEnd(); } @@ -47290,9 +47616,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter685 : struct.tbl_names) + for (String _iter717 : struct.tbl_names) { - oprot.writeString(_iter685); + oprot.writeString(_iter717); } oprot.writeListEnd(); } @@ -47329,9 +47655,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter686 : struct.tbl_names) + for (String _iter718 : struct.tbl_names) { - oprot.writeString(_iter686); + oprot.writeString(_iter718); } } } @@ -47347,13 +47673,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list687 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list687.size); - String _elem688; - for (int _i689 = 0; _i689 < _list687.size; ++_i689) + org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list719.size); + String _elem720; + for (int _i721 = 0; _i721 < _list719.size; ++_i721) { - _elem688 = iprot.readString(); - struct.tbl_names.add(_elem688); + _elem720 = iprot.readString(); + struct.tbl_names.add(_elem720); } } struct.setTbl_namesIsSet(true); @@ -47921,14 +48247,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list690 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list690.size); - Table _elem691; - for (int _i692 = 0; _i692 < _list690.size; ++_i692) + org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); + struct.success = new ArrayList<Table>
(_list722.size); + Table _elem723; + for (int _i724 = 0; _i724 < _list722.size; ++_i724) { - _elem691 = new Table(); - _elem691.read(iprot); - struct.success.add(_elem691); + _elem723 = new Table(); + _elem723.read(iprot); + struct.success.add(_elem723); } iprot.readListEnd(); } @@ -47981,9 +48307,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter693 : struct.success) + for (Table _iter725 : struct.success) { - _iter693.write(oprot); + _iter725.write(oprot); } oprot.writeListEnd(); } @@ -48038,9 +48364,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter694 : struct.success) + for (Table _iter726 : struct.success) { - _iter694.write(oprot); + _iter726.write(oprot); } } } @@ -48061,14 +48387,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list695 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
<Table>(_list695.size); - Table _elem696; - for (int _i697 = 0; _i697 < _list695.size; ++_i697) + org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list727.size); + Table _elem728; + for (int _i729 = 0; _i729 < _list727.size; ++_i729) { - _elem696 = new Table(); - _elem696.read(iprot); - struct.success.add(_elem696); + _elem728 = new Table(); + _elem728.read(iprot); + struct.success.add(_elem728); } } struct.setSuccessIsSet(true); @@ -49214,13 +49540,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list698 = iprot.readListBegin(); - struct.success = new ArrayList(_list698.size); - String _elem699; - for (int _i700 = 0; _i700 < _list698.size; ++_i700) + org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); + struct.success = new ArrayList(_list730.size); + String _elem731; + for (int _i732 = 0; _i732 < _list730.size; ++_i732) { - _elem699 = iprot.readString(); - struct.success.add(_elem699); + _elem731 = iprot.readString(); + struct.success.add(_elem731); } iprot.readListEnd(); } @@ -49273,9 +49599,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter701 : struct.success) + for (String _iter733 : struct.success) { - oprot.writeString(_iter701); + oprot.writeString(_iter733); } oprot.writeListEnd(); } @@ -49330,9 +49656,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter702 : struct.success) + for (String _iter734 : struct.success) { - oprot.writeString(_iter702); + oprot.writeString(_iter734); } } } @@ -49353,13 +49679,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list703 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list703.size); - String _elem704; - for (int _i705 = 0; _i705 < _list703.size; ++_i705) + org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list735.size); + String _elem736; + for (int _i737 = 0; _i737 < _list735.size; ++_i737) { - _elem704 = iprot.readString(); - struct.success.add(_elem704); + _elem736 = iprot.readString(); + struct.success.add(_elem736); } } struct.setSuccessIsSet(true); @@ -55218,14 +55544,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list706 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list706.size); - Partition _elem707; - for (int _i708 = 0; _i708 < _list706.size; ++_i708) + org.apache.thrift.protocol.TList _list738 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list738.size); + Partition _elem739; + for (int _i740 = 0; _i740 < _list738.size; ++_i740) { - _elem707 = new Partition(); - _elem707.read(iprot); - struct.new_parts.add(_elem707); + _elem739 = new Partition(); + _elem739.read(iprot); + struct.new_parts.add(_elem739); } iprot.readListEnd(); } @@ -55251,9 +55577,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg 
oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter709 : struct.new_parts) + for (Partition _iter741 : struct.new_parts) { - _iter709.write(oprot); + _iter741.write(oprot); } oprot.writeListEnd(); } @@ -55284,9 +55610,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter710 : struct.new_parts) + for (Partition _iter742 : struct.new_parts) { - _iter710.write(oprot); + _iter742.write(oprot); } } } @@ -55298,14 +55624,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list711 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list711.size); - Partition _elem712; - for (int _i713 = 0; _i713 < _list711.size; ++_i713) + org.apache.thrift.protocol.TList _list743 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list743.size); + Partition _elem744; + for (int _i745 = 0; _i745 < _list743.size; ++_i745) { - _elem712 = new Partition(); - _elem712.read(iprot); - struct.new_parts.add(_elem712); + _elem744 = new Partition(); + _elem744.read(iprot); + struct.new_parts.add(_elem744); } } struct.setNew_partsIsSet(true); @@ -56306,14 +56632,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_pspe case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list714 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list714.size); - PartitionSpec _elem715; - for (int _i716 = 0; _i716 < _list714.size; ++_i716) + org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list746.size); + PartitionSpec _elem747; + for (int _i748 = 0; _i748 < _list746.size; ++_i748) { - _elem715 = new PartitionSpec(); - _elem715.read(iprot); - struct.new_parts.add(_elem715); + _elem747 = new PartitionSpec(); + _elem747.read(iprot); + struct.new_parts.add(_elem747); } iprot.readListEnd(); } @@ -56339,9 +56665,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_psp oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (PartitionSpec _iter717 : struct.new_parts) + for (PartitionSpec _iter749 : struct.new_parts) { - _iter717.write(oprot); + _iter749.write(oprot); } oprot.writeListEnd(); } @@ -56372,9 +56698,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspe if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (PartitionSpec _iter718 : struct.new_parts) + for (PartitionSpec _iter750 : struct.new_parts) { - _iter718.write(oprot); + _iter750.write(oprot); } } } @@ -56386,14 +56712,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_pspec BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list719 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new 
ArrayList(_list719.size); - PartitionSpec _elem720; - for (int _i721 = 0; _i721 < _list719.size; ++_i721) + org.apache.thrift.protocol.TList _list751 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list751.size); + PartitionSpec _elem752; + for (int _i753 = 0; _i753 < _list751.size; ++_i753) { - _elem720 = new PartitionSpec(); - _elem720.read(iprot); - struct.new_parts.add(_elem720); + _elem752 = new PartitionSpec(); + _elem752.read(iprot); + struct.new_parts.add(_elem752); } } struct.setNew_partsIsSet(true); @@ -57569,13 +57895,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list722 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list722.size); - String _elem723; - for (int _i724 = 0; _i724 < _list722.size; ++_i724) + org.apache.thrift.protocol.TList _list754 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list754.size); + String _elem755; + for (int _i756 = 0; _i756 < _list754.size; ++_i756) { - _elem723 = iprot.readString(); - struct.part_vals.add(_elem723); + _elem755 = iprot.readString(); + struct.part_vals.add(_elem755); } iprot.readListEnd(); } @@ -57611,9 +57937,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter725 : struct.part_vals) + for (String _iter757 : struct.part_vals) { - oprot.writeString(_iter725); + oprot.writeString(_iter757); } oprot.writeListEnd(); } @@ -57656,9 +57982,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter726 : struct.part_vals) + for (String _iter758 : struct.part_vals) { - oprot.writeString(_iter726); + oprot.writeString(_iter758); } } } @@ -57678,13 +58004,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list727 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list727.size); - String _elem728; - for (int _i729 = 0; _i729 < _list727.size; ++_i729) + org.apache.thrift.protocol.TList _list759 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list759.size); + String _elem760; + for (int _i761 = 0; _i761 < _list759.size; ++_i761) { - _elem728 = iprot.readString(); - struct.part_vals.add(_elem728); + _elem760 = iprot.readString(); + struct.part_vals.add(_elem760); } } struct.setPart_valsIsSet(true); @@ -59993,13 +60319,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list730 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list730.size); - String _elem731; - for (int _i732 = 0; _i732 < _list730.size; ++_i732) + org.apache.thrift.protocol.TList _list762 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list762.size); + String _elem763; + for (int _i764 = 0; _i764 < _list762.size; ++_i764) { - 
_elem731 = iprot.readString(); - struct.part_vals.add(_elem731); + _elem763 = iprot.readString(); + struct.part_vals.add(_elem763); } iprot.readListEnd(); } @@ -60044,9 +60370,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter733 : struct.part_vals) + for (String _iter765 : struct.part_vals) { - oprot.writeString(_iter733); + oprot.writeString(_iter765); } oprot.writeListEnd(); } @@ -60097,9 +60423,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter734 : struct.part_vals) + for (String _iter766 : struct.part_vals) { - oprot.writeString(_iter734); + oprot.writeString(_iter766); } } } @@ -60122,13 +60448,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list735 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list735.size); - String _elem736; - for (int _i737 = 0; _i737 < _list735.size; ++_i737) + org.apache.thrift.protocol.TList _list767 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list767.size); + String _elem768; + for (int _i769 = 0; _i769 < _list767.size; ++_i769) { - _elem736 = iprot.readString(); - struct.part_vals.add(_elem736); + _elem768 = iprot.readString(); + struct.part_vals.add(_elem768); } } struct.setPart_valsIsSet(true); @@ -63998,13 +64324,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list738 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list738.size); - String _elem739; - for (int _i740 = 0; _i740 < _list738.size; ++_i740) + org.apache.thrift.protocol.TList _list770 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list770.size); + String _elem771; + for (int _i772 = 0; _i772 < _list770.size; ++_i772) { - _elem739 = iprot.readString(); - struct.part_vals.add(_elem739); + _elem771 = iprot.readString(); + struct.part_vals.add(_elem771); } iprot.readListEnd(); } @@ -64048,9 +64374,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter741 : struct.part_vals) + for (String _iter773 : struct.part_vals) { - oprot.writeString(_iter741); + oprot.writeString(_iter773); } oprot.writeListEnd(); } @@ -64099,9 +64425,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter742 : struct.part_vals) + for (String _iter774 : struct.part_vals) { - oprot.writeString(_iter742); + oprot.writeString(_iter774); } } } @@ -64124,13 +64450,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list743 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list743.size); - String _elem744; - for (int _i745 = 0; _i745 < _list743.size; ++_i745) + org.apache.thrift.protocol.TList _list775 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list775.size); + String _elem776; + for (int _i777 = 0; _i777 < _list775.size; ++_i777) { - _elem744 = iprot.readString(); - struct.part_vals.add(_elem744); + _elem776 = iprot.readString(); + struct.part_vals.add(_elem776); } } struct.setPart_valsIsSet(true); @@ -65369,13 +65695,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list746 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list746.size); - String _elem747; - for (int _i748 = 0; _i748 < _list746.size; ++_i748) + org.apache.thrift.protocol.TList _list778 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list778.size); + String _elem779; + for (int _i780 = 0; _i780 < _list778.size; ++_i780) { - _elem747 = iprot.readString(); - struct.part_vals.add(_elem747); + _elem779 = iprot.readString(); + struct.part_vals.add(_elem779); } iprot.readListEnd(); } @@ -65428,9 +65754,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter749 : struct.part_vals) + for (String _iter781 : struct.part_vals) { - oprot.writeString(_iter749); + oprot.writeString(_iter781); } oprot.writeListEnd(); } @@ -65487,9 +65813,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter750 : struct.part_vals) + for (String _iter782 : struct.part_vals) { - oprot.writeString(_iter750); + oprot.writeString(_iter782); } } } @@ -65515,13 +65841,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list751 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list751.size); - String _elem752; - for (int _i753 = 0; _i753 < _list751.size; ++_i753) + org.apache.thrift.protocol.TList _list783 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list783.size); + String _elem784; + for (int _i785 = 0; _i785 < _list783.size; ++_i785) { - _elem752 = iprot.readString(); - struct.part_vals.add(_elem752); + _elem784 = iprot.readString(); + struct.part_vals.add(_elem784); } } struct.setPart_valsIsSet(true); @@ -70123,13 +70449,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list754 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list754.size); - String _elem755; - for (int _i756 = 0; _i756 < _list754.size; ++_i756) + org.apache.thrift.protocol.TList _list786 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list786.size); + String _elem787; + for 
(int _i788 = 0; _i788 < _list786.size; ++_i788) { - _elem755 = iprot.readString(); - struct.part_vals.add(_elem755); + _elem787 = iprot.readString(); + struct.part_vals.add(_elem787); } iprot.readListEnd(); } @@ -70165,9 +70491,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter757 : struct.part_vals) + for (String _iter789 : struct.part_vals) { - oprot.writeString(_iter757); + oprot.writeString(_iter789); } oprot.writeListEnd(); } @@ -70210,9 +70536,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter758 : struct.part_vals) + for (String _iter790 : struct.part_vals) { - oprot.writeString(_iter758); + oprot.writeString(_iter790); } } } @@ -70232,13 +70558,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list759 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list759.size); - String _elem760; - for (int _i761 = 0; _i761 < _list759.size; ++_i761) + org.apache.thrift.protocol.TList _list791 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list791.size); + String _elem792; + for (int _i793 = 0; _i793 < _list791.size; ++_i793) { - _elem760 = iprot.readString(); - struct.part_vals.add(_elem760); + _elem792 = iprot.readString(); + struct.part_vals.add(_elem792); } } struct.setPart_valsIsSet(true); @@ -71456,15 +71782,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map762 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map762.size); - String _key763; - String _val764; - for (int _i765 = 0; _i765 < _map762.size; ++_i765) + org.apache.thrift.protocol.TMap _map794 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map794.size); + String _key795; + String _val796; + for (int _i797 = 0; _i797 < _map794.size; ++_i797) { - _key763 = iprot.readString(); - _val764 = iprot.readString(); - struct.partitionSpecs.put(_key763, _val764); + _key795 = iprot.readString(); + _val796 = iprot.readString(); + struct.partitionSpecs.put(_key795, _val796); } iprot.readMapEnd(); } @@ -71522,10 +71848,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter766 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter798 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter766.getKey()); - oprot.writeString(_iter766.getValue()); + oprot.writeString(_iter798.getKey()); + oprot.writeString(_iter798.getValue()); } oprot.writeMapEnd(); } @@ -71588,10 +71914,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for 
(Map.Entry _iter767 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter799 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter767.getKey()); - oprot.writeString(_iter767.getValue()); + oprot.writeString(_iter799.getKey()); + oprot.writeString(_iter799.getValue()); } } } @@ -71615,15 +71941,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map768 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map768.size); - String _key769; - String _val770; - for (int _i771 = 0; _i771 < _map768.size; ++_i771) + org.apache.thrift.protocol.TMap _map800 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map800.size); + String _key801; + String _val802; + for (int _i803 = 0; _i803 < _map800.size; ++_i803) { - _key769 = iprot.readString(); - _val770 = iprot.readString(); - struct.partitionSpecs.put(_key769, _val770); + _key801 = iprot.readString(); + _val802 = iprot.readString(); + struct.partitionSpecs.put(_key801, _val802); } } struct.setPartitionSpecsIsSet(true); @@ -73069,15 +73395,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map772 = iprot.readMapBegin(); - struct.partitionSpecs = new HashMap(2*_map772.size); - String _key773; - String _val774; - for (int _i775 = 0; _i775 < _map772.size; ++_i775) + org.apache.thrift.protocol.TMap _map804 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map804.size); + String _key805; + String _val806; + for (int _i807 = 0; _i807 < _map804.size; ++_i807) { - _key773 = iprot.readString(); - _val774 = iprot.readString(); - struct.partitionSpecs.put(_key773, _val774); + _key805 = iprot.readString(); + _val806 = iprot.readString(); + struct.partitionSpecs.put(_key805, _val806); } iprot.readMapEnd(); } @@ -73135,10 +73461,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter776 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter808 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter776.getKey()); - oprot.writeString(_iter776.getValue()); + oprot.writeString(_iter808.getKey()); + oprot.writeString(_iter808.getValue()); } oprot.writeMapEnd(); } @@ -73201,10 +73527,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter777 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter809 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter777.getKey()); - oprot.writeString(_iter777.getValue()); + oprot.writeString(_iter809.getKey()); + oprot.writeString(_iter809.getValue()); } } } @@ -73228,15 +73554,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - 
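None of the hunks in this stretch change behavior: the Thrift compiler numbers its scratch variables (_list, _elem, _i, _iter, _map, _key, _val) sequentially across the whole generated file, so adding methods or fields earlier in the IDL shifts every later suffix when the file is re-emitted. In the hunks shown the shift is a uniform +32 (_list743 becomes _list775, _map762 becomes _map794, and so on). For orientation, here is a minimal, illustrative sketch of the map<string,string> codec pattern that the exchange_partition(s) hunks re-emit; the class and method names are mine, not part of the generated file. Note that the generated code presizes the HashMap at 2*size so the default 0.75 load factor never triggers a rehash.

import java.util.HashMap;
import java.util.Map;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TMap;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

// Illustrative only: mirrors the generated partitionSpecs (map<string,string>) codec.
final class PartitionSpecsCodecSketch {
  static Map<String, String> read(TProtocol iprot) throws TException {
    TMap map = iprot.readMapBegin();                       // key/value types + entry count
    Map<String, String> specs = new HashMap<String, String>(2 * map.size);
    for (int i = 0; i < map.size; ++i) {
      String key = iprot.readString();
      String val = iprot.readString();
      specs.put(key, val);
    }
    iprot.readMapEnd();
    return specs;
  }

  static void write(TProtocol oprot, Map<String, String> specs) throws TException {
    oprot.writeMapBegin(new TMap(TType.STRING, TType.STRING, specs.size()));
    for (Map.Entry<String, String> e : specs.entrySet()) {
      oprot.writeString(e.getKey());
      oprot.writeString(e.getValue());
    }
    oprot.writeMapEnd();
  }
}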
org.apache.thrift.protocol.TMap _map778 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map778.size); - String _key779; - String _val780; - for (int _i781 = 0; _i781 < _map778.size; ++_i781) + org.apache.thrift.protocol.TMap _map810 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map810.size); + String _key811; + String _val812; + for (int _i813 = 0; _i813 < _map810.size; ++_i813) { - _key779 = iprot.readString(); - _val780 = iprot.readString(); - struct.partitionSpecs.put(_key779, _val780); + _key811 = iprot.readString(); + _val812 = iprot.readString(); + struct.partitionSpecs.put(_key811, _val812); } } struct.setPartitionSpecsIsSet(true); @@ -73901,14 +74227,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partitions case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list782 = iprot.readListBegin(); - struct.success = new ArrayList(_list782.size); - Partition _elem783; - for (int _i784 = 0; _i784 < _list782.size; ++_i784) + org.apache.thrift.protocol.TList _list814 = iprot.readListBegin(); + struct.success = new ArrayList(_list814.size); + Partition _elem815; + for (int _i816 = 0; _i816 < _list814.size; ++_i816) { - _elem783 = new Partition(); - _elem783.read(iprot); - struct.success.add(_elem783); + _elem815 = new Partition(); + _elem815.read(iprot); + struct.success.add(_elem815); } iprot.readListEnd(); } @@ -73970,9 +74296,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter785 : struct.success) + for (Partition _iter817 : struct.success) { - _iter785.write(oprot); + _iter817.write(oprot); } oprot.writeListEnd(); } @@ -74035,9 +74361,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partitions if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter786 : struct.success) + for (Partition _iter818 : struct.success) { - _iter786.write(oprot); + _iter818.write(oprot); } } } @@ -74061,14 +74387,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partitions_ BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list787 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list787.size); - Partition _elem788; - for (int _i789 = 0; _i789 < _list787.size; ++_i789) + org.apache.thrift.protocol.TList _list819 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list819.size); + Partition _elem820; + for (int _i821 = 0; _i821 < _list819.size; ++_i821) { - _elem788 = new Partition(); - _elem788.read(iprot); - struct.success.add(_elem788); + _elem820 = new Partition(); + _elem820.read(iprot); + struct.success.add(_elem820); } } struct.setSuccessIsSet(true); @@ -74767,13 +75093,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == 
org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list790 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list790.size); - String _elem791; - for (int _i792 = 0; _i792 < _list790.size; ++_i792) + org.apache.thrift.protocol.TList _list822 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list822.size); + String _elem823; + for (int _i824 = 0; _i824 < _list822.size; ++_i824) { - _elem791 = iprot.readString(); - struct.part_vals.add(_elem791); + _elem823 = iprot.readString(); + struct.part_vals.add(_elem823); } iprot.readListEnd(); } @@ -74793,13 +75119,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list793 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list793.size); - String _elem794; - for (int _i795 = 0; _i795 < _list793.size; ++_i795) + org.apache.thrift.protocol.TList _list825 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list825.size); + String _elem826; + for (int _i827 = 0; _i827 < _list825.size; ++_i827) { - _elem794 = iprot.readString(); - struct.group_names.add(_elem794); + _elem826 = iprot.readString(); + struct.group_names.add(_elem826); } iprot.readListEnd(); } @@ -74835,9 +75161,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter796 : struct.part_vals) + for (String _iter828 : struct.part_vals) { - oprot.writeString(_iter796); + oprot.writeString(_iter828); } oprot.writeListEnd(); } @@ -74852,9 +75178,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter797 : struct.group_names) + for (String _iter829 : struct.group_names) { - oprot.writeString(_iter797); + oprot.writeString(_iter829); } oprot.writeListEnd(); } @@ -74903,9 +75229,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter798 : struct.part_vals) + for (String _iter830 : struct.part_vals) { - oprot.writeString(_iter798); + oprot.writeString(_iter830); } } } @@ -74915,9 +75241,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter799 : struct.group_names) + for (String _iter831 : struct.group_names) { - oprot.writeString(_iter799); + oprot.writeString(_iter831); } } } @@ -74937,13 +75263,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list800 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list800.size); - String _elem801; - for (int _i802 = 0; _i802 < _list800.size; ++_i802) + org.apache.thrift.protocol.TList _list832 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list832.size); + String _elem833; + for 
(int _i834 = 0; _i834 < _list832.size; ++_i834) { - _elem801 = iprot.readString(); - struct.part_vals.add(_elem801); + _elem833 = iprot.readString(); + struct.part_vals.add(_elem833); } } struct.setPart_valsIsSet(true); @@ -74954,13 +75280,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list803 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list803.size); - String _elem804; - for (int _i805 = 0; _i805 < _list803.size; ++_i805) + org.apache.thrift.protocol.TList _list835 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list835.size); + String _elem836; + for (int _i837 = 0; _i837 < _list835.size; ++_i837) { - _elem804 = iprot.readString(); - struct.group_names.add(_elem804); + _elem836 = iprot.readString(); + struct.group_names.add(_elem836); } } struct.setGroup_namesIsSet(true); @@ -77729,14 +78055,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list806 = iprot.readListBegin(); - struct.success = new ArrayList(_list806.size); - Partition _elem807; - for (int _i808 = 0; _i808 < _list806.size; ++_i808) + org.apache.thrift.protocol.TList _list838 = iprot.readListBegin(); + struct.success = new ArrayList(_list838.size); + Partition _elem839; + for (int _i840 = 0; _i840 < _list838.size; ++_i840) { - _elem807 = new Partition(); - _elem807.read(iprot); - struct.success.add(_elem807); + _elem839 = new Partition(); + _elem839.read(iprot); + struct.success.add(_elem839); } iprot.readListEnd(); } @@ -77780,9 +78106,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter809 : struct.success) + for (Partition _iter841 : struct.success) { - _iter809.write(oprot); + _iter841.write(oprot); } oprot.writeListEnd(); } @@ -77829,9 +78155,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter810 : struct.success) + for (Partition _iter842 : struct.success) { - _iter810.write(oprot); + _iter842.write(oprot); } } } @@ -77849,14 +78175,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list811 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list811.size); - Partition _elem812; - for (int _i813 = 0; _i813 < _list811.size; ++_i813) + org.apache.thrift.protocol.TList _list843 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list843.size); + Partition _elem844; + for (int _i845 = 0; _i845 < _list843.size; ++_i845) { - _elem812 = new Partition(); - _elem812.read(iprot); - struct.success.add(_elem812); + _elem844 = new Partition(); + _elem844.read(iprot); + struct.success.add(_elem844); } } struct.setSuccessIsSet(true); @@ -78546,13 +78872,13 @@ public 
void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list814 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list814.size); - String _elem815; - for (int _i816 = 0; _i816 < _list814.size; ++_i816) + org.apache.thrift.protocol.TList _list846 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list846.size); + String _elem847; + for (int _i848 = 0; _i848 < _list846.size; ++_i848) { - _elem815 = iprot.readString(); - struct.group_names.add(_elem815); + _elem847 = iprot.readString(); + struct.group_names.add(_elem847); } iprot.readListEnd(); } @@ -78596,9 +78922,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter817 : struct.group_names) + for (String _iter849 : struct.group_names) { - oprot.writeString(_iter817); + oprot.writeString(_iter849); } oprot.writeListEnd(); } @@ -78653,9 +78979,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter818 : struct.group_names) + for (String _iter850 : struct.group_names) { - oprot.writeString(_iter818); + oprot.writeString(_iter850); } } } @@ -78683,13 +79009,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list819 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list819.size); - String _elem820; - for (int _i821 = 0; _i821 < _list819.size; ++_i821) + org.apache.thrift.protocol.TList _list851 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list851.size); + String _elem852; + for (int _i853 = 0; _i853 < _list851.size; ++_i853) { - _elem820 = iprot.readString(); - struct.group_names.add(_elem820); + _elem852 = iprot.readString(); + struct.group_names.add(_elem852); } } struct.setGroup_namesIsSet(true); @@ -79176,14 +79502,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list822 = iprot.readListBegin(); - struct.success = new ArrayList(_list822.size); - Partition _elem823; - for (int _i824 = 0; _i824 < _list822.size; ++_i824) + org.apache.thrift.protocol.TList _list854 = iprot.readListBegin(); + struct.success = new ArrayList(_list854.size); + Partition _elem855; + for (int _i856 = 0; _i856 < _list854.size; ++_i856) { - _elem823 = new Partition(); - _elem823.read(iprot); - struct.success.add(_elem823); + _elem855 = new Partition(); + _elem855.read(iprot); + struct.success.add(_elem855); } iprot.readListEnd(); } @@ -79227,9 +79553,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter825 : struct.success) + for (Partition _iter857 : struct.success) { - _iter825.write(oprot); + 
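The get_partitions result hunks above use the struct-list variant of the same pattern: each Partition element serializes itself through its generated read(TProtocol)/write(TProtocol) methods. A hedged sketch of that round trip follows; the helper class is illustrative, while Partition is the real generated metastore struct.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.Partition;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

// Illustrative only: mirrors the generated List<Partition> success-field codec.
final class PartitionListCodecSketch {
  static List<Partition> read(TProtocol iprot) throws TException {
    TList list = iprot.readListBegin();                    // element type + count
    List<Partition> parts = new ArrayList<Partition>(list.size);
    for (int i = 0; i < list.size; ++i) {
      Partition p = new Partition();
      p.read(iprot);                                       // struct decodes itself
      parts.add(p);
    }
    iprot.readListEnd();
    return parts;
  }

  static void write(TProtocol oprot, List<Partition> parts) throws TException {
    oprot.writeListBegin(new TList(TType.STRUCT, parts.size()));
    for (Partition p : parts) {
      p.write(oprot);                                      // struct encodes itself
    }
    oprot.writeListEnd();
  }
}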
_iter857.write(oprot); } oprot.writeListEnd(); } @@ -79276,9 +79602,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter826 : struct.success) + for (Partition _iter858 : struct.success) { - _iter826.write(oprot); + _iter858.write(oprot); } } } @@ -79296,14 +79622,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list827 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list827.size); - Partition _elem828; - for (int _i829 = 0; _i829 < _list827.size; ++_i829) + org.apache.thrift.protocol.TList _list859 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list859.size); + Partition _elem860; + for (int _i861 = 0; _i861 < _list859.size; ++_i861) { - _elem828 = new Partition(); - _elem828.read(iprot); - struct.success.add(_elem828); + _elem860 = new Partition(); + _elem860.read(iprot); + struct.success.add(_elem860); } } struct.setSuccessIsSet(true); @@ -80366,14 +80692,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_pspe case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list830 = iprot.readListBegin(); - struct.success = new ArrayList(_list830.size); - PartitionSpec _elem831; - for (int _i832 = 0; _i832 < _list830.size; ++_i832) + org.apache.thrift.protocol.TList _list862 = iprot.readListBegin(); + struct.success = new ArrayList(_list862.size); + PartitionSpec _elem863; + for (int _i864 = 0; _i864 < _list862.size; ++_i864) { - _elem831 = new PartitionSpec(); - _elem831.read(iprot); - struct.success.add(_elem831); + _elem863 = new PartitionSpec(); + _elem863.read(iprot); + struct.success.add(_elem863); } iprot.readListEnd(); } @@ -80417,9 +80743,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_psp oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter833 : struct.success) + for (PartitionSpec _iter865 : struct.success) { - _iter833.write(oprot); + _iter865.write(oprot); } oprot.writeListEnd(); } @@ -80466,9 +80792,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspe if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter834 : struct.success) + for (PartitionSpec _iter866 : struct.success) { - _iter834.write(oprot); + _iter866.write(oprot); } } } @@ -80486,14 +80812,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_pspec BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list835 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list835.size); - PartitionSpec _elem836; - for (int _i837 = 0; _i837 < _list835.size; ++_i837) + org.apache.thrift.protocol.TList _list867 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list867.size); + PartitionSpec _elem868; + for (int _i869 = 0; _i869 < 
_list867.size; ++_i869) { - _elem836 = new PartitionSpec(); - _elem836.read(iprot); - struct.success.add(_elem836); + _elem868 = new PartitionSpec(); + _elem868.read(iprot); + struct.success.add(_elem868); } } struct.setSuccessIsSet(true); @@ -81472,13 +81798,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list838 = iprot.readListBegin(); - struct.success = new ArrayList(_list838.size); - String _elem839; - for (int _i840 = 0; _i840 < _list838.size; ++_i840) + org.apache.thrift.protocol.TList _list870 = iprot.readListBegin(); + struct.success = new ArrayList(_list870.size); + String _elem871; + for (int _i872 = 0; _i872 < _list870.size; ++_i872) { - _elem839 = iprot.readString(); - struct.success.add(_elem839); + _elem871 = iprot.readString(); + struct.success.add(_elem871); } iprot.readListEnd(); } @@ -81513,9 +81839,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter841 : struct.success) + for (String _iter873 : struct.success) { - oprot.writeString(_iter841); + oprot.writeString(_iter873); } oprot.writeListEnd(); } @@ -81554,9 +81880,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter842 : struct.success) + for (String _iter874 : struct.success) { - oprot.writeString(_iter842); + oprot.writeString(_iter874); } } } @@ -81571,13 +81897,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list843 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list843.size); - String _elem844; - for (int _i845 = 0; _i845 < _list843.size; ++_i845) + org.apache.thrift.protocol.TList _list875 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list875.size); + String _elem876; + for (int _i877 = 0; _i877 < _list875.size; ++_i877) { - _elem844 = iprot.readString(); - struct.success.add(_elem844); + _elem876 = iprot.readString(); + struct.success.add(_elem876); } } struct.setSuccessIsSet(true); @@ -82165,13 +82491,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list846 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list846.size); - String _elem847; - for (int _i848 = 0; _i848 < _list846.size; ++_i848) + org.apache.thrift.protocol.TList _list878 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list878.size); + String _elem879; + for (int _i880 = 0; _i880 < _list878.size; ++_i880) { - _elem847 = iprot.readString(); - struct.part_vals.add(_elem847); + _elem879 = iprot.readString(); + struct.part_vals.add(_elem879); } iprot.readListEnd(); } @@ -82215,9 +82541,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter849 : struct.part_vals) + for (String _iter881 : struct.part_vals) { - oprot.writeString(_iter849); + oprot.writeString(_iter881); } oprot.writeListEnd(); } @@ -82266,9 +82592,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter850 : struct.part_vals) + for (String _iter882 : struct.part_vals) { - oprot.writeString(_iter850); + oprot.writeString(_iter882); } } } @@ -82291,13 +82617,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list851 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list851.size); - String _elem852; - for (int _i853 = 0; _i853 < _list851.size; ++_i853) + org.apache.thrift.protocol.TList _list883 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list883.size); + String _elem884; + for (int _i885 = 0; _i885 < _list883.size; ++_i885) { - _elem852 = iprot.readString(); - struct.part_vals.add(_elem852); + _elem884 = iprot.readString(); + struct.part_vals.add(_elem884); } } struct.setPart_valsIsSet(true); @@ -82788,14 +83114,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list854 = iprot.readListBegin(); - struct.success = new ArrayList(_list854.size); - Partition _elem855; - for (int _i856 = 0; _i856 < _list854.size; ++_i856) + org.apache.thrift.protocol.TList _list886 = iprot.readListBegin(); + struct.success = new ArrayList(_list886.size); + Partition _elem887; + for (int _i888 = 0; _i888 < _list886.size; ++_i888) { - _elem855 = new Partition(); - _elem855.read(iprot); - struct.success.add(_elem855); + _elem887 = new Partition(); + _elem887.read(iprot); + struct.success.add(_elem887); } iprot.readListEnd(); } @@ -82839,9 +83165,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter857 : struct.success) + for (Partition _iter889 : struct.success) { - _iter857.write(oprot); + _iter889.write(oprot); } oprot.writeListEnd(); } @@ -82888,9 +83214,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter858 : struct.success) + for (Partition _iter890 : struct.success) { - _iter858.write(oprot); + _iter890.write(oprot); } } } @@ -82908,14 +83234,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list859 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list859.size); - Partition _elem860; - for (int _i861 = 0; _i861 < _list859.size; ++_i861) + org.apache.thrift.protocol.TList _list891 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.success = new ArrayList(_list891.size); + Partition _elem892; + for (int _i893 = 0; _i893 < _list891.size; ++_i893) { - _elem860 = new Partition(); - _elem860.read(iprot); - struct.success.add(_elem860); + _elem892 = new Partition(); + _elem892.read(iprot); + struct.success.add(_elem892); } } struct.setSuccessIsSet(true); @@ -83687,13 +84013,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list862 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list862.size); - String _elem863; - for (int _i864 = 0; _i864 < _list862.size; ++_i864) + org.apache.thrift.protocol.TList _list894 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list894.size); + String _elem895; + for (int _i896 = 0; _i896 < _list894.size; ++_i896) { - _elem863 = iprot.readString(); - struct.part_vals.add(_elem863); + _elem895 = iprot.readString(); + struct.part_vals.add(_elem895); } iprot.readListEnd(); } @@ -83721,13 +84047,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list865 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list865.size); - String _elem866; - for (int _i867 = 0; _i867 < _list865.size; ++_i867) + org.apache.thrift.protocol.TList _list897 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list897.size); + String _elem898; + for (int _i899 = 0; _i899 < _list897.size; ++_i899) { - _elem866 = iprot.readString(); - struct.group_names.add(_elem866); + _elem898 = iprot.readString(); + struct.group_names.add(_elem898); } iprot.readListEnd(); } @@ -83763,9 +84089,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter868 : struct.part_vals) + for (String _iter900 : struct.part_vals) { - oprot.writeString(_iter868); + oprot.writeString(_iter900); } oprot.writeListEnd(); } @@ -83783,9 +84109,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter869 : struct.group_names) + for (String _iter901 : struct.group_names) { - oprot.writeString(_iter869); + oprot.writeString(_iter901); } oprot.writeListEnd(); } @@ -83837,9 +84163,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter870 : struct.part_vals) + for (String _iter902 : struct.part_vals) { - oprot.writeString(_iter870); + oprot.writeString(_iter902); } } } @@ -83852,9 +84178,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter871 : struct.group_names) + for (String _iter903 : struct.group_names) { - oprot.writeString(_iter871); + oprot.writeString(_iter903); } } } @@ -83874,13 +84200,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if 
(incoming.get(2)) { { - org.apache.thrift.protocol.TList _list872 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list872.size); - String _elem873; - for (int _i874 = 0; _i874 < _list872.size; ++_i874) + org.apache.thrift.protocol.TList _list904 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list904.size); + String _elem905; + for (int _i906 = 0; _i906 < _list904.size; ++_i906) { - _elem873 = iprot.readString(); - struct.part_vals.add(_elem873); + _elem905 = iprot.readString(); + struct.part_vals.add(_elem905); } } struct.setPart_valsIsSet(true); @@ -83895,13 +84221,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list875 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list875.size); - String _elem876; - for (int _i877 = 0; _i877 < _list875.size; ++_i877) + org.apache.thrift.protocol.TList _list907 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list907.size); + String _elem908; + for (int _i909 = 0; _i909 < _list907.size; ++_i909) { - _elem876 = iprot.readString(); - struct.group_names.add(_elem876); + _elem908 = iprot.readString(); + struct.group_names.add(_elem908); } } struct.setGroup_namesIsSet(true); @@ -84388,14 +84714,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list878 = iprot.readListBegin(); - struct.success = new ArrayList(_list878.size); - Partition _elem879; - for (int _i880 = 0; _i880 < _list878.size; ++_i880) + org.apache.thrift.protocol.TList _list910 = iprot.readListBegin(); + struct.success = new ArrayList(_list910.size); + Partition _elem911; + for (int _i912 = 0; _i912 < _list910.size; ++_i912) { - _elem879 = new Partition(); - _elem879.read(iprot); - struct.success.add(_elem879); + _elem911 = new Partition(); + _elem911.read(iprot); + struct.success.add(_elem911); } iprot.readListEnd(); } @@ -84439,9 +84765,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter881 : struct.success) + for (Partition _iter913 : struct.success) { - _iter881.write(oprot); + _iter913.write(oprot); } oprot.writeListEnd(); } @@ -84488,9 +84814,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter882 : struct.success) + for (Partition _iter914 : struct.success) { - _iter882.write(oprot); + _iter914.write(oprot); } } } @@ -84508,14 +84834,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list883 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list883.size); - Partition _elem884; - for (int _i885 = 0; _i885 < _list883.size; ++_i885) + 
org.apache.thrift.protocol.TList _list915 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list915.size); + Partition _elem916; + for (int _i917 = 0; _i917 < _list915.size; ++_i917) { - _elem884 = new Partition(); - _elem884.read(iprot); - struct.success.add(_elem884); + _elem916 = new Partition(); + _elem916.read(iprot); + struct.success.add(_elem916); } } struct.setSuccessIsSet(true); @@ -85108,13 +85434,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list886 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list886.size); - String _elem887; - for (int _i888 = 0; _i888 < _list886.size; ++_i888) + org.apache.thrift.protocol.TList _list918 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list918.size); + String _elem919; + for (int _i920 = 0; _i920 < _list918.size; ++_i920) { - _elem887 = iprot.readString(); - struct.part_vals.add(_elem887); + _elem919 = iprot.readString(); + struct.part_vals.add(_elem919); } iprot.readListEnd(); } @@ -85158,9 +85484,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter889 : struct.part_vals) + for (String _iter921 : struct.part_vals) { - oprot.writeString(_iter889); + oprot.writeString(_iter921); } oprot.writeListEnd(); } @@ -85209,9 +85535,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter890 : struct.part_vals) + for (String _iter922 : struct.part_vals) { - oprot.writeString(_iter890); + oprot.writeString(_iter922); } } } @@ -85234,13 +85560,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list891 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list891.size); - String _elem892; - for (int _i893 = 0; _i893 < _list891.size; ++_i893) + org.apache.thrift.protocol.TList _list923 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list923.size); + String _elem924; + for (int _i925 = 0; _i925 < _list923.size; ++_i925) { - _elem892 = iprot.readString(); - struct.part_vals.add(_elem892); + _elem924 = iprot.readString(); + struct.part_vals.add(_elem924); } } struct.setPart_valsIsSet(true); @@ -85728,13 +86054,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list894 = iprot.readListBegin(); - struct.success = new ArrayList(_list894.size); - String _elem895; - for (int _i896 = 0; _i896 < _list894.size; ++_i896) + org.apache.thrift.protocol.TList _list926 = iprot.readListBegin(); + struct.success = new ArrayList(_list926.size); + String _elem927; + for (int _i928 = 0; _i928 < _list926.size; ++_i928) { - _elem895 = iprot.readString(); - struct.success.add(_elem895); + _elem927 = iprot.readString(); + struct.success.add(_elem927); } 
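The part_vals and partition-name hunks on either side re-emit the simplest variant of all: a list<string> decoded as a size-prefixed run of readString() calls into a presized ArrayList. A minimal sketch, with illustrative names as before:

import java.util.ArrayList;
import java.util.List;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TType;

// Illustrative only: mirrors the generated list<string> codec for part_vals et al.
final class StringListCodecSketch {
  static List<String> read(TProtocol iprot) throws TException {
    TList list = iprot.readListBegin();
    List<String> vals = new ArrayList<String>(list.size);  // presized, as in the generated code
    for (int i = 0; i < list.size; ++i) {
      vals.add(iprot.readString());
    }
    iprot.readListEnd();
    return vals;
  }

  static void write(TProtocol oprot, List<String> vals) throws TException {
    oprot.writeListBegin(new TList(TType.STRING, vals.size()));
    for (String v : vals) {
      oprot.writeString(v);
    }
    oprot.writeListEnd();
  }
}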
iprot.readListEnd(); } @@ -85778,9 +86104,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter897 : struct.success) + for (String _iter929 : struct.success) { - oprot.writeString(_iter897); + oprot.writeString(_iter929); } oprot.writeListEnd(); } @@ -85827,9 +86153,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter898 : struct.success) + for (String _iter930 : struct.success) { - oprot.writeString(_iter898); + oprot.writeString(_iter930); } } } @@ -85847,13 +86173,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list899 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list899.size); - String _elem900; - for (int _i901 = 0; _i901 < _list899.size; ++_i901) + org.apache.thrift.protocol.TList _list931 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list931.size); + String _elem932; + for (int _i933 = 0; _i933 < _list931.size; ++_i933) { - _elem900 = iprot.readString(); - struct.success.add(_elem900); + _elem932 = iprot.readString(); + struct.success.add(_elem932); } } struct.setSuccessIsSet(true); @@ -87020,14 +87346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list902 = iprot.readListBegin(); - struct.success = new ArrayList(_list902.size); - Partition _elem903; - for (int _i904 = 0; _i904 < _list902.size; ++_i904) + org.apache.thrift.protocol.TList _list934 = iprot.readListBegin(); + struct.success = new ArrayList(_list934.size); + Partition _elem935; + for (int _i936 = 0; _i936 < _list934.size; ++_i936) { - _elem903 = new Partition(); - _elem903.read(iprot); - struct.success.add(_elem903); + _elem935 = new Partition(); + _elem935.read(iprot); + struct.success.add(_elem935); } iprot.readListEnd(); } @@ -87071,9 +87397,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter905 : struct.success) + for (Partition _iter937 : struct.success) { - _iter905.write(oprot); + _iter937.write(oprot); } oprot.writeListEnd(); } @@ -87120,9 +87446,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter906 : struct.success) + for (Partition _iter938 : struct.success) { - _iter906.write(oprot); + _iter938.write(oprot); } } } @@ -87140,14 +87466,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list907 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new 
ArrayList(_list907.size); - Partition _elem908; - for (int _i909 = 0; _i909 < _list907.size; ++_i909) + org.apache.thrift.protocol.TList _list939 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list939.size); + Partition _elem940; + for (int _i941 = 0; _i941 < _list939.size; ++_i941) { - _elem908 = new Partition(); - _elem908.read(iprot); - struct.success.add(_elem908); + _elem940 = new Partition(); + _elem940.read(iprot); + struct.success.add(_elem940); } } struct.setSuccessIsSet(true); @@ -88314,14 +88640,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_part_specs_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list910 = iprot.readListBegin(); - struct.success = new ArrayList(_list910.size); - PartitionSpec _elem911; - for (int _i912 = 0; _i912 < _list910.size; ++_i912) + org.apache.thrift.protocol.TList _list942 = iprot.readListBegin(); + struct.success = new ArrayList(_list942.size); + PartitionSpec _elem943; + for (int _i944 = 0; _i944 < _list942.size; ++_i944) { - _elem911 = new PartitionSpec(); - _elem911.read(iprot); - struct.success.add(_elem911); + _elem943 = new PartitionSpec(); + _elem943.read(iprot); + struct.success.add(_elem943); } iprot.readListEnd(); } @@ -88365,9 +88691,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_part_specs_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (PartitionSpec _iter913 : struct.success) + for (PartitionSpec _iter945 : struct.success) { - _iter913.write(oprot); + _iter945.write(oprot); } oprot.writeListEnd(); } @@ -88414,9 +88740,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (PartitionSpec _iter914 : struct.success) + for (PartitionSpec _iter946 : struct.success) { - _iter914.write(oprot); + _iter946.write(oprot); } } } @@ -88434,14 +88760,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_part_specs_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list915 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list915.size); - PartitionSpec _elem916; - for (int _i917 = 0; _i917 < _list915.size; ++_i917) + org.apache.thrift.protocol.TList _list947 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list947.size); + PartitionSpec _elem948; + for (int _i949 = 0; _i949 < _list947.size; ++_i949) { - _elem916 = new PartitionSpec(); - _elem916.read(iprot); - struct.success.add(_elem916); + _elem948 = new PartitionSpec(); + _elem948.read(iprot); + struct.success.add(_elem948); } } struct.setSuccessIsSet(true); @@ -91025,13 +91351,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list918 = iprot.readListBegin(); - struct.names = new ArrayList(_list918.size); - String _elem919; - for (int _i920 = 0; _i920 < _list918.size; ++_i920) + org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); + struct.names = new 
ArrayList(_list950.size); + String _elem951; + for (int _i952 = 0; _i952 < _list950.size; ++_i952) { - _elem919 = iprot.readString(); - struct.names.add(_elem919); + _elem951 = iprot.readString(); + struct.names.add(_elem951); } iprot.readListEnd(); } @@ -91067,9 +91393,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter921 : struct.names) + for (String _iter953 : struct.names) { - oprot.writeString(_iter921); + oprot.writeString(_iter953); } oprot.writeListEnd(); } @@ -91112,9 +91438,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter922 : struct.names) + for (String _iter954 : struct.names) { - oprot.writeString(_iter922); + oprot.writeString(_iter954); } } } @@ -91134,13 +91460,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list923 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list923.size); - String _elem924; - for (int _i925 = 0; _i925 < _list923.size; ++_i925) + org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list955.size); + String _elem956; + for (int _i957 = 0; _i957 < _list955.size; ++_i957) { - _elem924 = iprot.readString(); - struct.names.add(_elem924); + _elem956 = iprot.readString(); + struct.names.add(_elem956); } } struct.setNamesIsSet(true); @@ -91627,14 +91953,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list926 = iprot.readListBegin(); - struct.success = new ArrayList(_list926.size); - Partition _elem927; - for (int _i928 = 0; _i928 < _list926.size; ++_i928) + org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); + struct.success = new ArrayList(_list958.size); + Partition _elem959; + for (int _i960 = 0; _i960 < _list958.size; ++_i960) { - _elem927 = new Partition(); - _elem927.read(iprot); - struct.success.add(_elem927); + _elem959 = new Partition(); + _elem959.read(iprot); + struct.success.add(_elem959); } iprot.readListEnd(); } @@ -91678,9 +92004,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter929 : struct.success) + for (Partition _iter961 : struct.success) { - _iter929.write(oprot); + _iter961.write(oprot); } oprot.writeListEnd(); } @@ -91727,9 +92053,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter930 : struct.success) + for (Partition _iter962 : struct.success) { - _iter930.write(oprot); + _iter962.write(oprot); } } } @@ -91747,14 +92073,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - 
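The read(TProtocol prot, ...) overloads in these hunks belong to the TupleScheme, Thrift's compact tuple encoding: a leading BitSet marks which optional fields follow, and lists travel as a bare i32 count with no element-type header on the wire. A hedged sketch of that decode step; fieldCount and fieldIndex are illustrative parameters, and in the real generated code iprot is the TTupleProtocol cast of the incoming protocol.

import java.util.ArrayList;
import java.util.BitSet;
import java.util.List;

import org.apache.thrift.TException;
import org.apache.thrift.protocol.TTupleProtocol;

// Illustrative only: mirrors the BitSet-gated optional-field reads above.
final class TupleReadSketch {
  static List<String> readOptionalStringList(TTupleProtocol iprot, int fieldCount, int fieldIndex)
      throws TException {
    BitSet incoming = iprot.readBitSet(fieldCount);        // presence flags, one bit per optional field
    if (!incoming.get(fieldIndex)) {
      return null;                                         // field absent on the wire
    }
    int size = iprot.readI32();                            // bare count; element type implied by schema
    List<String> vals = new ArrayList<String>(size);
    for (int i = 0; i < size; ++i) {
      vals.add(iprot.readString());
    }
    return vals;
  }
}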
org.apache.thrift.protocol.TList _list931 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list931.size); - Partition _elem932; - for (int _i933 = 0; _i933 < _list931.size; ++_i933) + org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list963.size); + Partition _elem964; + for (int _i965 = 0; _i965 < _list963.size; ++_i965) { - _elem932 = new Partition(); - _elem932.read(iprot); - struct.success.add(_elem932); + _elem964 = new Partition(); + _elem964.read(iprot); + struct.success.add(_elem964); } } struct.setSuccessIsSet(true); @@ -93304,14 +93630,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list934 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list934.size); - Partition _elem935; - for (int _i936 = 0; _i936 < _list934.size; ++_i936) + org.apache.thrift.protocol.TList _list966 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list966.size); + Partition _elem967; + for (int _i968 = 0; _i968 < _list966.size; ++_i968) { - _elem935 = new Partition(); - _elem935.read(iprot); - struct.new_parts.add(_elem935); + _elem967 = new Partition(); + _elem967.read(iprot); + struct.new_parts.add(_elem967); } iprot.readListEnd(); } @@ -93347,9 +93673,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter937 : struct.new_parts) + for (Partition _iter969 : struct.new_parts) { - _iter937.write(oprot); + _iter969.write(oprot); } oprot.writeListEnd(); } @@ -93392,9 +93718,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter938 : struct.new_parts) + for (Partition _iter970 : struct.new_parts) { - _iter938.write(oprot); + _iter970.write(oprot); } } } @@ -93414,14 +93740,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list939 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list939.size); - Partition _elem940; - for (int _i941 = 0; _i941 < _list939.size; ++_i941) + org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list971.size); + Partition _elem972; + for (int _i973 = 0; _i973 < _list971.size; ++_i973) { - _elem940 = new Partition(); - _elem940.read(iprot); - struct.new_parts.add(_elem940); + _elem972 = new Partition(); + _elem972.read(iprot); + struct.new_parts.add(_elem972); } } struct.setNew_partsIsSet(true); @@ -94474,14 +94800,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_wi case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list942 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list942.size); - Partition _elem943; - for (int _i944 = 0; 
_i944 < _list942.size; ++_i944) + org.apache.thrift.protocol.TList _list974 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list974.size); + Partition _elem975; + for (int _i976 = 0; _i976 < _list974.size; ++_i976) { - _elem943 = new Partition(); - _elem943.read(iprot); - struct.new_parts.add(_elem943); + _elem975 = new Partition(); + _elem975.read(iprot); + struct.new_parts.add(_elem975); } iprot.readListEnd(); } @@ -94526,9 +94852,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_w oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter945 : struct.new_parts) + for (Partition _iter977 : struct.new_parts) { - _iter945.write(oprot); + _iter977.write(oprot); } oprot.writeListEnd(); } @@ -94579,9 +94905,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wi if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter946 : struct.new_parts) + for (Partition _iter978 : struct.new_parts) { - _iter946.write(oprot); + _iter978.write(oprot); } } } @@ -94604,14 +94930,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list947 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list947.size); - Partition _elem948; - for (int _i949 = 0; _i949 < _list947.size; ++_i949) + org.apache.thrift.protocol.TList _list979 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list979.size); + Partition _elem980; + for (int _i981 = 0; _i981 < _list979.size; ++_i981) { - _elem948 = new Partition(); - _elem948.read(iprot); - struct.new_parts.add(_elem948); + _elem980 = new Partition(); + _elem980.read(iprot); + struct.new_parts.add(_elem980); } } struct.setNew_partsIsSet(true); @@ -96812,13 +97138,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list950 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list950.size); - String _elem951; - for (int _i952 = 0; _i952 < _list950.size; ++_i952) + org.apache.thrift.protocol.TList _list982 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list982.size); + String _elem983; + for (int _i984 = 0; _i984 < _list982.size; ++_i984) { - _elem951 = iprot.readString(); - struct.part_vals.add(_elem951); + _elem983 = iprot.readString(); + struct.part_vals.add(_elem983); } iprot.readListEnd(); } @@ -96863,9 +97189,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter953 : struct.part_vals) + for (String _iter985 : struct.part_vals) { - oprot.writeString(_iter953); + oprot.writeString(_iter985); } oprot.writeListEnd(); } @@ -96916,9 +97242,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter954 : struct.part_vals) + for (String _iter986 : struct.part_vals) 
{ - oprot.writeString(_iter954); + oprot.writeString(_iter986); } } } @@ -96941,13 +97267,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list955 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list955.size); - String _elem956; - for (int _i957 = 0; _i957 < _list955.size; ++_i957) + org.apache.thrift.protocol.TList _list987 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list987.size); + String _elem988; + for (int _i989 = 0; _i989 < _list987.size; ++_i989) { - _elem956 = iprot.readString(); - struct.part_vals.add(_elem956); + _elem988 = iprot.readString(); + struct.part_vals.add(_elem988); } } struct.setPart_valsIsSet(true); @@ -97821,13 +98147,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list958 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list958.size); - String _elem959; - for (int _i960 = 0; _i960 < _list958.size; ++_i960) + org.apache.thrift.protocol.TList _list990 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list990.size); + String _elem991; + for (int _i992 = 0; _i992 < _list990.size; ++_i992) { - _elem959 = iprot.readString(); - struct.part_vals.add(_elem959); + _elem991 = iprot.readString(); + struct.part_vals.add(_elem991); } iprot.readListEnd(); } @@ -97861,9 +98187,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter961 : struct.part_vals) + for (String _iter993 : struct.part_vals) { - oprot.writeString(_iter961); + oprot.writeString(_iter993); } oprot.writeListEnd(); } @@ -97900,9 +98226,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter962 : struct.part_vals) + for (String _iter994 : struct.part_vals) { - oprot.writeString(_iter962); + oprot.writeString(_iter994); } } } @@ -97917,13 +98243,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list963 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list963.size); - String _elem964; - for (int _i965 = 0; _i965 < _list963.size; ++_i965) + org.apache.thrift.protocol.TList _list995 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list995.size); + String _elem996; + for (int _i997 = 0; _i997 < _list995.size; ++_i997) { - _elem964 = iprot.readString(); - struct.part_vals.add(_elem964); + _elem996 = iprot.readString(); + struct.part_vals.add(_elem996); } } struct.setPart_valsIsSet(true); @@ -100078,13 +100404,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list966 = 
iprot.readListBegin(); - struct.success = new ArrayList(_list966.size); - String _elem967; - for (int _i968 = 0; _i968 < _list966.size; ++_i968) + org.apache.thrift.protocol.TList _list998 = iprot.readListBegin(); + struct.success = new ArrayList(_list998.size); + String _elem999; + for (int _i1000 = 0; _i1000 < _list998.size; ++_i1000) { - _elem967 = iprot.readString(); - struct.success.add(_elem967); + _elem999 = iprot.readString(); + struct.success.add(_elem999); } iprot.readListEnd(); } @@ -100119,9 +100445,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter969 : struct.success) + for (String _iter1001 : struct.success) { - oprot.writeString(_iter969); + oprot.writeString(_iter1001); } oprot.writeListEnd(); } @@ -100160,9 +100486,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter970 : struct.success) + for (String _iter1002 : struct.success) { - oprot.writeString(_iter970); + oprot.writeString(_iter1002); } } } @@ -100177,13 +100503,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list971 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list971.size); - String _elem972; - for (int _i973 = 0; _i973 < _list971.size; ++_i973) + org.apache.thrift.protocol.TList _list1003 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1003.size); + String _elem1004; + for (int _i1005 = 0; _i1005 < _list1003.size; ++_i1005) { - _elem972 = iprot.readString(); - struct.success.add(_elem972); + _elem1004 = iprot.readString(); + struct.success.add(_elem1004); } } struct.setSuccessIsSet(true); @@ -100946,15 +101272,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map974 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map974.size); - String _key975; - String _val976; - for (int _i977 = 0; _i977 < _map974.size; ++_i977) + org.apache.thrift.protocol.TMap _map1006 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map1006.size); + String _key1007; + String _val1008; + for (int _i1009 = 0; _i1009 < _map1006.size; ++_i1009) { - _key975 = iprot.readString(); - _val976 = iprot.readString(); - struct.success.put(_key975, _val976); + _key1007 = iprot.readString(); + _val1008 = iprot.readString(); + struct.success.put(_key1007, _val1008); } iprot.readMapEnd(); } @@ -100989,10 +101315,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter978 : struct.success.entrySet()) + for (Map.Entry _iter1010 : struct.success.entrySet()) { - oprot.writeString(_iter978.getKey()); - oprot.writeString(_iter978.getValue()); + oprot.writeString(_iter1010.getKey()); + 
oprot.writeString(_iter1010.getValue()); } oprot.writeMapEnd(); } @@ -101031,10 +101357,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter979 : struct.success.entrySet()) + for (Map.Entry _iter1011 : struct.success.entrySet()) { - oprot.writeString(_iter979.getKey()); - oprot.writeString(_iter979.getValue()); + oprot.writeString(_iter1011.getKey()); + oprot.writeString(_iter1011.getValue()); } } } @@ -101049,15 +101375,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map980 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map980.size); - String _key981; - String _val982; - for (int _i983 = 0; _i983 < _map980.size; ++_i983) + org.apache.thrift.protocol.TMap _map1012 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map1012.size); + String _key1013; + String _val1014; + for (int _i1015 = 0; _i1015 < _map1012.size; ++_i1015) { - _key981 = iprot.readString(); - _val982 = iprot.readString(); - struct.success.put(_key981, _val982); + _key1013 = iprot.readString(); + _val1014 = iprot.readString(); + struct.success.put(_key1013, _val1014); } } struct.setSuccessIsSet(true); @@ -101652,15 +101978,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map984 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map984.size); - String _key985; - String _val986; - for (int _i987 = 0; _i987 < _map984.size; ++_i987) + org.apache.thrift.protocol.TMap _map1016 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1016.size); + String _key1017; + String _val1018; + for (int _i1019 = 0; _i1019 < _map1016.size; ++_i1019) { - _key985 = iprot.readString(); - _val986 = iprot.readString(); - struct.part_vals.put(_key985, _val986); + _key1017 = iprot.readString(); + _val1018 = iprot.readString(); + struct.part_vals.put(_key1017, _val1018); } iprot.readMapEnd(); } @@ -101704,10 +102030,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter988 : struct.part_vals.entrySet()) + for (Map.Entry _iter1020 : struct.part_vals.entrySet()) { - oprot.writeString(_iter988.getKey()); - oprot.writeString(_iter988.getValue()); + oprot.writeString(_iter1020.getKey()); + oprot.writeString(_iter1020.getValue()); } oprot.writeMapEnd(); } @@ -101758,10 +102084,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter989 : struct.part_vals.entrySet()) + for (Map.Entry _iter1021 : struct.part_vals.entrySet()) { - oprot.writeString(_iter989.getKey()); - oprot.writeString(_iter989.getValue()); + oprot.writeString(_iter1021.getKey()); + oprot.writeString(_iter1021.getValue()); } } } @@ 
-101784,15 +102110,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map990 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map990.size); - String _key991; - String _val992; - for (int _i993 = 0; _i993 < _map990.size; ++_i993) + org.apache.thrift.protocol.TMap _map1022 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1022.size); + String _key1023; + String _val1024; + for (int _i1025 = 0; _i1025 < _map1022.size; ++_i1025) { - _key991 = iprot.readString(); - _val992 = iprot.readString(); - struct.part_vals.put(_key991, _val992); + _key1023 = iprot.readString(); + _val1024 = iprot.readString(); + struct.part_vals.put(_key1023, _val1024); } } struct.setPart_valsIsSet(true); @@ -103276,15 +103602,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map994 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map994.size); - String _key995; - String _val996; - for (int _i997 = 0; _i997 < _map994.size; ++_i997) + org.apache.thrift.protocol.TMap _map1026 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map1026.size); + String _key1027; + String _val1028; + for (int _i1029 = 0; _i1029 < _map1026.size; ++_i1029) { - _key995 = iprot.readString(); - _val996 = iprot.readString(); - struct.part_vals.put(_key995, _val996); + _key1027 = iprot.readString(); + _val1028 = iprot.readString(); + struct.part_vals.put(_key1027, _val1028); } iprot.readMapEnd(); } @@ -103328,10 +103654,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter998 : struct.part_vals.entrySet()) + for (Map.Entry _iter1030 : struct.part_vals.entrySet()) { - oprot.writeString(_iter998.getKey()); - oprot.writeString(_iter998.getValue()); + oprot.writeString(_iter1030.getKey()); + oprot.writeString(_iter1030.getValue()); } oprot.writeMapEnd(); } @@ -103382,10 +103708,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter999 : struct.part_vals.entrySet()) + for (Map.Entry _iter1031 : struct.part_vals.entrySet()) { - oprot.writeString(_iter999.getKey()); - oprot.writeString(_iter999.getValue()); + oprot.writeString(_iter1031.getKey()); + oprot.writeString(_iter1031.getValue()); } } } @@ -103408,15 +103734,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map1000 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map1000.size); - String _key1001; - String _val1002; - for (int _i1003 = 0; _i1003 < _map1000.size; ++_i1003) + org.apache.thrift.protocol.TMap _map1032 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map1032.size); + String _key1033; + String _val1034; + for (int _i1035 = 0; _i1035 < _map1032.size; ++_i1035) { - _key1001 = iprot.readString(); - _val1002 = iprot.readString(); - struct.part_vals.put(_key1001, _val1002); + _key1033 = iprot.readString(); + _val1034 = iprot.readString(); + struct.part_vals.put(_key1033, _val1034); } } struct.setPart_valsIsSet(true); @@ -106607,15 +106933,2369 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class alter_index_resultStandardSchemeFactory implements SchemeFactory { - public alter_index_resultStandardScheme getScheme() { - return new alter_index_resultStandardScheme(); + private static class alter_index_resultStandardSchemeFactory implements SchemeFactory { + public alter_index_resultStandardScheme getScheme() { + return new alter_index_resultStandardScheme(); + } + } + + private static class alter_index_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, alter_index_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class alter_index_resultTupleSchemeFactory implements SchemeFactory { + public alter_index_resultTupleScheme getScheme() { + return new alter_index_resultTupleScheme(); + } + } + + private static class alter_index_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetO1()) { + optionals.set(0); + } + if (struct.isSetO2()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, 
alter_index_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.o1 = new InvalidOperationException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(1)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class drop_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_args"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)4); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new drop_index_by_name_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_index_by_name_argsTupleSchemeFactory()); + } + + private String db_name; // required + private String tbl_name; // required + private String index_name; // required + private boolean deleteData; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + INDEX_NAME((short)3, "index_name"), + DELETE_DATA((short)4, "deleteData"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // INDEX_NAME + return INDEX_NAME; + case 4: // DELETE_DATA + return DELETE_DATA; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
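The hunks above don't change behavior: regenerating the Thrift bindings renumbers the compiler's temporary locals (for example _list942/_elem943/_i944 become _list974/_elem975/_i976), so every collection read/write loop shifts its suffixes while the logic stays identical. The struct-read loop itself always has the same generated shape: read fields until TType.STOP, switch on the numeric field id, and TProtocolUtil.skip anything unknown so an old reader tolerates a new writer. A minimal, self-contained sketch of that shape — the Field record and in-memory Deque are stand-ins for the real TProtocol stream, not Thrift API:

import java.util.ArrayDeque;
import java.util.Deque;
import java.util.HashMap;
import java.util.Map;

public class SkipUnknownFields {
    // Each wire field is (id, value); id 0 terminates the struct, mirroring TType.STOP.
    record Field(short id, Object value) {}

    static Map<Short, Object> readStruct(Deque<Field> wire) {
        Map<Short, Object> known = new HashMap<>();
        while (true) {
            Field f = wire.pop();
            if (f.id() == 0) break;                        // STOP: end of struct
            switch (f.id()) {
                case 1, 2 -> known.put(f.id(), f.value()); // field ids this reader knows
                default -> { /* unknown id: value consumed, silently dropped */ }
            }
        }
        return known;
    }

    public static void main(String[] args) {
        Deque<Field> wire = new ArrayDeque<>();
        wire.add(new Field((short) 1, "InvalidOperationException"));
        wire.add(new Field((short) 7, "added by a newer server")); // unknown to this reader
        wire.add(new Field((short) 0, null));                      // STOP
        System.out.println(readStruct(wire));  // prints {1=InvalidOperationException}
    }
}

Skipping rather than failing on unknown ids is what lets the metastore IDL evolve without breaking already-deployed clients.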
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __DELETEDATA_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_args.class, metaDataMap); + } + + public drop_index_by_name_args() { + } + + public drop_index_by_name_args( + String db_name, + String tbl_name, + String index_name, + boolean deleteData) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.index_name = index_name; + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + /** + * Performs a deep copy on other. 
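Every generated struct also carries a _Fields enum mapping wire field ids and names to constants: findByThriftId is a plain switch that returns null for ids the reader doesn't know, and findByThriftIdOrThrow wraps it for callers that require a match. A compilable sketch of the same pattern, using the drop_index_by_name_args fields from the code above:

import java.util.HashMap;
import java.util.Map;

public class FieldsEnumSketch {
    // Mirrors the generated _Fields pattern for drop_index_by_name_args.
    enum Fields {
        DB_NAME((short) 1, "db_name"),
        TBL_NAME((short) 2, "tbl_name"),
        INDEX_NAME((short) 3, "index_name"),
        DELETE_DATA((short) 4, "deleteData");

        private static final Map<String, Fields> BY_NAME = new HashMap<>();
        static {
            for (Fields f : values()) BY_NAME.put(f.fieldName, f);
        }

        final short thriftId;
        final String fieldName;

        Fields(short thriftId, String fieldName) {
            this.thriftId = thriftId;
            this.fieldName = fieldName;
        }

        // Wire id -> constant, or null so the caller can skip ids it doesn't know.
        static Fields findByThriftId(int fieldId) {
            switch (fieldId) {
                case 1: return DB_NAME;
                case 2: return TBL_NAME;
                case 3: return INDEX_NAME;
                case 4: return DELETE_DATA;
                default: return null;
            }
        }

        static Fields findByName(String name) {
            return BY_NAME.get(name);
        }
    }

    public static void main(String[] args) {
        System.out.println(Fields.findByThriftId(4));   // DELETE_DATA
        System.out.println(Fields.findByThriftId(99));  // null: caller skips the field
    }
}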
+ */ + public drop_index_by_name_args(drop_index_by_name_args other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetIndex_name()) { + this.index_name = other.index_name; + } + this.deleteData = other.deleteData; + } + + public drop_index_by_name_args deepCopy() { + return new drop_index_by_name_args(this); + } + + @Override + public void clear() { + this.db_name = null; + this.tbl_name = null; + this.index_name = null; + setDeleteDataIsSet(false); + this.deleteData = false; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public String getIndex_name() { + return this.index_name; + } + + public void setIndex_name(String index_name) { + this.index_name = index_name; + } + + public void unsetIndex_name() { + this.index_name = null; + } + + /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ + public boolean isSetIndex_name() { + return this.index_name != null; + } + + public void setIndex_nameIsSet(boolean value) { + if (!value) { + this.index_name = null; + } + } + + public boolean isDeleteData() { + return this.deleteData; + } + + public void setDeleteData(boolean deleteData) { + this.deleteData = deleteData; + setDeleteDataIsSet(true); + } + + public void unsetDeleteData() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID); + } + + /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ + public boolean isSetDeleteData() { + return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID); + } + + public void setDeleteDataIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case INDEX_NAME: + if (value == null) { + unsetIndex_name(); + } else { + setIndex_name((String)value); + } + break; + + case DELETE_DATA: + if (value == null) { + unsetDeleteData(); + } else { + setDeleteData((Boolean)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case INDEX_NAME: + return getIndex_name(); + + case DELETE_DATA: + return isDeleteData(); + + } + throw new 
IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case INDEX_NAME: + return isSetIndex_name(); + case DELETE_DATA: + return isSetDeleteData(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_index_by_name_args) + return this.equals((drop_index_by_name_args)that); + return false; + } + + public boolean equals(drop_index_by_name_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_index_name = true && this.isSetIndex_name(); + boolean that_present_index_name = true && that.isSetIndex_name(); + if (this_present_index_name || that_present_index_name) { + if (!(this_present_index_name && that_present_index_name)) + return false; + if (!this.index_name.equals(that.index_name)) + return false; + } + + boolean this_present_deleteData = true; + boolean that_present_deleteData = true; + if (this_present_deleteData || that_present_deleteData) { + if (!(this_present_deleteData && that_present_deleteData)) + return false; + if (this.deleteData != that.deleteData) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_index_name = true && (isSetIndex_name()); + list.add(present_index_name); + if (present_index_name) + list.add(index_name); + + boolean present_deleteData = true; + list.add(present_deleteData); + if (present_deleteData) + list.add(deleteData); + + return list.hashCode(); + } + + @Override + public int compareTo(drop_index_by_name_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return 
lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIndex_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDeleteData()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_index_by_name_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("index_name:"); + if (this.index_name == null) { + sb.append("null"); + } else { + sb.append(this.index_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("deleteData:"); + sb.append(this.deleteData); + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
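The writeObject/readObject pair at the end of each struct bridges Java serialization onto the Thrift encoding: the generated code delegates to a TCompactProtocol over a TIOStreamTransport, and readObject zeroes __isset_bitfield first because, as the generated comment notes, Java deserialization never calls the default constructor, so field initializers don't run. A minimal sketch of the same idea using plain ObjectOutputStream primitives rather than the Thrift transport:

import java.io.*;

public class SerializationBridgeSketch implements Serializable {
    private String name;
    private byte issetBitfield = 0;  // plays the role of __isset_bitfield

    // Serialize through our own encoding instead of the default Java field
    // encoding, the way the generated code delegates to TCompactProtocol.
    private void writeObject(ObjectOutputStream out) throws IOException {
        out.writeUTF(name == null ? "" : name);
        out.writeByte(issetBitfield);
    }

    private void readObject(ObjectInputStream in) throws IOException {
        // Deserialization bypasses the constructor: reset state explicitly
        // before reading, exactly as the generated readObject does.
        issetBitfield = 0;
        name = in.readUTF();
        issetBitfield = in.readByte();
    }

    public static void main(String[] args) throws Exception {
        SerializationBridgeSketch s = new SerializationBridgeSketch();
        s.name = "drop_index_by_name";
        s.issetBitfield = 1;
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        try (ObjectOutputStream oos = new ObjectOutputStream(buf)) { oos.writeObject(s); }
        try (ObjectInputStream ois = new ObjectInputStream(
                new ByteArrayInputStream(buf.toByteArray()))) {
            SerializationBridgeSketch back = (SerializationBridgeSketch) ois.readObject();
            System.out.println(back.name + " / bits=" + back.issetBitfield);
        }
    }
}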
+ __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class drop_index_by_name_argsStandardSchemeFactory implements SchemeFactory { + public drop_index_by_name_argsStandardScheme getScheme() { + return new drop_index_by_name_argsStandardScheme(); + } + } + + private static class drop_index_by_name_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // INDEX_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.index_name = iprot.readString(); + struct.setIndex_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // DELETE_DATA + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.deleteData = iprot.readBool(); + struct.setDeleteDataIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.index_name != null) { + oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); + oprot.writeString(struct.index_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); + oprot.writeBool(struct.deleteData); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class drop_index_by_name_argsTupleSchemeFactory implements SchemeFactory { + public drop_index_by_name_argsTupleScheme getScheme() { + return new drop_index_by_name_argsTupleScheme(); + } + } + + private static class drop_index_by_name_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDb_name()) { + optionals.set(0); + } + if 
(struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetIndex_name()) { + optionals.set(2); + } + if (struct.isSetDeleteData()) { + optionals.set(3); + } + oprot.writeBitSet(optionals, 4); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetIndex_name()) { + oprot.writeString(struct.index_name); + } + if (struct.isSetDeleteData()) { + oprot.writeBool(struct.deleteData); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(4); + if (incoming.get(0)) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + struct.index_name = iprot.readString(); + struct.setIndex_nameIsSet(true); + } + if (incoming.get(3)) { + struct.deleteData = iprot.readBool(); + struct.setDeleteDataIsSet(true); + } + } + } + + } + + public static class drop_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new drop_index_by_name_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new drop_index_by_name_resultTupleSchemeFactory()); + } + + private boolean success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
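The TupleScheme above is the compact variant: instead of tagged fields it writes a bitset announcing which optional fields follow (oprot.writeBitSet(optionals, 4)) and then only the set fields, and the reader mirrors that with readBitSet. A sketch of the presence-bitset idea using java.util.BitSet over a byte stream — two fields instead of four, with writeUTF standing in for the Thrift string encoding:

import java.io.*;
import java.util.BitSet;

public class TupleOptionalsSketch {
    // Write only the fields that are set, preceded by a presence bitset,
    // loosely mirroring the generated TupleScheme for drop_index_by_name_args.
    static byte[] write(String dbName, String tblName) throws IOException {
        BitSet optionals = new BitSet();
        if (dbName != null) optionals.set(0);
        if (tblName != null) optionals.set(1);
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        byte[] bits = optionals.toByteArray();         // stand-in for writeBitSet(optionals, 2)
        out.writeByte(bits.length == 0 ? 0 : bits[0]);
        if (dbName != null) out.writeUTF(dbName);      // only present fields hit the wire
        if (tblName != null) out.writeUTF(tblName);
        return buf.toByteArray();
    }

    static String[] read(byte[] wire) throws IOException {
        DataInputStream in = new DataInputStream(new ByteArrayInputStream(wire));
        BitSet incoming = BitSet.valueOf(new byte[] { in.readByte() });
        String db = incoming.get(0) ? in.readUTF() : null;
        String tbl = incoming.get(1) ? in.readUTF() : null;
        return new String[] { db, tbl };
    }

    public static void main(String[] args) throws IOException {
        byte[] wire = write("default", null);  // tbl_name left unset
        String[] back = read(wire);
        System.out.println(back[0] + " / " + back[1]);  // default / null
    }
}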
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_result.class, metaDataMap); + } + + public drop_index_by_name_result() { + } + + public drop_index_by_name_result( + boolean success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + setSuccessIsSet(true); + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
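drop_index_by_name_result tracks its primitive boolean success differently from the object fields: null can't mark a primitive as unset, so the struct keeps a __isset_bitfield and flips bits through EncodingUtils. The sketch below inlines equivalent bit helpers to show why this matters — success == false and "success never assigned" become distinguishable states:

public class IssetBitfieldSketch {
    // For object fields, "unset" is just null; a primitive boolean needs a
    // separate presence bit. These helpers mirror what the generated code
    // does through org.apache.thrift.EncodingUtils.
    static byte setBit(byte bits, int pos, boolean value) {
        return value ? (byte) (bits | (1 << pos)) : (byte) (bits & ~(1 << pos));
    }

    static boolean testBit(byte bits, int pos) {
        return (bits & (1 << pos)) != 0;
    }

    private static final int SUCCESS_ISSET_ID = 0;
    private byte issetBitfield = 0;
    private boolean success;

    void setSuccess(boolean success) {
        this.success = success;
        issetBitfield = setBit(issetBitfield, SUCCESS_ISSET_ID, true);
    }

    boolean isSetSuccess() {
        return testBit(issetBitfield, SUCCESS_ISSET_ID);
    }

    public static void main(String[] args) {
        IssetBitfieldSketch r = new IssetBitfieldSketch();
        System.out.println(r.isSetSuccess());  // false: never assigned
        r.setSuccess(false);
        System.out.println(r.isSetSuccess());  // true: assigned, even though the value is false
    }
}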
+ */ + public drop_index_by_name_result(drop_index_by_name_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public drop_index_by_name_result deepCopy() { + return new drop_index_by_name_result(this); + } + + @Override + public void clear() { + setSuccessIsSet(false); + this.success = false; + this.o1 = null; + this.o2 = null; + } + + public boolean isSuccess() { + return this.success; + } + + public void setSuccess(boolean success) { + this.success = success; + setSuccessIsSet(true); + } + + public void unsetSuccess() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + } + + public void setSuccessIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Boolean)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return isSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof drop_index_by_name_result) + return this.equals((drop_index_by_name_result)that); + return false; + } + + public boolean equals(drop_index_by_name_result that) { + if (that == null) + return false; + + boolean this_present_success = true; + boolean that_present_success = true; + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (this.success != that.success) 
+ return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true; + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(drop_index_by_name_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("drop_index_by_name_result("); + boolean first = true; + + sb.append("success:"); + sb.append(this.success); + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } 
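The generated compareTo orders structs field by field, and for each field it first compares the isSet flags (unset sorts before set) and only then the values, via Boolean.valueOf(...).compareTo and TBaseHelper.compareTo. A plain-Java sketch of that two-step chain for two nullable fields, with null standing in for "unset":

public class CompareToChainSketch implements Comparable<CompareToChainSketch> {
    private final String dbName;   // null means unset, as in the generated structs
    private final String tblName;

    CompareToChainSketch(String dbName, String tblName) {
        this.dbName = dbName;
        this.tblName = tblName;
    }

    @Override
    public int compareTo(CompareToChainSketch other) {
        // Per field: order by presence first, then by value. This is the same
        // chain the generated compareTo builds field by field.
        int c = Boolean.compare(dbName != null, other.dbName != null);
        if (c != 0) return c;
        if (dbName != null && (c = dbName.compareTo(other.dbName)) != 0) return c;
        c = Boolean.compare(tblName != null, other.tblName != null);
        if (c != 0) return c;
        if (tblName != null && (c = tblName.compareTo(other.tblName)) != 0) return c;
        return 0;
    }

    public static void main(String[] args) {
        System.out.println(new CompareToChainSketch(null, "t")
                .compareTo(new CompareToChainSketch("db", "t")));  // negative: unset sorts first
    }
}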
+ + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class drop_index_by_name_resultStandardSchemeFactory implements SchemeFactory { + public drop_index_by_name_resultStandardScheme getScheme() { + return new drop_index_by_name_resultStandardScheme(); + } + } + + private static class drop_index_by_name_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.isSetSuccess()) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeBool(struct.success); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class drop_index_by_name_resultTupleSchemeFactory implements SchemeFactory { + public drop_index_by_name_resultTupleScheme getScheme() { + return new drop_index_by_name_resultTupleScheme(); + } + } + + private static class drop_index_by_name_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + 
oprot.writeBool(struct.success); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.success = iprot.readBool(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + } + } + + } + + public static class get_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_args"); + + private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_index_by_name_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_by_name_argsTupleSchemeFactory()); + } + + private String db_name; // required + private String tbl_name; // required + private String index_name; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + DB_NAME((short)1, "db_name"), + TBL_NAME((short)2, "tbl_name"), + INDEX_NAME((short)3, "index_name"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // DB_NAME + return DB_NAME; + case 2: // TBL_NAME + return TBL_NAME; + case 3: // INDEX_NAME + return INDEX_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
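Both *_result structs follow the usual Thrift envelope: the return value sits at field id 0 ("success") and each exception declared in the IDL gets its own field (here o1 is NoSuchObjectException and o2 is MetaException), with the server setting exactly one of them. Client-side, the generated recv_* helper unwraps the envelope by throwing whichever exception field is set and otherwise returning success. A hypothetical mirror of that dispatch — plain Exception fields stand in for the real exception types, and the class names are illustrative only:

public class ResultEnvelopeSketch {
    // Hypothetical mirror of a generated *_result struct: at most one field set.
    static class DropIndexResult {
        Boolean success;   // field id 0
        Exception o1;      // field id 1: NoSuchObjectException in the real code
        Exception o2;      // field id 2: MetaException in the real code
    }

    // Client-side unwrapping, in the spirit of the generated recv_* helper.
    static boolean unwrap(DropIndexResult r) throws Exception {
        if (r.o1 != null) throw r.o1;
        if (r.o2 != null) throw r.o2;
        if (r.success != null) return r.success;
        throw new IllegalStateException("drop_index_by_name failed: unknown result");
    }

    public static void main(String[] args) throws Exception {
        DropIndexResult ok = new DropIndexResult();
        ok.success = true;
        System.out.println(unwrap(ok));  // true
    }
}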
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_args.class, metaDataMap); + } + + public get_index_by_name_args() { + } + + public get_index_by_name_args( + String db_name, + String tbl_name, + String index_name) + { + this(); + this.db_name = db_name; + this.tbl_name = tbl_name; + this.index_name = index_name; + } + + /** + * Performs a deep copy on other. 
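The deep-copy constructor pattern visible here copies String fields by reference, since strings are immutable, while struct-typed fields such as the exceptions in the result structs are rebuilt through their own copy constructors (new NoSuchObjectException(other.o1)); deepCopy() is just a one-line wrapper over the copy constructor. A small sketch of the distinction, with hypothetical Inner/Args names:

public class DeepCopySketch {
    static class Inner {
        String message;
        Inner(String message) { this.message = message; }
        Inner(Inner other) { this.message = other.message; }  // copy constructor
    }

    static class Args {
        String dbName;   // immutable String: safe to share between copies
        Inner detail;    // mutable sub-struct: rebuilt via its copy constructor

        Args() {}

        Args(Args other) {
            if (other.dbName != null) this.dbName = other.dbName;
            if (other.detail != null) this.detail = new Inner(other.detail);
        }

        Args deepCopy() { return new Args(this); }
    }

    public static void main(String[] args) {
        Args a = new Args();
        a.dbName = "default";
        a.detail = new Inner("idx");
        Args b = a.deepCopy();
        b.detail.message = "changed";
        System.out.println(a.detail.message);  // idx: the copy owns its own sub-struct
    }
}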
+ */ + public get_index_by_name_args(get_index_by_name_args other) { + if (other.isSetDb_name()) { + this.db_name = other.db_name; + } + if (other.isSetTbl_name()) { + this.tbl_name = other.tbl_name; + } + if (other.isSetIndex_name()) { + this.index_name = other.index_name; + } + } + + public get_index_by_name_args deepCopy() { + return new get_index_by_name_args(this); + } + + @Override + public void clear() { + this.db_name = null; + this.tbl_name = null; + this.index_name = null; + } + + public String getDb_name() { + return this.db_name; + } + + public void setDb_name(String db_name) { + this.db_name = db_name; + } + + public void unsetDb_name() { + this.db_name = null; + } + + /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetDb_name() { + return this.db_name != null; + } + + public void setDb_nameIsSet(boolean value) { + if (!value) { + this.db_name = null; + } + } + + public String getTbl_name() { + return this.tbl_name; + } + + public void setTbl_name(String tbl_name) { + this.tbl_name = tbl_name; + } + + public void unsetTbl_name() { + this.tbl_name = null; + } + + /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetTbl_name() { + return this.tbl_name != null; + } + + public void setTbl_nameIsSet(boolean value) { + if (!value) { + this.tbl_name = null; + } + } + + public String getIndex_name() { + return this.index_name; + } + + public void setIndex_name(String index_name) { + this.index_name = index_name; + } + + public void unsetIndex_name() { + this.index_name = null; + } + + /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ + public boolean isSetIndex_name() { + return this.index_name != null; + } + + public void setIndex_nameIsSet(boolean value) { + if (!value) { + this.index_name = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case DB_NAME: + if (value == null) { + unsetDb_name(); + } else { + setDb_name((String)value); + } + break; + + case TBL_NAME: + if (value == null) { + unsetTbl_name(); + } else { + setTbl_name((String)value); + } + break; + + case INDEX_NAME: + if (value == null) { + unsetIndex_name(); + } else { + setIndex_name((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case DB_NAME: + return getDb_name(); + + case TBL_NAME: + return getTbl_name(); + + case INDEX_NAME: + return getIndex_name(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case DB_NAME: + return isSetDb_name(); + case TBL_NAME: + return isSetTbl_name(); + case INDEX_NAME: + return isSetIndex_name(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_index_by_name_args) + return this.equals((get_index_by_name_args)that); + return false; + } + + public boolean equals(get_index_by_name_args that) { + if (that == null) + return false; + + boolean this_present_db_name = true && this.isSetDb_name(); + boolean that_present_db_name = true && that.isSetDb_name(); + if (this_present_db_name || that_present_db_name) { + if (!(this_present_db_name && that_present_db_name)) + return 
false; + if (!this.db_name.equals(that.db_name)) + return false; + } + + boolean this_present_tbl_name = true && this.isSetTbl_name(); + boolean that_present_tbl_name = true && that.isSetTbl_name(); + if (this_present_tbl_name || that_present_tbl_name) { + if (!(this_present_tbl_name && that_present_tbl_name)) + return false; + if (!this.tbl_name.equals(that.tbl_name)) + return false; + } + + boolean this_present_index_name = true && this.isSetIndex_name(); + boolean that_present_index_name = true && that.isSetIndex_name(); + if (this_present_index_name || that_present_index_name) { + if (!(this_present_index_name && that_present_index_name)) + return false; + if (!this.index_name.equals(that.index_name)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_db_name = true && (isSetDb_name()); + list.add(present_db_name); + if (present_db_name) + list.add(db_name); + + boolean present_tbl_name = true && (isSetTbl_name()); + list.add(present_tbl_name); + if (present_tbl_name) + list.add(tbl_name); + + boolean present_index_name = true && (isSetIndex_name()); + list.add(present_index_name); + if (present_index_name) + list.add(index_name); + + return list.hashCode(); + } + + @Override + public int compareTo(get_index_by_name_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetDb_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetIndex_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_index_by_name_args("); + boolean first = true; + + sb.append("db_name:"); + if (this.db_name == null) { + sb.append("null"); + } else { + sb.append(this.db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("tbl_name:"); + if (this.tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.tbl_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("index_name:"); + if (this.index_name == null) { + sb.append("null"); + } else { + sb.append(this.index_name); + } + first = false; + sb.append(")"); + return 
sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_index_by_name_argsStandardSchemeFactory implements SchemeFactory { + public get_index_by_name_argsStandardScheme getScheme() { + return new get_index_by_name_argsStandardScheme(); + } + } + + private static class get_index_by_name_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // INDEX_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.index_name = iprot.readString(); + struct.setIndex_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.db_name != null) { + oprot.writeFieldBegin(DB_NAME_FIELD_DESC); + oprot.writeString(struct.db_name); + oprot.writeFieldEnd(); + } + if (struct.tbl_name != null) { + oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); + oprot.writeString(struct.tbl_name); + oprot.writeFieldEnd(); + } + if (struct.index_name != null) { + oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); + oprot.writeString(struct.index_name); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_index_by_name_argsTupleSchemeFactory implements SchemeFactory { + public get_index_by_name_argsTupleScheme getScheme() { + return new get_index_by_name_argsTupleScheme(); + } + } + + private static class get_index_by_name_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { + 
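// Editor's note (not Thrift-generated): the tuple scheme trades the standard
+ // protocol's per-field headers for one leading BitSet of isSet flags followed by
+ // the raw values in field-id order; roughly, with db_name and tbl_name set but
+ // index_name unset, the payload is [bitset 0b011]["db"]["tbl"]. Hence
+ // writeBitSet(optionals, 3) must be emitted before any field data.
+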
TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetDb_name()) { + optionals.set(0); + } + if (struct.isSetTbl_name()) { + optionals.set(1); + } + if (struct.isSetIndex_name()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetDb_name()) { + oprot.writeString(struct.db_name); + } + if (struct.isSetTbl_name()) { + oprot.writeString(struct.tbl_name); + } + if (struct.isSetIndex_name()) { + oprot.writeString(struct.index_name); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.db_name = iprot.readString(); + struct.setDb_nameIsSet(true); + } + if (incoming.get(1)) { + struct.tbl_name = iprot.readString(); + struct.setTbl_nameIsSet(true); + } + if (incoming.get(2)) { + struct.index_name = iprot.readString(); + struct.setIndex_nameIsSet(true); + } + } + } + + } + + public static class get_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_index_by_name_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_by_name_resultTupleSchemeFactory()); + } + + private Index success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_result.class, metaDataMap); + } + + public get_index_by_name_result() { + } + + public get_index_by_name_result( + Index success, + MetaException o1, + NoSuchObjectException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
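+ * Editor's note (not generated): unlike the args struct, the fields here are struct-typed (Index plus the two exception types), so the constructor below clones them through their copy constructors for a genuinely deep copy.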
+ */ + public get_index_by_name_result(get_index_by_name_result other) { + if (other.isSetSuccess()) { + this.success = new Index(other.success); + } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new NoSuchObjectException(other.o2); + } + } + + public get_index_by_name_result deepCopy() { + return new get_index_by_name_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public Index getSuccess() { + return this.success; + } + + public void setSuccess(Index success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { + return this.o2; + } + + public void setO2(NoSuchObjectException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((Index)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((NoSuchObjectException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_index_by_name_result) + return this.equals((get_index_by_name_result)that); + return false; + } + + public boolean equals(get_index_by_name_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || 
that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + + boolean present_o2 = true && (isSetO2()); + list.add(present_o2); + if (present_o2) + list.add(o2); + + return list.hashCode(); + } + + @Override + public int compareTo(get_index_by_name_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, other.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_index_by_name_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private 
void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_index_by_name_resultStandardSchemeFactory implements SchemeFactory { + public get_index_by_name_resultStandardScheme getScheme() { + return new get_index_by_name_resultStandardScheme(); } } - private static class alter_index_resultStandardScheme extends StandardScheme { + private static class get_index_by_name_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -106625,9 +109305,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result break; } switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new Index(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new InvalidOperationException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -106636,7 +109325,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -106652,10 +109341,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_index_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, alter_index_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o1 != null) { oprot.writeFieldBegin(O1_FIELD_DESC); struct.o1.write(oprot); @@ -106672,25 +109366,31 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_index_result } - private static class alter_index_resultTupleSchemeFactory implements SchemeFactory { - public alter_index_resultTupleScheme getScheme() { - return new alter_index_resultTupleScheme(); + private static class get_index_by_name_resultTupleSchemeFactory implements SchemeFactory { + public get_index_by_name_resultTupleScheme getScheme() { + return new get_index_by_name_resultTupleScheme(); } } - private static class alter_index_resultTupleScheme extends TupleScheme { + private static class get_index_by_name_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, 
get_index_by_name_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetO1()) { + if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } if (struct.isSetO1()) { struct.o1.write(oprot); } @@ -106700,16 +109400,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_index_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, alter_index_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.o1 = new InvalidOperationException(); + struct.success = new Index(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } - if (incoming.get(1)) { - struct.o2 = new MetaException(); + if (incoming.get(2)) { + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -106718,31 +109423,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_index_result s } - public static class drop_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_args"); + public static class get_indexes_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); - private static final org.apache.thrift.protocol.TField DELETE_DATA_FIELD_DESC = new org.apache.thrift.protocol.TField("deleteData", org.apache.thrift.protocol.TType.BOOL, (short)4); + private static final org.apache.thrift.protocol.TField MAX_INDEXES_FIELD_DESC = new org.apache.thrift.protocol.TField("max_indexes", org.apache.thrift.protocol.TType.I16, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_index_by_name_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_index_by_name_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_indexes_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_indexes_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required - private String 
index_name; // required - private boolean deleteData; // required + private short max_indexes; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - INDEX_NAME((short)3, "index_name"), - DELETE_DATA((short)4, "deleteData"); + MAX_INDEXES((short)3, "max_indexes"); private static final Map byName = new HashMap(); @@ -106761,10 +109463,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // INDEX_NAME - return INDEX_NAME; - case 4: // DELETE_DATA - return DELETE_DATA; + case 3: // MAX_INDEXES + return MAX_INDEXES; default: return null; } @@ -106805,7 +109505,7 @@ public String getFieldName() { } // isset id assignments - private static final int __DELETEDATA_ISSET_ID = 0; + private static final int __MAX_INDEXES_ISSET_ID = 0; private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { @@ -106814,35 +109514,33 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.DELETE_DATA, new org.apache.thrift.meta_data.FieldMetaData("deleteData", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_args.class, metaDataMap); } - public drop_index_by_name_args() { + public get_indexes_args() { + this.max_indexes = (short)-1; + } - public drop_index_by_name_args( + public get_indexes_args( String db_name, String tbl_name, - String index_name, - boolean deleteData) + short max_indexes) { this(); this.db_name = db_name; this.tbl_name = tbl_name; - this.index_name = index_name; - this.deleteData = deleteData; - setDeleteDataIsSet(true); + this.max_indexes = max_indexes; + setMax_indexesIsSet(true); } /** * Performs a deep copy on other. 
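 * Editor's note: __isset_bitfield, which tracks presence of primitive fields such as max_indexes, is copied wholesale by the constructor below.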
*/ - public drop_index_by_name_args(drop_index_by_name_args other) { + public get_indexes_args(get_indexes_args other) { __isset_bitfield = other.__isset_bitfield; if (other.isSetDb_name()) { this.db_name = other.db_name; @@ -106850,23 +109548,19 @@ public drop_index_by_name_args(drop_index_by_name_args other) { if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } - if (other.isSetIndex_name()) { - this.index_name = other.index_name; - } - this.deleteData = other.deleteData; + this.max_indexes = other.max_indexes; } - public drop_index_by_name_args deepCopy() { - return new drop_index_by_name_args(this); + public get_indexes_args deepCopy() { + return new get_indexes_args(this); } @Override public void clear() { this.db_name = null; this.tbl_name = null; - this.index_name = null; - setDeleteDataIsSet(false); - this.deleteData = false; + this.max_indexes = (short)-1; + } public String getDb_name() { @@ -106915,49 +109609,26 @@ public void setTbl_nameIsSet(boolean value) { } } - public String getIndex_name() { - return this.index_name; - } - - public void setIndex_name(String index_name) { - this.index_name = index_name; - } - - public void unsetIndex_name() { - this.index_name = null; - } - - /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ - public boolean isSetIndex_name() { - return this.index_name != null; - } - - public void setIndex_nameIsSet(boolean value) { - if (!value) { - this.index_name = null; - } - } - - public boolean isDeleteData() { - return this.deleteData; + public short getMax_indexes() { + return this.max_indexes; } - public void setDeleteData(boolean deleteData) { - this.deleteData = deleteData; - setDeleteDataIsSet(true); + public void setMax_indexes(short max_indexes) { + this.max_indexes = max_indexes; + setMax_indexesIsSet(true); } - public void unsetDeleteData() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __DELETEDATA_ISSET_ID); + public void unsetMax_indexes() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); } - /** Returns true if field deleteData is set (has been assigned a value) and false otherwise */ - public boolean isSetDeleteData() { - return EncodingUtils.testBit(__isset_bitfield, __DELETEDATA_ISSET_ID); + /** Returns true if field max_indexes is set (has been assigned a value) and false otherwise */ + public boolean isSetMax_indexes() { + return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); } - public void setDeleteDataIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __DELETEDATA_ISSET_ID, value); + public void setMax_indexesIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID, value); } public void setFieldValue(_Fields field, Object value) { @@ -106978,19 +109649,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case INDEX_NAME: - if (value == null) { - unsetIndex_name(); - } else { - setIndex_name((String)value); - } - break; - - case DELETE_DATA: + case MAX_INDEXES: if (value == null) { - unsetDeleteData(); + unsetMax_indexes(); } else { - setDeleteData((Boolean)value); + setMax_indexes((Short)value); } break; @@ -107005,11 +109668,8 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); - case INDEX_NAME: - return getIndex_name(); - - case DELETE_DATA: - return isDeleteData(); + case MAX_INDEXES: + return getMax_indexes(); } throw new IllegalStateException(); @@ -107026,10 
+109686,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); - case INDEX_NAME: - return isSetIndex_name(); - case DELETE_DATA: - return isSetDeleteData(); + case MAX_INDEXES: + return isSetMax_indexes(); } throw new IllegalStateException(); } @@ -107038,12 +109696,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_index_by_name_args) - return this.equals((drop_index_by_name_args)that); + if (that instanceof get_indexes_args) + return this.equals((get_indexes_args)that); return false; } - public boolean equals(drop_index_by_name_args that) { + public boolean equals(get_indexes_args that) { if (that == null) return false; @@ -107065,21 +109723,12 @@ public boolean equals(drop_index_by_name_args that) { return false; } - boolean this_present_index_name = true && this.isSetIndex_name(); - boolean that_present_index_name = true && that.isSetIndex_name(); - if (this_present_index_name || that_present_index_name) { - if (!(this_present_index_name && that_present_index_name)) - return false; - if (!this.index_name.equals(that.index_name)) - return false; - } - - boolean this_present_deleteData = true; - boolean that_present_deleteData = true; - if (this_present_deleteData || that_present_deleteData) { - if (!(this_present_deleteData && that_present_deleteData)) + boolean this_present_max_indexes = true; + boolean that_present_max_indexes = true; + if (this_present_max_indexes || that_present_max_indexes) { + if (!(this_present_max_indexes && that_present_max_indexes)) return false; - if (this.deleteData != that.deleteData) + if (this.max_indexes != that.max_indexes) return false; } @@ -107100,21 +109749,16 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); - boolean present_index_name = true && (isSetIndex_name()); - list.add(present_index_name); - if (present_index_name) - list.add(index_name); - - boolean present_deleteData = true; - list.add(present_deleteData); - if (present_deleteData) - list.add(deleteData); + boolean present_max_indexes = true; + list.add(present_max_indexes); + if (present_max_indexes) + list.add(max_indexes); return list.hashCode(); } @Override - public int compareTo(drop_index_by_name_args other) { + public int compareTo(get_indexes_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -107141,22 +109785,12 @@ public int compareTo(drop_index_by_name_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetIndex_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetDeleteData()).compareTo(other.isSetDeleteData()); + lastComparison = Boolean.valueOf(isSetMax_indexes()).compareTo(other.isSetMax_indexes()); if (lastComparison != 0) { return lastComparison; } - if (isSetDeleteData()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.deleteData, other.deleteData); + if (isSetMax_indexes()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_indexes, other.max_indexes); if (lastComparison != 0) { return lastComparison; } @@ -107178,7 +109812,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws 
org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_index_by_name_args("); + StringBuilder sb = new StringBuilder("get_indexes_args("); boolean first = true; sb.append("db_name:"); @@ -107197,16 +109831,8 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("index_name:"); - if (this.index_name == null) { - sb.append("null"); - } else { - sb.append(this.index_name); - } - first = false; - if (!first) sb.append(", "); - sb.append("deleteData:"); - sb.append(this.deleteData); + sb.append("max_indexes:"); + sb.append(this.max_indexes); first = false; sb.append(")"); return sb.toString(); @@ -107235,15 +109861,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class drop_index_by_name_argsStandardSchemeFactory implements SchemeFactory { - public drop_index_by_name_argsStandardScheme getScheme() { - return new drop_index_by_name_argsStandardScheme(); + private static class get_indexes_argsStandardSchemeFactory implements SchemeFactory { + public get_indexes_argsStandardScheme getScheme() { + return new get_indexes_argsStandardScheme(); } } - private static class drop_index_by_name_argsStandardScheme extends StandardScheme { + private static class get_indexes_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -107269,18 +109895,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // INDEX_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.index_name = iprot.readString(); - struct.setIndex_nameIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // DELETE_DATA - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.deleteData = iprot.readBool(); - struct.setDeleteDataIsSet(true); + case 3: // MAX_INDEXES + if (schemeField.type == org.apache.thrift.protocol.TType.I16) { + struct.max_indexes = iprot.readI16(); + struct.setMax_indexesIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -107294,7 +109912,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -107308,13 +109926,8 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } - if (struct.index_name != null) { - oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); - oprot.writeString(struct.index_name); - oprot.writeFieldEnd(); - } - oprot.writeFieldBegin(DELETE_DATA_FIELD_DESC); - oprot.writeBool(struct.deleteData); + oprot.writeFieldBegin(MAX_INDEXES_FIELD_DESC); + oprot.writeI16(struct.max_indexes); oprot.writeFieldEnd(); 
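// Editor's note: writeFieldStop() emits the TType.STOP sentinel that terminates the struct; readers loop on readFieldBegin() until they hit it, which is also what lets them skip unknown fields safely.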
oprot.writeFieldStop(); oprot.writeStructEnd(); @@ -107322,16 +109935,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name } - private static class drop_index_by_name_argsTupleSchemeFactory implements SchemeFactory { - public drop_index_by_name_argsTupleScheme getScheme() { - return new drop_index_by_name_argsTupleScheme(); + private static class get_indexes_argsTupleSchemeFactory implements SchemeFactory { + public get_indexes_argsTupleScheme getScheme() { + return new get_indexes_argsTupleScheme(); } } - private static class drop_index_by_name_argsTupleScheme extends TupleScheme { + private static class get_indexes_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -107340,31 +109953,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ if (struct.isSetTbl_name()) { optionals.set(1); } - if (struct.isSetIndex_name()) { + if (struct.isSetMax_indexes()) { optionals.set(2); } - if (struct.isSetDeleteData()) { - optionals.set(3); - } - oprot.writeBitSet(optionals, 4); + oprot.writeBitSet(optionals, 3); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } - if (struct.isSetIndex_name()) { - oprot.writeString(struct.index_name); - } - if (struct.isSetDeleteData()) { - oprot.writeBool(struct.deleteData); + if (struct.isSetMax_indexes()) { + oprot.writeI16(struct.max_indexes); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(4); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -107374,32 +109981,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_a struct.setTbl_nameIsSet(true); } if (incoming.get(2)) { - struct.index_name = iprot.readString(); - struct.setIndex_nameIsSet(true); - } - if (incoming.get(3)) { - struct.deleteData = iprot.readBool(); - struct.setDeleteDataIsSet(true); + struct.max_indexes = iprot.readI16(); + struct.setMax_indexesIsSet(true); } } } } - public static class drop_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("drop_index_by_name_result"); + public static class get_indexes_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new 
org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new drop_index_by_name_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new drop_index_by_name_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_indexes_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_indexes_resultTupleSchemeFactory()); } - private boolean success; // required + private List success; // required private NoSuchObjectException o1; // required private MetaException o2; // required @@ -107468,32 +110071,30 @@ public String getFieldName() { } // isset id assignments - private static final int __SUCCESS_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(drop_index_by_name_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_result.class, metaDataMap); } - public drop_index_by_name_result() { + public get_indexes_result() { } - public drop_index_by_name_result( - boolean success, + public get_indexes_result( + List success, NoSuchObjectException o1, MetaException o2) { this(); this.success = success; - setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; } @@ -107501,9 +110102,14 @@ public drop_index_by_name_result( /** * Performs a deep copy on other. 
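 * Editor's note: the success list is deep-copied element by element via new Index(other_element) rather than by copying the list reference.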
*/ - public drop_index_by_name_result(drop_index_by_name_result other) { - __isset_bitfield = other.__isset_bitfield; - this.success = other.success; + public get_indexes_result(get_indexes_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success.size()); + for (Index other_element : other.success) { + __this__success.add(new Index(other_element)); + } + this.success = __this__success; + } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } @@ -107512,38 +110118,53 @@ public drop_index_by_name_result(drop_index_by_name_result other) { } } - public drop_index_by_name_result deepCopy() { - return new drop_index_by_name_result(this); + public get_indexes_result deepCopy() { + return new get_indexes_result(this); } @Override public void clear() { - setSuccessIsSet(false); - this.success = false; + this.success = null; this.o1 = null; this.o2 = null; } - public boolean isSuccess() { + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? null : this.success.iterator(); + } + + public void addToSuccess(Index elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { return this.success; } - public void setSuccess(boolean success) { + public void setSuccess(List success) { this.success = success; - setSuccessIsSet(true); } public void unsetSuccess() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); + this.success = null; } /** Returns true if field success is set (has been assigned a value) and false otherwise */ public boolean isSetSuccess() { - return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); + return this.success != null; } public void setSuccessIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); + if (!value) { + this.success = null; + } } public NoSuchObjectException getO1() { @@ -107598,7 +110219,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Boolean)value); + setSuccess((List)value); } break; @@ -107624,7 +110245,7 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case SUCCESS: - return isSuccess(); + return getSuccess(); case O1: return getO1(); @@ -107657,21 +110278,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof drop_index_by_name_result) - return this.equals((drop_index_by_name_result)that); + if (that instanceof get_indexes_result) + return this.equals((get_indexes_result)that); return false; } - public boolean equals(drop_index_by_name_result that) { + public boolean equals(get_indexes_result that) { if (that == null) return false; - boolean this_present_success = true; - boolean that_present_success = true; + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (this.success != that.success) + if (!this.success.equals(that.success)) return false; } @@ -107700,7 +110321,7 @@ public boolean equals(drop_index_by_name_result that) { public int hashCode() { List list = new ArrayList(); - boolean 
present_success = true; + boolean present_success = true && (isSetSuccess()); list.add(present_success); if (present_success) list.add(success); @@ -107719,7 +110340,7 @@ public int hashCode() { } @Override - public int compareTo(drop_index_by_name_result other) { + public int compareTo(get_indexes_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -107773,11 +110394,15 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("drop_index_by_name_result("); + StringBuilder sb = new StringBuilder("get_indexes_result("); boolean first = true; sb.append("success:"); - sb.append(this.success); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -107814,23 +110439,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class drop_index_by_name_resultStandardSchemeFactory implements SchemeFactory { - public drop_index_by_name_resultStandardScheme getScheme() { - return new drop_index_by_name_resultStandardScheme(); + private static class get_indexes_resultStandardSchemeFactory implements SchemeFactory { + public get_indexes_resultStandardScheme getScheme() { + return new get_indexes_resultStandardScheme(); } } - private static class drop_index_by_name_resultStandardScheme extends StandardScheme { + private static class get_indexes_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -107841,8 +110464,19 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { - struct.success = iprot.readBool(); + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1036 = iprot.readListBegin(); + struct.success = new ArrayList(_list1036.size); + Index _elem1037; + for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) + { + _elem1037 = new Index(); + _elem1037.read(iprot); + struct.success.add(_elem1037); + } + iprot.readListEnd(); + } struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -107875,13 +110509,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_index_by_name_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result struct) 
throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.isSetSuccess()) { + if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - oprot.writeBool(struct.success); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (Index _iter1039 : struct.success) + { + _iter1039.write(oprot); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -107900,16 +110541,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_index_by_name } - private static class drop_index_by_name_resultTupleSchemeFactory implements SchemeFactory { - public drop_index_by_name_resultTupleScheme getScheme() { - return new drop_index_by_name_resultTupleScheme(); + private static class get_indexes_resultTupleSchemeFactory implements SchemeFactory { + public get_indexes_resultTupleScheme getScheme() { + return new get_indexes_resultTupleScheme(); } } - private static class drop_index_by_name_resultTupleScheme extends TupleScheme { + private static class get_indexes_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -107923,7 +110564,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ } oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { - oprot.writeBool(struct.success); + { + oprot.writeI32(struct.success.size()); + for (Index _iter1040 : struct.success) + { + _iter1040.write(oprot); + } + } } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -107934,11 +110581,21 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = iprot.readBool(); + { + org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1041.size); + Index _elem1042; + for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) + { + _elem1042 = new Index(); + _elem1042.read(iprot); + struct.success.add(_elem1042); + } + } struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -107956,28 +110613,28 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_index_by_name_r } - public static class get_index_by_name_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_args"); + public static class get_index_names_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_args"); private static 
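The standard-scheme pair above shows the list wire format: the writer announces the element type and count up front (writeListBegin), then emits each Index; the reader pre-sizes its ArrayList from the TList header and reads one element per iteration. A rough stand-alone equivalent over plain streams, with DataInputStream/DataOutputStream as hypothetical substitutes for the Thrift protocol:

import java.io.*;
import java.util.ArrayList;
import java.util.List;

public class ListWireSketch {
  // Writer: size first, then each element, echoing writeListBegin + loop.
  static void writeStringList(DataOutputStream out, List<String> xs)
      throws IOException {
    out.writeInt(xs.size());
    for (String x : xs) {
      out.writeUTF(x);
    }
  }

  // Reader: read the declared size, pre-size the list, then fill it.
  static List<String> readStringList(DataInputStream in) throws IOException {
    int size = in.readInt();
    List<String> xs = new ArrayList<>(size);
    for (int i = 0; i < size; i++) {
      xs.add(in.readUTF());
    }
    return xs;
  }
}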
final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField INDEX_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("index_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField MAX_INDEXES_FIELD_DESC = new org.apache.thrift.protocol.TField("max_indexes", org.apache.thrift.protocol.TType.I16, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_by_name_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_by_name_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_index_names_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_names_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required - private String index_name; // required + private short max_indexes; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), TBL_NAME((short)2, "tbl_name"), - INDEX_NAME((short)3, "index_name"); + MAX_INDEXES((short)3, "max_indexes"); private static final Map byName = new HashMap(); @@ -107996,8 +110653,8 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // INDEX_NAME - return INDEX_NAME; + case 3: // MAX_INDEXES + return MAX_INDEXES; default: return null; } @@ -108038,6 +110695,8 @@ public String getFieldName() { } // isset id assignments + private static final int __MAX_INDEXES_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -108045,50 +110704,53 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.INDEX_NAME, new org.apache.thrift.meta_data.FieldMetaData("index_name", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_args.class, metaDataMap); } - public get_index_by_name_args() { + public get_index_names_args() { + this.max_indexes = 
(short)-1; + } - public get_index_by_name_args( + public get_index_names_args( String db_name, String tbl_name, - String index_name) + short max_indexes) { this(); this.db_name = db_name; this.tbl_name = tbl_name; - this.index_name = index_name; + this.max_indexes = max_indexes; + setMax_indexesIsSet(true); } /** * Performs a deep copy on other. */ - public get_index_by_name_args(get_index_by_name_args other) { + public get_index_names_args(get_index_names_args other) { + __isset_bitfield = other.__isset_bitfield; if (other.isSetDb_name()) { this.db_name = other.db_name; } if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } - if (other.isSetIndex_name()) { - this.index_name = other.index_name; - } + this.max_indexes = other.max_indexes; } - public get_index_by_name_args deepCopy() { - return new get_index_by_name_args(this); + public get_index_names_args deepCopy() { + return new get_index_names_args(this); } @Override public void clear() { this.db_name = null; this.tbl_name = null; - this.index_name = null; + this.max_indexes = (short)-1; + } public String getDb_name() { @@ -108137,27 +110799,26 @@ public void setTbl_nameIsSet(boolean value) { } } - public String getIndex_name() { - return this.index_name; + public short getMax_indexes() { + return this.max_indexes; } - public void setIndex_name(String index_name) { - this.index_name = index_name; + public void setMax_indexes(short max_indexes) { + this.max_indexes = max_indexes; + setMax_indexesIsSet(true); } - public void unsetIndex_name() { - this.index_name = null; + public void unsetMax_indexes() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); } - /** Returns true if field index_name is set (has been assigned a value) and false otherwise */ - public boolean isSetIndex_name() { - return this.index_name != null; + /** Returns true if field max_indexes is set (has been assigned a value) and false otherwise */ + public boolean isSetMax_indexes() { + return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); } - public void setIndex_nameIsSet(boolean value) { - if (!value) { - this.index_name = null; - } + public void setMax_indexesIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID, value); } public void setFieldValue(_Fields field, Object value) { @@ -108178,11 +110839,11 @@ public void setFieldValue(_Fields field, Object value) { } break; - case INDEX_NAME: + case MAX_INDEXES: if (value == null) { - unsetIndex_name(); + unsetMax_indexes(); } else { - setIndex_name((String)value); + setMax_indexes((Short)value); } break; @@ -108197,8 +110858,8 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); - case INDEX_NAME: - return getIndex_name(); + case MAX_INDEXES: + return getMax_indexes(); } throw new IllegalStateException(); @@ -108215,8 +110876,8 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); - case INDEX_NAME: - return isSetIndex_name(); + case MAX_INDEXES: + return isSetMax_indexes(); } throw new IllegalStateException(); } @@ -108225,12 +110886,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_by_name_args) - return this.equals((get_index_by_name_args)that); + if (that instanceof get_index_names_args) + return this.equals((get_index_names_args)that); return false; } - public boolean equals(get_index_by_name_args that) { + public boolean 
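Because max_indexes is a Java primitive, its unset state cannot be a null reference the way index_name's was; the generated code instead tracks assignment in a one-byte __isset_bitfield through EncodingUtils. The same idea with plain bit operations (names here are illustrative, not the generated API):

public class IssetSketch {
  private static final int MAX_INDEXES_ISSET_ID = 0;   // bit position
  private byte issetBitfield = 0;
  private short maxIndexes = (short) -1;               // declared default

  public void setMaxIndexes(short v) {
    maxIndexes = v;
    issetBitfield |= (1 << MAX_INDEXES_ISSET_ID);      // mark as assigned
  }

  public boolean isSetMaxIndexes() {
    return (issetBitfield & (1 << MAX_INDEXES_ISSET_ID)) != 0;
  }

  public void unsetMaxIndexes() {
    issetBitfield &= ~(1 << MAX_INDEXES_ISSET_ID);     // clear the flag only
  }
}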
equals(get_index_names_args that) { if (that == null) return false; @@ -108252,12 +110913,12 @@ public boolean equals(get_index_by_name_args that) { return false; } - boolean this_present_index_name = true && this.isSetIndex_name(); - boolean that_present_index_name = true && that.isSetIndex_name(); - if (this_present_index_name || that_present_index_name) { - if (!(this_present_index_name && that_present_index_name)) + boolean this_present_max_indexes = true; + boolean that_present_max_indexes = true; + if (this_present_max_indexes || that_present_max_indexes) { + if (!(this_present_max_indexes && that_present_max_indexes)) return false; - if (!this.index_name.equals(that.index_name)) + if (this.max_indexes != that.max_indexes) return false; } @@ -108278,16 +110939,16 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); - boolean present_index_name = true && (isSetIndex_name()); - list.add(present_index_name); - if (present_index_name) - list.add(index_name); + boolean present_max_indexes = true; + list.add(present_max_indexes); + if (present_max_indexes) + list.add(max_indexes); return list.hashCode(); } @Override - public int compareTo(get_index_by_name_args other) { + public int compareTo(get_index_names_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -108314,12 +110975,12 @@ public int compareTo(get_index_by_name_args other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetIndex_name()).compareTo(other.isSetIndex_name()); + lastComparison = Boolean.valueOf(isSetMax_indexes()).compareTo(other.isSetMax_indexes()); if (lastComparison != 0) { return lastComparison; } - if (isSetIndex_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.index_name, other.index_name); + if (isSetMax_indexes()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_indexes, other.max_indexes); if (lastComparison != 0) { return lastComparison; } @@ -108341,7 +111002,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_by_name_args("); + StringBuilder sb = new StringBuilder("get_index_names_args("); boolean first = true; sb.append("db_name:"); @@ -108360,12 +111021,8 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("index_name:"); - if (this.index_name == null) { - sb.append("null"); - } else { - sb.append(this.index_name); - } + sb.append("max_indexes:"); + sb.append(this.max_indexes); first = false; sb.append(")"); return sb.toString(); @@ -108386,21 +111043,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
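The equals() bodies above keep to one convention: nullable fields gate on presence first (true && isSetX()), while a primitive like max_indexes is always "present" and compares with != directly. Condensed into a small sketch with hypothetical field names:

import java.util.Objects;

class ArgsSketch {
  String dbName;       // nullable field
  short maxIndexes;    // primitive field, always present

  @Override public boolean equals(Object that) {
    if (!(that instanceof ArgsSketch)) return false;
    ArgsSketch o = (ArgsSketch) that;
    boolean thisHas = this.dbName != null;
    boolean thatHas = o.dbName != null;
    if (thisHas || thatHas) {
      if (!(thisHas && thatHas)) return false;  // one set, one unset
      if (!this.dbName.equals(o.dbName)) return false;
    }
    return this.maxIndexes == o.maxIndexes;     // primitive: direct compare
  }

  @Override public int hashCode() {
    return Objects.hash(dbName, maxIndexes);    // keep in step with equals
  }
}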
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_index_by_name_argsStandardSchemeFactory implements SchemeFactory { - public get_index_by_name_argsStandardScheme getScheme() { - return new get_index_by_name_argsStandardScheme(); + private static class get_index_names_argsStandardSchemeFactory implements SchemeFactory { + public get_index_names_argsStandardScheme getScheme() { + return new get_index_names_argsStandardScheme(); } } - private static class get_index_by_name_argsStandardScheme extends StandardScheme { + private static class get_index_names_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -108426,10 +111085,10 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_a org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // INDEX_NAME - if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.index_name = iprot.readString(); - struct.setIndex_nameIsSet(true); + case 3: // MAX_INDEXES + if (schemeField.type == org.apache.thrift.protocol.TType.I16) { + struct.max_indexes = iprot.readI16(); + struct.setMax_indexesIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -108443,7 +111102,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_a struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -108457,27 +111116,25 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_ oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } - if (struct.index_name != null) { - oprot.writeFieldBegin(INDEX_NAME_FIELD_DESC); - oprot.writeString(struct.index_name); - oprot.writeFieldEnd(); - } + oprot.writeFieldBegin(MAX_INDEXES_FIELD_DESC); + oprot.writeI16(struct.max_indexes); + oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_index_by_name_argsTupleSchemeFactory implements SchemeFactory { - public get_index_by_name_argsTupleScheme getScheme() { - return new get_index_by_name_argsTupleScheme(); + private static class get_index_names_argsTupleSchemeFactory implements SchemeFactory { + public get_index_names_argsTupleScheme getScheme() { + return new get_index_names_argsTupleScheme(); } } - private static class get_index_by_name_argsTupleScheme extends TupleScheme { + private static class get_index_names_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) 
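The __isset_bitfield = 0 line reinstated above is the defensive reset the generated comment apologizes for: Java deserialization constructs the object without running any constructor or field initializer, so the struct re-establishes its default by hand before decoding the stream. A hedged illustration, with defaultReadObject() standing in for the TCompactProtocol decode:

import java.io.*;

class BitfieldResetSketch implements Serializable {
  private transient byte issetBitfield = 0;  // not carried on the wire

  private void readObject(ObjectInputStream in)
      throws IOException, ClassNotFoundException {
    // No constructor ran, so the initializer above never executed; reset
    // the bookkeeping field explicitly before reading the real state.
    issetBitfield = 0;
    in.defaultReadObject();
  }
}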
prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -108486,7 +111143,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_a if (struct.isSetTbl_name()) { optionals.set(1); } - if (struct.isSetIndex_name()) { + if (struct.isSetMax_indexes()) { optionals.set(2); } oprot.writeBitSet(optionals, 3); @@ -108496,13 +111153,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_a if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } - if (struct.isSetIndex_name()) { - oprot.writeString(struct.index_name); + if (struct.isSetMax_indexes()) { + oprot.writeI16(struct.max_indexes); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { @@ -108514,36 +111171,33 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_ar struct.setTbl_nameIsSet(true); } if (incoming.get(2)) { - struct.index_name = iprot.readString(); - struct.setIndex_nameIsSet(true); + struct.max_indexes = iprot.readI16(); + struct.setMax_indexesIsSet(true); } } } } - public static class get_index_by_name_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_by_name_result"); + public static class get_index_names_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); - private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_by_name_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_by_name_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_index_names_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_index_names_resultTupleSchemeFactory()); } - private Index success; // required - private MetaException o1; // required - private NoSuchObjectException o2; // required + private List success; // required + private MetaException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
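The tuple scheme above is the compact variant: a BitSet header announces which of the three optional fields follow, and only those fields are written, in declaration order, so reader and writer must agree on both the ordering and the bit count. An approximation over plain streams (DataOutputStream/DataInputStream are hypothetical substitutes for TTupleProtocol):

import java.io.*;
import java.util.BitSet;

public class TupleSchemeSketch {
  static void write(DataOutputStream out, String dbName, String tblName,
                    Short maxIndexes) throws IOException {
    BitSet optionals = new BitSet();
    if (dbName != null)     optionals.set(0);
    if (tblName != null)    optionals.set(1);
    if (maxIndexes != null) optionals.set(2);
    byte[] bits = optionals.toByteArray();
    out.writeByte(bits.length == 0 ? 0 : bits[0]);  // 3 flags fit in one byte
    if (dbName != null)     out.writeUTF(dbName);
    if (tblName != null)    out.writeUTF(tblName);
    if (maxIndexes != null) out.writeShort(maxIndexes);
  }

  static Object[] read(DataInputStream in) throws IOException {
    BitSet incoming = BitSet.valueOf(new byte[] { in.readByte() });
    String dbName     = incoming.get(0) ? in.readUTF()   : null;
    String tblName    = incoming.get(1) ? in.readUTF()   : null;
    Short  maxIndexes = incoming.get(2) ? in.readShort() : null;
    return new Object[] { dbName, tblName, maxIndexes };  // reads mirror bits
  }
}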
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)1, "o2"); private static final Map byName = new HashMap(); @@ -108560,9 +111214,7 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; - case 1: // O1 - return O1; - case 2: // O2 + case 1: // O2 return O2; default: return null; @@ -108608,60 +111260,69 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class))); - tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_by_name_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_result.class, metaDataMap); } - public get_index_by_name_result() { + public get_index_names_result() { } - public get_index_by_name_result( - Index success, - MetaException o1, - NoSuchObjectException o2) + public get_index_names_result( + List success, + MetaException o2) { this(); this.success = success; - this.o1 = o1; this.o2 = o2; } /** * Performs a deep copy on other. */ - public get_index_by_name_result(get_index_by_name_result other) { + public get_index_names_result(get_index_names_result other) { if (other.isSetSuccess()) { - this.success = new Index(other.success); - } - if (other.isSetO1()) { - this.o1 = new MetaException(other.o1); + List __this__success = new ArrayList(other.success); + this.success = __this__success; } if (other.isSetO2()) { - this.o2 = new NoSuchObjectException(other.o2); + this.o2 = new MetaException(other.o2); } } - public get_index_by_name_result deepCopy() { - return new get_index_by_name_result(this); + public get_index_names_result deepCopy() { + return new get_index_names_result(this); } @Override public void clear() { this.success = null; - this.o1 = null; this.o2 = null; } - public Index getSuccess() { + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
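One contrast worth noting in the copy constructor above: for the List of String results the deep copy is simply new ArrayList(other.success), with no per-element cloning, because String is immutable and safe to share, unlike the struct-valued Index list earlier. In sketch form:

import java.util.ArrayList;
import java.util.List;

class NamesResultSketch {
  List<String> success;

  NamesResultSketch() {}

  NamesResultSketch(NamesResultSketch other) {
    if (other.success != null) {
      // Immutable elements: copying the list container is enough.
      this.success = new ArrayList<>(other.success);
    }
  }
}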
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { return this.success; } - public void setSuccess(Index success) { + public void setSuccess(List success) { this.success = success; } @@ -108680,34 +111341,11 @@ public void setSuccessIsSet(boolean value) { } } - public MetaException getO1() { - return this.o1; - } - - public void setO1(MetaException o1) { - this.o1 = o1; - } - - public void unsetO1() { - this.o1 = null; - } - - /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ - public boolean isSetO1() { - return this.o1 != null; - } - - public void setO1IsSet(boolean value) { - if (!value) { - this.o1 = null; - } - } - - public NoSuchObjectException getO2() { + public MetaException getO2() { return this.o2; } - public void setO2(NoSuchObjectException o2) { + public void setO2(MetaException o2) { this.o2 = o2; } @@ -108732,15 +111370,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((Index)value); - } - break; - - case O1: - if (value == null) { - unsetO1(); - } else { - setO1((MetaException)value); + setSuccess((List)value); } break; @@ -108748,7 +111378,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((NoSuchObjectException)value); + setO2((MetaException)value); } break; @@ -108760,9 +111390,6 @@ public Object getFieldValue(_Fields field) { case SUCCESS: return getSuccess(); - case O1: - return getO1(); - case O2: return getO2(); @@ -108779,8 +111406,6 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return isSetSuccess(); - case O1: - return isSetO1(); case O2: return isSetO2(); } @@ -108791,12 +111416,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_by_name_result) - return this.equals((get_index_by_name_result)that); + if (that instanceof get_index_names_result) + return this.equals((get_index_names_result)that); return false; } - public boolean equals(get_index_by_name_result that) { + public boolean equals(get_index_names_result that) { if (that == null) return false; @@ -108809,15 +111434,6 @@ public boolean equals(get_index_by_name_result that) { return false; } - boolean this_present_o1 = true && this.isSetO1(); - boolean that_present_o1 = true && that.isSetO1(); - if (this_present_o1 || that_present_o1) { - if (!(this_present_o1 && that_present_o1)) - return false; - if (!this.o1.equals(that.o1)) - return false; - } - boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -108839,11 +111455,6 @@ public int hashCode() { if (present_success) list.add(success); - boolean present_o1 = true && (isSetO1()); - list.add(present_o1); - if (present_o1) - list.add(o1); - boolean present_o2 = true && (isSetO2()); list.add(present_o2); if (present_o2) @@ -108853,7 +111464,7 @@ public int hashCode() { } @Override - public int compareTo(get_index_by_name_result other) { + public int compareTo(get_index_names_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -108870,16 +111481,6 @@ public int compareTo(get_index_by_name_result other) { return lastComparison; } } - lastComparison = 
Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO1()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); - if (lastComparison != 0) { - return lastComparison; - } - } lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); if (lastComparison != 0) { return lastComparison; @@ -108907,7 +111508,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_by_name_result("); + StringBuilder sb = new StringBuilder("get_index_names_result("); boolean first = true; sb.append("success:"); @@ -108918,14 +111519,6 @@ public String toString() { } first = false; if (!first) sb.append(", "); - sb.append("o1:"); - if (this.o1 == null) { - sb.append("null"); - } else { - sb.append(this.o1); - } - first = false; - if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -108940,9 +111533,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -108961,15 +111551,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_index_by_name_resultStandardSchemeFactory implements SchemeFactory { - public get_index_by_name_resultStandardScheme getScheme() { - return new get_index_by_name_resultStandardScheme(); + private static class get_index_names_resultStandardSchemeFactory implements SchemeFactory { + public get_index_names_resultStandardScheme getScheme() { + return new get_index_names_resultStandardScheme(); } } - private static class get_index_by_name_resultStandardScheme extends StandardScheme { + private static class get_index_names_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -108980,26 +111570,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_r } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new Index(); - struct.success.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list1044 = iprot.readListBegin(); + struct.success = new ArrayList(_list1044.size); + String _elem1045; + for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046) + { + _elem1045 = iprot.readString(); + struct.success.add(_elem1045); + } + iprot.readListEnd(); + } struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 1: // O1 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new MetaException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 2: // O2 + case 1: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - 
struct.o2 = new NoSuchObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -109015,18 +111605,20 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_by_name_r struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o1 != null) { - oprot.writeFieldBegin(O1_FIELD_DESC); - struct.o1.write(oprot); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter1047 : struct.success) + { + oprot.writeString(_iter1047); + } + oprot.writeListEnd(); + } oprot.writeFieldEnd(); } if (struct.o2 != null) { @@ -109040,33 +111632,33 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_by_name_ } - private static class get_index_by_name_resultTupleSchemeFactory implements SchemeFactory { - public get_index_by_name_resultTupleScheme getScheme() { - return new get_index_by_name_resultTupleScheme(); + private static class get_index_names_resultTupleSchemeFactory implements SchemeFactory { + public get_index_names_resultTupleScheme getScheme() { + return new get_index_names_resultTupleScheme(); } } - private static class get_index_by_name_resultTupleScheme extends TupleScheme { + private static class get_index_names_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO1()) { - optionals.set(1); - } if (struct.isSetO2()) { - optionals.set(2); + optionals.set(1); } - oprot.writeBitSet(optionals, 3); + oprot.writeBitSet(optionals, 2); if (struct.isSetSuccess()) { - struct.success.write(oprot); - } - if (struct.isSetO1()) { - struct.o1.write(oprot); + { + oprot.writeI32(struct.success.size()); + for (String _iter1048 : struct.success) + { + oprot.writeString(_iter1048); + } + } } if (struct.isSetO2()) { struct.o2.write(oprot); @@ -109074,21 +111666,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_r } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { - struct.success = new Index(); - struct.success.read(iprot); + { + org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1049.size); + String _elem1050; + for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051) + { + 
_elem1050 = iprot.readString(); + struct.success.add(_elem1050); + } + } struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new MetaException(); - struct.o1.read(iprot); - struct.setO1IsSet(true); - } - if (incoming.get(2)) { - struct.o2 = new NoSuchObjectException(); + struct.o2 = new MetaException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -109097,28 +111692,25 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_by_name_re } - public static class get_indexes_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_args"); + public static class get_primary_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_primary_keys_args"); private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField MAX_INDEXES_FIELD_DESC = new org.apache.thrift.protocol.TField("max_indexes", org.apache.thrift.protocol.TType.I16, (short)3); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_indexes_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_indexes_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_primary_keys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_primary_keys_argsTupleSchemeFactory()); } private String db_name; // required private String tbl_name; // required - private short max_indexes; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - MAX_INDEXES((short)3, "max_indexes"); + TBL_NAME((short)2, "tbl_name"); private static final Map byName = new HashMap(); @@ -109137,8 +111729,6 @@ public static _Fields findByThriftId(int fieldId) { return DB_NAME; case 2: // TBL_NAME return TBL_NAME; - case 3: // MAX_INDEXES - return MAX_INDEXES; default: return null; } @@ -109179,8 +111769,6 @@ public String getFieldName() { } // isset id assignments - private static final int __MAX_INDEXES_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); @@ -109188,53 +111776,42 @@ public String getFieldName() { new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_primary_keys_args.class, metaDataMap); } - public get_indexes_args() { - this.max_indexes = (short)-1; - + public get_primary_keys_args() { } - public get_indexes_args( + public get_primary_keys_args( String db_name, - String tbl_name, - short max_indexes) + String tbl_name) { this(); this.db_name = db_name; this.tbl_name = tbl_name; - this.max_indexes = max_indexes; - setMax_indexesIsSet(true); } /** * Performs a deep copy on other. 
*/ - public get_indexes_args(get_indexes_args other) { - __isset_bitfield = other.__isset_bitfield; + public get_primary_keys_args(get_primary_keys_args other) { if (other.isSetDb_name()) { this.db_name = other.db_name; } if (other.isSetTbl_name()) { this.tbl_name = other.tbl_name; } - this.max_indexes = other.max_indexes; } - public get_indexes_args deepCopy() { - return new get_indexes_args(this); + public get_primary_keys_args deepCopy() { + return new get_primary_keys_args(this); } @Override public void clear() { this.db_name = null; this.tbl_name = null; - this.max_indexes = (short)-1; - } public String getDb_name() { @@ -109283,28 +111860,6 @@ public void setTbl_nameIsSet(boolean value) { } } - public short getMax_indexes() { - return this.max_indexes; - } - - public void setMax_indexes(short max_indexes) { - this.max_indexes = max_indexes; - setMax_indexesIsSet(true); - } - - public void unsetMax_indexes() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); - } - - /** Returns true if field max_indexes is set (has been assigned a value) and false otherwise */ - public boolean isSetMax_indexes() { - return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); - } - - public void setMax_indexesIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID, value); - } - public void setFieldValue(_Fields field, Object value) { switch (field) { case DB_NAME: @@ -109323,14 +111878,6 @@ public void setFieldValue(_Fields field, Object value) { } break; - case MAX_INDEXES: - if (value == null) { - unsetMax_indexes(); - } else { - setMax_indexes((Short)value); - } - break; - } } @@ -109342,9 +111889,6 @@ public Object getFieldValue(_Fields field) { case TBL_NAME: return getTbl_name(); - case MAX_INDEXES: - return getMax_indexes(); - } throw new IllegalStateException(); } @@ -109360,8 +111904,6 @@ public boolean isSet(_Fields field) { return isSetDb_name(); case TBL_NAME: return isSetTbl_name(); - case MAX_INDEXES: - return isSetMax_indexes(); } throw new IllegalStateException(); } @@ -109370,12 +111912,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_indexes_args) - return this.equals((get_indexes_args)that); + if (that instanceof get_primary_keys_args) + return this.equals((get_primary_keys_args)that); return false; } - public boolean equals(get_indexes_args that) { + public boolean equals(get_primary_keys_args that) { if (that == null) return false; @@ -109397,15 +111939,6 @@ public boolean equals(get_indexes_args that) { return false; } - boolean this_present_max_indexes = true; - boolean that_present_max_indexes = true; - if (this_present_max_indexes || that_present_max_indexes) { - if (!(this_present_max_indexes && that_present_max_indexes)) - return false; - if (this.max_indexes != that.max_indexes) - return false; - } - return true; } @@ -109423,16 +111956,11 @@ public int hashCode() { if (present_tbl_name) list.add(tbl_name); - boolean present_max_indexes = true; - list.add(present_max_indexes); - if (present_max_indexes) - list.add(max_indexes); - return list.hashCode(); } @Override - public int compareTo(get_indexes_args other) { + public int compareTo(get_primary_keys_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -109459,16 +111987,6 @@ public int compareTo(get_indexes_args other) { return lastComparison; } } - 
lastComparison = Boolean.valueOf(isSetMax_indexes()).compareTo(other.isSetMax_indexes()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetMax_indexes()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_indexes, other.max_indexes); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -109486,7 +112004,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_indexes_args("); + StringBuilder sb = new StringBuilder("get_primary_keys_args("); boolean first = true; sb.append("db_name:"); @@ -109504,10 +112022,6 @@ public String toString() { sb.append(this.tbl_name); } first = false; - if (!first) sb.append(", "); - sb.append("max_indexes:"); - sb.append(this.max_indexes); - first = false; sb.append(")"); return sb.toString(); } @@ -109527,23 +112041,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_indexes_argsStandardSchemeFactory implements SchemeFactory { - public get_indexes_argsStandardScheme getScheme() { - return new get_indexes_argsStandardScheme(); + private static class get_primary_keys_argsStandardSchemeFactory implements SchemeFactory { + public get_primary_keys_argsStandardScheme getScheme() { + return new get_primary_keys_argsStandardScheme(); } } - private static class get_indexes_argsStandardScheme extends StandardScheme { + private static class get_primary_keys_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -109569,14 +112081,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args st org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // MAX_INDEXES - if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.max_indexes = iprot.readI16(); - struct.setMax_indexesIsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -109586,7 +112090,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_args st struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -109600,25 +112104,22 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_args s oprot.writeString(struct.tbl_name); oprot.writeFieldEnd(); } - 
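Throughout these readers, a field that arrives with an unexpected wire type falls through to TProtocolUtil.skip rather than aborting the whole struct, which is what lets old and new schema versions decode each other's messages. A toy dispatcher in the same spirit; the type codes and length-prefixed string layout here are invented for illustration:

import java.io.*;

public class SkipSketch {
  static final int T_I16 = 6, T_STRING = 11;   // invented wire-type codes

  // Read a field expected to be an i16; skip anything else by its type.
  static Short readExpectedI16(DataInputStream in, int wireType)
      throws IOException {
    if (wireType == T_I16) {
      return in.readShort();
    }
    skip(in, wireType);                        // tolerate schema drift
    return null;
  }

  static void skip(DataInputStream in, int wireType) throws IOException {
    switch (wireType) {
      case T_I16:    in.readShort(); break;
      case T_STRING: in.skipBytes(in.readInt()); break;  // length-prefixed
      default: throw new IOException("unknown wire type " + wireType);
    }
  }
}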
oprot.writeFieldBegin(MAX_INDEXES_FIELD_DESC); - oprot.writeI16(struct.max_indexes); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_indexes_argsTupleSchemeFactory implements SchemeFactory { - public get_indexes_argsTupleScheme getScheme() { - return new get_indexes_argsTupleScheme(); + private static class get_primary_keys_argsTupleSchemeFactory implements SchemeFactory { + public get_primary_keys_argsTupleScheme getScheme() { + return new get_primary_keys_argsTupleScheme(); } } - private static class get_indexes_argsTupleScheme extends TupleScheme { + private static class get_primary_keys_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetDb_name()) { @@ -109627,25 +112128,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_args st if (struct.isSetTbl_name()) { optionals.set(1); } - if (struct.isSetMax_indexes()) { - optionals.set(2); - } - oprot.writeBitSet(optionals, 3); + oprot.writeBitSet(optionals, 2); if (struct.isSetDb_name()) { oprot.writeString(struct.db_name); } if (struct.isSetTbl_name()) { oprot.writeString(struct.tbl_name); } - if (struct.isSetMax_indexes()) { - oprot.writeI16(struct.max_indexes); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { struct.db_name = iprot.readString(); struct.setDb_nameIsSet(true); @@ -109654,17 +112149,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args str struct.tbl_name = iprot.readString(); struct.setTbl_nameIsSet(true); } - if (incoming.get(2)) { - struct.max_indexes = iprot.readI16(); - struct.setMax_indexesIsSet(true); - } } } } - public static class get_indexes_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_indexes_result"); + public static class get_primary_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_primary_keys_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -109672,13 +112163,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_args str private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_indexes_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new 
get_indexes_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_primary_keys_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_primary_keys_resultTupleSchemeFactory()); } - private List success; // required - private NoSuchObjectException o1; // required - private MetaException o2; // required + private List success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -109750,22 +112241,22 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, Index.class)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLPrimaryKey.class)))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_indexes_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_primary_keys_result.class, metaDataMap); } - public get_indexes_result() { + public get_primary_keys_result() { } - public get_indexes_result( - List success, - NoSuchObjectException o1, - MetaException o2) + public get_primary_keys_result( + List success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; @@ -109776,24 +112267,24 @@ public get_indexes_result( /** * Performs a deep copy on other. */ - public get_indexes_result(get_indexes_result other) { + public get_primary_keys_result(get_primary_keys_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success.size()); - for (Index other_element : other.success) { - __this__success.add(new Index(other_element)); + List __this__success = new ArrayList(other.success.size()); + for (SQLPrimaryKey other_element : other.success) { + __this__success.add(new SQLPrimaryKey(other_element)); } this.success = __this__success; } if (other.isSetO1()) { - this.o1 = new NoSuchObjectException(other.o1); + this.o1 = new MetaException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } - public get_indexes_result deepCopy() { - return new get_indexes_result(this); + public get_primary_keys_result deepCopy() { + return new get_primary_keys_result(this); } @Override @@ -109807,22 +112298,22 @@ public int getSuccessSize() { return (this.success == null) ? 
0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? null : this.success.iterator(); } - public void addToSuccess(Index elem) { + public void addToSuccess(SQLPrimaryKey elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -109841,11 +112332,11 @@ public void setSuccessIsSet(boolean value) { } } - public NoSuchObjectException getO1() { + public MetaException getO1() { return this.o1; } - public void setO1(NoSuchObjectException o1) { + public void setO1(MetaException o1) { this.o1 = o1; } @@ -109864,11 +112355,11 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO2() { + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -109893,7 +112384,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); } break; @@ -109901,7 +112392,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO1(); } else { - setO1((NoSuchObjectException)value); + setO1((MetaException)value); } break; @@ -109909,7 +112400,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -109952,12 +112443,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_indexes_result) - return this.equals((get_indexes_result)that); + if (that instanceof get_primary_keys_result) + return this.equals((get_primary_keys_result)that); return false; } - public boolean equals(get_indexes_result that) { + public boolean equals(get_primary_keys_result that) { if (that == null) return false; @@ -110014,7 +112505,7 @@ public int hashCode() { } @Override - public int compareTo(get_indexes_result other) { + public int compareTo(get_primary_keys_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -110068,7 +112559,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_indexes_result("); + StringBuilder sb = new StringBuilder("get_primary_keys_result("); boolean first = true; sb.append("success:"); @@ -110119,15 +112610,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_indexes_resultStandardSchemeFactory implements SchemeFactory { - public get_indexes_resultStandardScheme getScheme() { - return new get_indexes_resultStandardScheme(); + private static class get_primary_keys_resultStandardSchemeFactory implements SchemeFactory { + public get_primary_keys_resultStandardScheme getScheme() { + return new get_primary_keys_resultStandardScheme(); } } - private static class get_indexes_resultStandardScheme extends StandardScheme { + private static class get_primary_keys_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_primary_keys_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -110140,14 +112631,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1004 = iprot.readListBegin(); - struct.success = new ArrayList(_list1004.size); - Index _elem1005; - for (int _i1006 = 0; _i1006 < _list1004.size; ++_i1006) + org.apache.thrift.protocol.TList _list1052 = iprot.readListBegin(); + struct.success = new ArrayList(_list1052.size); + SQLPrimaryKey _elem1053; + for (int _i1054 = 0; _i1054 < _list1052.size; ++_i1054) { - _elem1005 = new Index(); - _elem1005.read(iprot); - struct.success.add(_elem1005); + _elem1053 = new SQLPrimaryKey(); + _elem1053.read(iprot); + struct.success.add(_elem1053); } iprot.readListEnd(); } @@ -110158,7 +112649,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result break; case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } else { @@ -110167,7 +112658,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -110183,7 +112674,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_primary_keys_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -110191,9 +112682,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter1007 : struct.success) + for (SQLPrimaryKey _iter1055 : struct.success) { - _iter1007.write(oprot); + 
_iter1055.write(oprot); } oprot.writeListEnd(); } @@ -110215,16 +112706,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result } - private static class get_indexes_resultTupleSchemeFactory implements SchemeFactory { - public get_indexes_resultTupleScheme getScheme() { - return new get_indexes_resultTupleScheme(); + private static class get_primary_keys_resultTupleSchemeFactory implements SchemeFactory { + public get_primary_keys_resultTupleScheme getScheme() { + return new get_primary_keys_resultTupleScheme(); } } - private static class get_indexes_resultTupleScheme extends TupleScheme { + private static class get_primary_keys_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -110240,9 +112731,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter1008 : struct.success) + for (SQLPrimaryKey _iter1056 : struct.success) { - _iter1008.write(oprot); + _iter1056.write(oprot); } } } @@ -110255,30 +112746,30 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_primary_keys_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1009 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1009.size); - Index _elem1010; - for (int _i1011 = 0; _i1011 < _list1009.size; ++_i1011) + org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1057.size); + SQLPrimaryKey _elem1058; + for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059) { - _elem1010 = new Index(); - _elem1010.read(iprot); - struct.success.add(_elem1010); + _elem1058 = new SQLPrimaryKey(); + _elem1058.read(iprot); + struct.success.add(_elem1058); } } struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o1 = new NoSuchObjectException(); + struct.o1 = new MetaException(); struct.o1.read(iprot); struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new MetaException(); + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -110287,28 +112778,31 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s } - public static class get_index_names_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_args"); + public static class get_foreign_keys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("get_foreign_keys_args"); - private static final org.apache.thrift.protocol.TField DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("db_name", org.apache.thrift.protocol.TType.STRING, (short)1); - private static final org.apache.thrift.protocol.TField TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); - private static final org.apache.thrift.protocol.TField MAX_INDEXES_FIELD_DESC = new org.apache.thrift.protocol.TField("max_indexes", org.apache.thrift.protocol.TType.I16, (short)3); + private static final org.apache.thrift.protocol.TField PARENT_DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parent_db_name", org.apache.thrift.protocol.TType.STRING, (short)1); + private static final org.apache.thrift.protocol.TField PARENT_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parent_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField FOREIGN_DB_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_db_name", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField FOREIGN_TBL_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreign_tbl_name", org.apache.thrift.protocol.TType.STRING, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_names_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_names_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_foreign_keys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_foreign_keys_argsTupleSchemeFactory()); } - private String db_name; // required - private String tbl_name; // required - private short max_indexes; // required + private String parent_db_name; // required + private String parent_tbl_name; // required + private String foreign_db_name; // required + private String foreign_tbl_name; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { - DB_NAME((short)1, "db_name"), - TBL_NAME((short)2, "tbl_name"), - MAX_INDEXES((short)3, "max_indexes"); + PARENT_DB_NAME((short)1, "parent_db_name"), + PARENT_TBL_NAME((short)2, "parent_tbl_name"), + FOREIGN_DB_NAME((short)3, "foreign_db_name"), + FOREIGN_TBL_NAME((short)4, "foreign_tbl_name"); private static final Map byName = new HashMap(); @@ -110323,12 +112817,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s */ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { - case 1: // DB_NAME - return DB_NAME; - case 2: // TBL_NAME - return TBL_NAME; - case 3: // MAX_INDEXES - return MAX_INDEXES; + case 1: // PARENT_DB_NAME + return PARENT_DB_NAME; + case 2: // PARENT_TBL_NAME + return PARENT_TBL_NAME; + case 3: // FOREIGN_DB_NAME + return FOREIGN_DB_NAME; + case 4: // FOREIGN_TBL_NAME + return FOREIGN_TBL_NAME; default: return null; } @@ -110369,155 +112865,190 @@ public String getFieldName() { } // isset id assignments - private static final int __MAX_INDEXES_ISSET_ID = 0; - private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); - tmpMap.put(_Fields.DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.PARENT_DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("parent_db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + tmpMap.put(_Fields.PARENT_TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("parent_tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FOREIGN_DB_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreign_db_name", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.FOREIGN_TBL_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreign_tbl_name", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); - tmpMap.put(_Fields.MAX_INDEXES, new org.apache.thrift.meta_data.FieldMetaData("max_indexes", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I16))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_foreign_keys_args.class, metaDataMap); } - public get_index_names_args() { - this.max_indexes = (short)-1; - + public get_foreign_keys_args() { } - public get_index_names_args( - String db_name, - String tbl_name, - short max_indexes) + public get_foreign_keys_args( + String parent_db_name, + String parent_tbl_name, + String foreign_db_name, + String foreign_tbl_name) { this(); - this.db_name = db_name; - this.tbl_name = tbl_name; 
- this.max_indexes = max_indexes; - setMax_indexesIsSet(true); + this.parent_db_name = parent_db_name; + this.parent_tbl_name = parent_tbl_name; + this.foreign_db_name = foreign_db_name; + this.foreign_tbl_name = foreign_tbl_name; } /** * Performs a deep copy on other. */ - public get_index_names_args(get_index_names_args other) { - __isset_bitfield = other.__isset_bitfield; - if (other.isSetDb_name()) { - this.db_name = other.db_name; + public get_foreign_keys_args(get_foreign_keys_args other) { + if (other.isSetParent_db_name()) { + this.parent_db_name = other.parent_db_name; } - if (other.isSetTbl_name()) { - this.tbl_name = other.tbl_name; + if (other.isSetParent_tbl_name()) { + this.parent_tbl_name = other.parent_tbl_name; + } + if (other.isSetForeign_db_name()) { + this.foreign_db_name = other.foreign_db_name; + } + if (other.isSetForeign_tbl_name()) { + this.foreign_tbl_name = other.foreign_tbl_name; } - this.max_indexes = other.max_indexes; } - public get_index_names_args deepCopy() { - return new get_index_names_args(this); + public get_foreign_keys_args deepCopy() { + return new get_foreign_keys_args(this); } @Override public void clear() { - this.db_name = null; - this.tbl_name = null; - this.max_indexes = (short)-1; + this.parent_db_name = null; + this.parent_tbl_name = null; + this.foreign_db_name = null; + this.foreign_tbl_name = null; + } + public String getParent_db_name() { + return this.parent_db_name; } - public String getDb_name() { - return this.db_name; + public void setParent_db_name(String parent_db_name) { + this.parent_db_name = parent_db_name; } - public void setDb_name(String db_name) { - this.db_name = db_name; + public void unsetParent_db_name() { + this.parent_db_name = null; } - public void unsetDb_name() { - this.db_name = null; + /** Returns true if field parent_db_name is set (has been assigned a value) and false otherwise */ + public boolean isSetParent_db_name() { + return this.parent_db_name != null; } - /** Returns true if field db_name is set (has been assigned a value) and false otherwise */ - public boolean isSetDb_name() { - return this.db_name != null; + public void setParent_db_nameIsSet(boolean value) { + if (!value) { + this.parent_db_name = null; + } } - public void setDb_nameIsSet(boolean value) { + public String getParent_tbl_name() { + return this.parent_tbl_name; + } + + public void setParent_tbl_name(String parent_tbl_name) { + this.parent_tbl_name = parent_tbl_name; + } + + public void unsetParent_tbl_name() { + this.parent_tbl_name = null; + } + + /** Returns true if field parent_tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetParent_tbl_name() { + return this.parent_tbl_name != null; + } + + public void setParent_tbl_nameIsSet(boolean value) { if (!value) { - this.db_name = null; + this.parent_tbl_name = null; } } - public String getTbl_name() { - return this.tbl_name; + public String getForeign_db_name() { + return this.foreign_db_name; } - public void setTbl_name(String tbl_name) { - this.tbl_name = tbl_name; + public void setForeign_db_name(String foreign_db_name) { + this.foreign_db_name = foreign_db_name; } - public void unsetTbl_name() { - this.tbl_name = null; + public void unsetForeign_db_name() { + this.foreign_db_name = null; } - /** Returns true if field tbl_name is set (has been assigned a value) and false otherwise */ - public boolean isSetTbl_name() { - return this.tbl_name != null; + /** Returns true if field foreign_db_name is set (has been assigned a value) and false otherwise 
*/ + public boolean isSetForeign_db_name() { + return this.foreign_db_name != null; } - public void setTbl_nameIsSet(boolean value) { + public void setForeign_db_nameIsSet(boolean value) { if (!value) { - this.tbl_name = null; + this.foreign_db_name = null; } } - public short getMax_indexes() { - return this.max_indexes; + public String getForeign_tbl_name() { + return this.foreign_tbl_name; } - public void setMax_indexes(short max_indexes) { - this.max_indexes = max_indexes; - setMax_indexesIsSet(true); + public void setForeign_tbl_name(String foreign_tbl_name) { + this.foreign_tbl_name = foreign_tbl_name; } - public void unsetMax_indexes() { - __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); + public void unsetForeign_tbl_name() { + this.foreign_tbl_name = null; } - /** Returns true if field max_indexes is set (has been assigned a value) and false otherwise */ - public boolean isSetMax_indexes() { - return EncodingUtils.testBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID); + /** Returns true if field foreign_tbl_name is set (has been assigned a value) and false otherwise */ + public boolean isSetForeign_tbl_name() { + return this.foreign_tbl_name != null; } - public void setMax_indexesIsSet(boolean value) { - __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAX_INDEXES_ISSET_ID, value); + public void setForeign_tbl_nameIsSet(boolean value) { + if (!value) { + this.foreign_tbl_name = null; + } } public void setFieldValue(_Fields field, Object value) { switch (field) { - case DB_NAME: + case PARENT_DB_NAME: if (value == null) { - unsetDb_name(); + unsetParent_db_name(); } else { - setDb_name((String)value); + setParent_db_name((String)value); } break; - case TBL_NAME: + case PARENT_TBL_NAME: if (value == null) { - unsetTbl_name(); + unsetParent_tbl_name(); } else { - setTbl_name((String)value); + setParent_tbl_name((String)value); } break; - case MAX_INDEXES: + case FOREIGN_DB_NAME: if (value == null) { - unsetMax_indexes(); + unsetForeign_db_name(); } else { - setMax_indexes((Short)value); + setForeign_db_name((String)value); + } + break; + + case FOREIGN_TBL_NAME: + if (value == null) { + unsetForeign_tbl_name(); + } else { + setForeign_tbl_name((String)value); } break; @@ -110526,14 +113057,17 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { - case DB_NAME: - return getDb_name(); + case PARENT_DB_NAME: + return getParent_db_name(); - case TBL_NAME: - return getTbl_name(); + case PARENT_TBL_NAME: + return getParent_tbl_name(); - case MAX_INDEXES: - return getMax_indexes(); + case FOREIGN_DB_NAME: + return getForeign_db_name(); + + case FOREIGN_TBL_NAME: + return getForeign_tbl_name(); } throw new IllegalStateException(); @@ -110546,12 +113080,14 @@ public boolean isSet(_Fields field) { } switch (field) { - case DB_NAME: - return isSetDb_name(); - case TBL_NAME: - return isSetTbl_name(); - case MAX_INDEXES: - return isSetMax_indexes(); + case PARENT_DB_NAME: + return isSetParent_db_name(); + case PARENT_TBL_NAME: + return isSetParent_tbl_name(); + case FOREIGN_DB_NAME: + return isSetForeign_db_name(); + case FOREIGN_TBL_NAME: + return isSetForeign_tbl_name(); } throw new IllegalStateException(); } @@ -110560,39 +113096,48 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_names_args) - return this.equals((get_index_names_args)that); + if (that instanceof get_foreign_keys_args) + 
return this.equals((get_foreign_keys_args)that); return false; } - public boolean equals(get_index_names_args that) { + public boolean equals(get_foreign_keys_args that) { if (that == null) return false; - boolean this_present_db_name = true && this.isSetDb_name(); - boolean that_present_db_name = true && that.isSetDb_name(); - if (this_present_db_name || that_present_db_name) { - if (!(this_present_db_name && that_present_db_name)) + boolean this_present_parent_db_name = true && this.isSetParent_db_name(); + boolean that_present_parent_db_name = true && that.isSetParent_db_name(); + if (this_present_parent_db_name || that_present_parent_db_name) { + if (!(this_present_parent_db_name && that_present_parent_db_name)) return false; - if (!this.db_name.equals(that.db_name)) + if (!this.parent_db_name.equals(that.parent_db_name)) return false; } - boolean this_present_tbl_name = true && this.isSetTbl_name(); - boolean that_present_tbl_name = true && that.isSetTbl_name(); - if (this_present_tbl_name || that_present_tbl_name) { - if (!(this_present_tbl_name && that_present_tbl_name)) + boolean this_present_parent_tbl_name = true && this.isSetParent_tbl_name(); + boolean that_present_parent_tbl_name = true && that.isSetParent_tbl_name(); + if (this_present_parent_tbl_name || that_present_parent_tbl_name) { + if (!(this_present_parent_tbl_name && that_present_parent_tbl_name)) return false; - if (!this.tbl_name.equals(that.tbl_name)) + if (!this.parent_tbl_name.equals(that.parent_tbl_name)) return false; } - boolean this_present_max_indexes = true; - boolean that_present_max_indexes = true; - if (this_present_max_indexes || that_present_max_indexes) { - if (!(this_present_max_indexes && that_present_max_indexes)) + boolean this_present_foreign_db_name = true && this.isSetForeign_db_name(); + boolean that_present_foreign_db_name = true && that.isSetForeign_db_name(); + if (this_present_foreign_db_name || that_present_foreign_db_name) { + if (!(this_present_foreign_db_name && that_present_foreign_db_name)) return false; - if (this.max_indexes != that.max_indexes) + if (!this.foreign_db_name.equals(that.foreign_db_name)) + return false; + } + + boolean this_present_foreign_tbl_name = true && this.isSetForeign_tbl_name(); + boolean that_present_foreign_tbl_name = true && that.isSetForeign_tbl_name(); + if (this_present_foreign_tbl_name || that_present_foreign_tbl_name) { + if (!(this_present_foreign_tbl_name && that_present_foreign_tbl_name)) + return false; + if (!this.foreign_tbl_name.equals(that.foreign_tbl_name)) return false; } @@ -110603,58 +113148,73 @@ public boolean equals(get_index_names_args that) { public int hashCode() { List list = new ArrayList(); - boolean present_db_name = true && (isSetDb_name()); - list.add(present_db_name); - if (present_db_name) - list.add(db_name); + boolean present_parent_db_name = true && (isSetParent_db_name()); + list.add(present_parent_db_name); + if (present_parent_db_name) + list.add(parent_db_name); - boolean present_tbl_name = true && (isSetTbl_name()); - list.add(present_tbl_name); - if (present_tbl_name) - list.add(tbl_name); + boolean present_parent_tbl_name = true && (isSetParent_tbl_name()); + list.add(present_parent_tbl_name); + if (present_parent_tbl_name) + list.add(parent_tbl_name); - boolean present_max_indexes = true; - list.add(present_max_indexes); - if (present_max_indexes) - list.add(max_indexes); + boolean present_foreign_db_name = true && (isSetForeign_db_name()); + list.add(present_foreign_db_name); + if (present_foreign_db_name) + 
list.add(foreign_db_name); + + boolean present_foreign_tbl_name = true && (isSetForeign_tbl_name()); + list.add(present_foreign_tbl_name); + if (present_foreign_tbl_name) + list.add(foreign_tbl_name); return list.hashCode(); } @Override - public int compareTo(get_index_names_args other) { + public int compareTo(get_foreign_keys_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - lastComparison = Boolean.valueOf(isSetDb_name()).compareTo(other.isSetDb_name()); + lastComparison = Boolean.valueOf(isSetParent_db_name()).compareTo(other.isSetParent_db_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetDb_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.db_name, other.db_name); + if (isSetParent_db_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parent_db_name, other.parent_db_name); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetTbl_name()).compareTo(other.isSetTbl_name()); + lastComparison = Boolean.valueOf(isSetParent_tbl_name()).compareTo(other.isSetParent_tbl_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetTbl_name()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tbl_name, other.tbl_name); + if (isSetParent_tbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parent_tbl_name, other.parent_tbl_name); if (lastComparison != 0) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetMax_indexes()).compareTo(other.isSetMax_indexes()); + lastComparison = Boolean.valueOf(isSetForeign_db_name()).compareTo(other.isSetForeign_db_name()); if (lastComparison != 0) { return lastComparison; } - if (isSetMax_indexes()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.max_indexes, other.max_indexes); + if (isSetForeign_db_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreign_db_name, other.foreign_db_name); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeign_tbl_name()).compareTo(other.isSetForeign_tbl_name()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeign_tbl_name()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreign_tbl_name, other.foreign_tbl_name); if (lastComparison != 0) { return lastComparison; } @@ -110676,27 +113236,39 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
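// The new get_foreign_keys_args above replaces get_index_names_args' (db_name, tbl_name,
// max_indexes) with four String fields naming both ends of the key relationship. Since the
// i16 max_indexes was the struct's only primitive, the __isset_bitfield bookkeeping is
// dropped wholesale: for object fields the generator tracks "set" as simply non-null.
// A short behavioral sketch built only from the methods shown above (values illustrative):
get_foreign_keys_args a = new get_foreign_keys_args("sales", "orders", "sales", "order_items");
get_foreign_keys_args b = new get_foreign_keys_args(a);  // the deep-copy constructor above
assert a.equals(b) && a.hashCode() == b.hashCode();      // per-field equals/hashCode
a.unsetForeign_tbl_name();                               // nulls the field...
assert !a.isSetForeign_tbl_name();                       // ...which is all "unset" means here
assert !a.equals(b);                                     // set-ness participates in equals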
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_names_args("); + StringBuilder sb = new StringBuilder("get_foreign_keys_args("); boolean first = true; - sb.append("db_name:"); - if (this.db_name == null) { + sb.append("parent_db_name:"); + if (this.parent_db_name == null) { sb.append("null"); } else { - sb.append(this.db_name); + sb.append(this.parent_db_name); } first = false; if (!first) sb.append(", "); - sb.append("tbl_name:"); - if (this.tbl_name == null) { + sb.append("parent_tbl_name:"); + if (this.parent_tbl_name == null) { sb.append("null"); } else { - sb.append(this.tbl_name); + sb.append(this.parent_tbl_name); } first = false; if (!first) sb.append(", "); - sb.append("max_indexes:"); - sb.append(this.max_indexes); + sb.append("foreign_db_name:"); + if (this.foreign_db_name == null) { + sb.append("null"); + } else { + sb.append(this.foreign_db_name); + } + first = false; + if (!first) sb.append(", "); + sb.append("foreign_tbl_name:"); + if (this.foreign_tbl_name == null) { + sb.append("null"); + } else { + sb.append(this.foreign_tbl_name); + } first = false; sb.append(")"); return sb.toString(); @@ -110717,23 +113289,21 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { - // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. - __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_index_names_argsStandardSchemeFactory implements SchemeFactory { - public get_index_names_argsStandardScheme getScheme() { - return new get_index_names_argsStandardScheme(); + private static class get_foreign_keys_argsStandardSchemeFactory implements SchemeFactory { + public get_foreign_keys_argsStandardScheme getScheme() { + return new get_foreign_keys_argsStandardScheme(); } } - private static class get_index_names_argsStandardScheme extends StandardScheme { + private static class get_foreign_keys_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -110743,26 +113313,34 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_arg break; } switch (schemeField.id) { - case 1: // DB_NAME + case 1: // PARENT_DB_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); + struct.parent_db_name = iprot.readString(); + struct.setParent_db_nameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 2: // TBL_NAME + case 2: // PARENT_TBL_NAME if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); + struct.parent_tbl_name = iprot.readString(); + struct.setParent_tbl_nameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // MAX_INDEXES - 
if (schemeField.type == org.apache.thrift.protocol.TType.I16) { - struct.max_indexes = iprot.readI16(); - struct.setMax_indexesIsSet(true); + case 3: // FOREIGN_DB_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.foreign_db_name = iprot.readString(); + struct.setForeign_db_nameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // FOREIGN_TBL_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.foreign_tbl_name = iprot.readString(); + struct.setForeign_tbl_nameIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -110776,102 +113354,122 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_arg struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.db_name != null) { - oprot.writeFieldBegin(DB_NAME_FIELD_DESC); - oprot.writeString(struct.db_name); + if (struct.parent_db_name != null) { + oprot.writeFieldBegin(PARENT_DB_NAME_FIELD_DESC); + oprot.writeString(struct.parent_db_name); oprot.writeFieldEnd(); } - if (struct.tbl_name != null) { - oprot.writeFieldBegin(TBL_NAME_FIELD_DESC); - oprot.writeString(struct.tbl_name); + if (struct.parent_tbl_name != null) { + oprot.writeFieldBegin(PARENT_TBL_NAME_FIELD_DESC); + oprot.writeString(struct.parent_tbl_name); + oprot.writeFieldEnd(); + } + if (struct.foreign_db_name != null) { + oprot.writeFieldBegin(FOREIGN_DB_NAME_FIELD_DESC); + oprot.writeString(struct.foreign_db_name); + oprot.writeFieldEnd(); + } + if (struct.foreign_tbl_name != null) { + oprot.writeFieldBegin(FOREIGN_TBL_NAME_FIELD_DESC); + oprot.writeString(struct.foreign_tbl_name); oprot.writeFieldEnd(); } - oprot.writeFieldBegin(MAX_INDEXES_FIELD_DESC); - oprot.writeI16(struct.max_indexes); - oprot.writeFieldEnd(); oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_index_names_argsTupleSchemeFactory implements SchemeFactory { - public get_index_names_argsTupleScheme getScheme() { - return new get_index_names_argsTupleScheme(); + private static class get_foreign_keys_argsTupleSchemeFactory implements SchemeFactory { + public get_foreign_keys_argsTupleScheme getScheme() { + return new get_foreign_keys_argsTupleScheme(); } } - private static class get_index_names_argsTupleScheme extends TupleScheme { + private static class get_foreign_keys_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); - if (struct.isSetDb_name()) { + if (struct.isSetParent_db_name()) { optionals.set(0); } - if (struct.isSetTbl_name()) { + if (struct.isSetParent_tbl_name()) { optionals.set(1); } - if (struct.isSetMax_indexes()) { + if (struct.isSetForeign_db_name()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); - if (struct.isSetDb_name()) { - oprot.writeString(struct.db_name); + if (struct.isSetForeign_tbl_name()) { + optionals.set(3); } - if 
(struct.isSetTbl_name()) { - oprot.writeString(struct.tbl_name); + oprot.writeBitSet(optionals, 4); + if (struct.isSetParent_db_name()) { + oprot.writeString(struct.parent_db_name); } - if (struct.isSetMax_indexes()) { - oprot.writeI16(struct.max_indexes); + if (struct.isSetParent_tbl_name()) { + oprot.writeString(struct.parent_tbl_name); + } + if (struct.isSetForeign_db_name()) { + oprot.writeString(struct.foreign_db_name); + } + if (struct.isSetForeign_tbl_name()) { + oprot.writeString(struct.foreign_tbl_name); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { - struct.db_name = iprot.readString(); - struct.setDb_nameIsSet(true); + struct.parent_db_name = iprot.readString(); + struct.setParent_db_nameIsSet(true); } if (incoming.get(1)) { - struct.tbl_name = iprot.readString(); - struct.setTbl_nameIsSet(true); + struct.parent_tbl_name = iprot.readString(); + struct.setParent_tbl_nameIsSet(true); } if (incoming.get(2)) { - struct.max_indexes = iprot.readI16(); - struct.setMax_indexesIsSet(true); + struct.foreign_db_name = iprot.readString(); + struct.setForeign_db_nameIsSet(true); + } + if (incoming.get(3)) { + struct.foreign_tbl_name = iprot.readString(); + struct.setForeign_tbl_nameIsSet(true); } } } } - public static class get_index_names_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_index_names_result"); + public static class get_foreign_keys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_foreign_keys_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); - private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_index_names_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_index_names_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_foreign_keys_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_foreign_keys_resultTupleSchemeFactory()); } - private List success; // required - private MetaException o2; // required + private List success; // required + private MetaException o1; // required + private NoSuchObjectException o2; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating 
them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), - O2((short)1, "o2"); + O1((short)1, "o1"), + O2((short)2, "o2"); private static final Map byName = new HashMap(); @@ -110888,7 +113486,9 @@ public static _Fields findByThriftId(int fieldId) { switch(fieldId) { case 0: // SUCCESS return SUCCESS; - case 1: // O2 + case 1: // O1 + return O1; + case 2: // O2 return O2; default: return null; @@ -110935,45 +113535,56 @@ public String getFieldName() { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, - new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SQLForeignKey.class)))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_index_names_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_foreign_keys_result.class, metaDataMap); } - public get_index_names_result() { + public get_foreign_keys_result() { } - public get_index_names_result( - List success, - MetaException o2) + public get_foreign_keys_result( + List success, + MetaException o1, + NoSuchObjectException o2) { this(); this.success = success; + this.o1 = o1; this.o2 = o2; } /** * Performs a deep copy on other. */ - public get_index_names_result(get_index_names_result other) { + public get_foreign_keys_result(get_foreign_keys_result other) { if (other.isSetSuccess()) { - List __this__success = new ArrayList(other.success); + List __this__success = new ArrayList(other.success.size()); + for (SQLForeignKey other_element : other.success) { + __this__success.add(new SQLForeignKey(other_element)); + } this.success = __this__success; } + if (other.isSetO1()) { + this.o1 = new MetaException(other.o1); + } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new NoSuchObjectException(other.o2); } } - public get_index_names_result deepCopy() { - return new get_index_names_result(this); + public get_foreign_keys_result deepCopy() { + return new get_foreign_keys_result(this); } @Override public void clear() { this.success = null; + this.o1 = null; this.o2 = null; } @@ -110981,22 +113592,22 @@ public int getSuccessSize() { return (this.success == null) ? 0 : this.success.size(); } - public java.util.Iterator getSuccessIterator() { + public java.util.Iterator getSuccessIterator() { return (this.success == null) ? 
null : this.success.iterator(); } - public void addToSuccess(String elem) { + public void addToSuccess(SQLForeignKey elem) { if (this.success == null) { - this.success = new ArrayList(); + this.success = new ArrayList(); } this.success.add(elem); } - public List getSuccess() { + public List getSuccess() { return this.success; } - public void setSuccess(List success) { + public void setSuccess(List success) { this.success = success; } @@ -111015,11 +113626,34 @@ public void setSuccessIsSet(boolean value) { } } - public MetaException getO2() { + public MetaException getO1() { + return this.o1; + } + + public void setO1(MetaException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public NoSuchObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(NoSuchObjectException o2) { this.o2 = o2; } @@ -111044,7 +113678,15 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((List)value); + setSuccess((List)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((MetaException)value); } break; @@ -111052,7 +113694,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((NoSuchObjectException)value); } break; @@ -111064,6 +113706,9 @@ public Object getFieldValue(_Fields field) { case SUCCESS: return getSuccess(); + case O1: + return getO1(); + case O2: return getO2(); @@ -111080,6 +113725,8 @@ public boolean isSet(_Fields field) { switch (field) { case SUCCESS: return isSetSuccess(); + case O1: + return isSetO1(); case O2: return isSetO2(); } @@ -111090,12 +113737,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_index_names_result) - return this.equals((get_index_names_result)that); + if (that instanceof get_foreign_keys_result) + return this.equals((get_foreign_keys_result)that); return false; } - public boolean equals(get_index_names_result that) { + public boolean equals(get_foreign_keys_result that) { if (that == null) return false; @@ -111108,6 +113755,15 @@ public boolean equals(get_index_names_result that) { return false; } + boolean this_present_o1 = true && this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + boolean this_present_o2 = true && this.isSetO2(); boolean that_present_o2 = true && that.isSetO2(); if (this_present_o2 || that_present_o2) { @@ -111129,6 +113785,11 @@ public int hashCode() { if (present_success) list.add(success); + boolean present_o1 = true && (isSetO1()); + list.add(present_o1); + if (present_o1) + list.add(o1); + boolean present_o2 = true && (isSetO2()); list.add(present_o2); if (present_o2) @@ -111138,7 +113799,7 @@ public int hashCode() { } @Override - public int compareTo(get_index_names_result other) { + public int compareTo(get_foreign_keys_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } @@ -111155,6 +113816,16 @@ public int 
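// get_foreign_keys_result adds a MetaException o1 ahead of the retyped NoSuchObjectException
// o2, and every generated facet grows a matching branch: the compareTo below, equals and
// hashCode above, and both serialization schemes further down, where the TupleScheme's
// optionals BitSet widens from 2 to 3 bits (both ends of the wire must agree on that width).
// A round-trip sketch against stock libthrift classes; it assumes the usual generated
// single-field MetaException(String) constructor and belongs in a method that throws
// org.apache.thrift.TException:
org.apache.thrift.transport.TMemoryBuffer buf = new org.apache.thrift.transport.TMemoryBuffer(256);
get_foreign_keys_result out = new get_foreign_keys_result();
out.setO1(new MetaException("backing store timed out"));
out.write(new org.apache.thrift.protocol.TTupleProtocol(buf)); // emits BitSet{bit 1}, then o1
get_foreign_keys_result in = new get_foreign_keys_result();
in.read(new org.apache.thrift.protocol.TTupleProtocol(buf));   // reads the 3-bit set back
assert in.isSetO1() && !in.isSetSuccess() && !in.isSetO2();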
compareTo(get_index_names_result other) { return lastComparison; } } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(other.isSetO1()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, other.o1); + if (lastComparison != 0) { + return lastComparison; + } + } lastComparison = Boolean.valueOf(isSetO2()).compareTo(other.isSetO2()); if (lastComparison != 0) { return lastComparison; @@ -111182,7 +113853,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_index_names_result("); + StringBuilder sb = new StringBuilder("get_foreign_keys_result("); boolean first = true; sb.append("success:"); @@ -111193,6 +113864,14 @@ public String toString() { } first = false; if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); sb.append("o2:"); if (this.o2 == null) { sb.append("null"); @@ -111225,15 +113904,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_index_names_resultStandardSchemeFactory implements SchemeFactory { - public get_index_names_resultStandardScheme getScheme() { - return new get_index_names_resultStandardScheme(); + private static class get_foreign_keys_resultStandardSchemeFactory implements SchemeFactory { + public get_foreign_keys_resultStandardScheme getScheme() { + return new get_foreign_keys_resultStandardScheme(); } } - private static class get_index_names_resultStandardScheme extends StandardScheme { + private static class get_foreign_keys_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_foreign_keys_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -111246,13 +113925,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1012 = iprot.readListBegin(); - struct.success = new ArrayList(_list1012.size); - String _elem1013; - for (int _i1014 = 0; _i1014 < _list1012.size; ++_i1014) + org.apache.thrift.protocol.TList _list1060 = iprot.readListBegin(); + struct.success = new ArrayList(_list1060.size); + SQLForeignKey _elem1061; + for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062) { - _elem1013 = iprot.readString(); - struct.success.add(_elem1013); + _elem1061 = new SQLForeignKey(); + _elem1061.read(iprot); + struct.success.add(_elem1061); } iprot.readListEnd(); } @@ -111261,9 +113941,18 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 1: // O2 + case 1: // O1 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + 
struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { @@ -111279,22 +113968,27 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_foreign_keys_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); if (struct.success != null) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1015 : struct.success) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); + for (SQLForeignKey _iter1063 : struct.success) { - oprot.writeString(_iter1015); + _iter1063.write(oprot); } oprot.writeListEnd(); } oprot.writeFieldEnd(); } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } if (struct.o2 != null) { oprot.writeFieldBegin(O2_FIELD_DESC); struct.o2.write(oprot); @@ -111306,58 +114000,70 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re } - private static class get_index_names_resultTupleSchemeFactory implements SchemeFactory { - public get_index_names_resultTupleScheme getScheme() { - return new get_index_names_resultTupleScheme(); + private static class get_foreign_keys_resultTupleSchemeFactory implements SchemeFactory { + public get_foreign_keys_resultTupleScheme getScheme() { + return new get_foreign_keys_resultTupleScheme(); } } - private static class get_index_names_resultTupleScheme extends TupleScheme { + private static class get_foreign_keys_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { optionals.set(0); } - if (struct.isSetO2()) { + if (struct.isSetO1()) { optionals.set(1); } - oprot.writeBitSet(optionals, 2); + if (struct.isSetO2()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1016 : struct.success) + for (SQLForeignKey _iter1064 : struct.success) { - oprot.writeString(_iter1016); + _iter1064.write(oprot); } } } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } if (struct.isSetO2()) { struct.o2.write(oprot); } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_foreign_keys_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(2); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1017 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1017.size); - String _elem1018; - for (int _i1019 = 0; 
_i1019 < _list1017.size; ++_i1019) + org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1065.size); + SQLForeignKey _elem1066; + for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) { - _elem1018 = iprot.readString(); - struct.success.add(_elem1018); + _elem1066 = new SQLForeignKey(); + _elem1066.read(iprot); + struct.success.add(_elem1066); } } struct.setSuccessIsSet(true); } if (incoming.get(1)) { - struct.o2 = new MetaException(); + struct.o1 = new MetaException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new NoSuchObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } @@ -127086,13 +129792,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1020 = iprot.readListBegin(); - struct.success = new ArrayList(_list1020.size); - String _elem1021; - for (int _i1022 = 0; _i1022 < _list1020.size; ++_i1022) + org.apache.thrift.protocol.TList _list1068 = iprot.readListBegin(); + struct.success = new ArrayList(_list1068.size); + String _elem1069; + for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) { - _elem1021 = iprot.readString(); - struct.success.add(_elem1021); + _elem1069 = iprot.readString(); + struct.success.add(_elem1069); } iprot.readListEnd(); } @@ -127127,9 +129833,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1023 : struct.success) + for (String _iter1071 : struct.success) { - oprot.writeString(_iter1023); + oprot.writeString(_iter1071); } oprot.writeListEnd(); } @@ -127168,9 +129874,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1024 : struct.success) + for (String _iter1072 : struct.success) { - oprot.writeString(_iter1024); + oprot.writeString(_iter1072); } } } @@ -127185,13 +129891,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1025 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1025.size); - String _elem1026; - for (int _i1027 = 0; _i1027 < _list1025.size; ++_i1027) + org.apache.thrift.protocol.TList _list1073 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1073.size); + String _elem1074; + for (int _i1075 = 0; _i1075 < _list1073.size; ++_i1075) { - _elem1026 = iprot.readString(); - struct.success.add(_elem1026); + _elem1074 = iprot.readString(); + struct.success.add(_elem1074); } } struct.setSuccessIsSet(true); @@ -131246,13 +133952,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1028 = iprot.readListBegin(); - struct.success = new ArrayList(_list1028.size); - String _elem1029; - for (int _i1030 = 
0; _i1030 < _list1028.size; ++_i1030) + org.apache.thrift.protocol.TList _list1076 = iprot.readListBegin(); + struct.success = new ArrayList(_list1076.size); + String _elem1077; + for (int _i1078 = 0; _i1078 < _list1076.size; ++_i1078) { - _elem1029 = iprot.readString(); - struct.success.add(_elem1029); + _elem1077 = iprot.readString(); + struct.success.add(_elem1077); } iprot.readListEnd(); } @@ -131287,9 +133993,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1031 : struct.success) + for (String _iter1079 : struct.success) { - oprot.writeString(_iter1031); + oprot.writeString(_iter1079); } oprot.writeListEnd(); } @@ -131328,9 +134034,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1032 : struct.success) + for (String _iter1080 : struct.success) { - oprot.writeString(_iter1032); + oprot.writeString(_iter1080); } } } @@ -131345,13 +134051,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1033 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1033.size); - String _elem1034; - for (int _i1035 = 0; _i1035 < _list1033.size; ++_i1035) + org.apache.thrift.protocol.TList _list1081 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1081.size); + String _elem1082; + for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083) { - _elem1034 = iprot.readString(); - struct.success.add(_elem1034); + _elem1082 = iprot.readString(); + struct.success.add(_elem1082); } } struct.setSuccessIsSet(true); @@ -134642,14 +137348,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1036 = iprot.readListBegin(); - struct.success = new ArrayList(_list1036.size); - Role _elem1037; - for (int _i1038 = 0; _i1038 < _list1036.size; ++_i1038) + org.apache.thrift.protocol.TList _list1084 = iprot.readListBegin(); + struct.success = new ArrayList(_list1084.size); + Role _elem1085; + for (int _i1086 = 0; _i1086 < _list1084.size; ++_i1086) { - _elem1037 = new Role(); - _elem1037.read(iprot); - struct.success.add(_elem1037); + _elem1085 = new Role(); + _elem1085.read(iprot); + struct.success.add(_elem1085); } iprot.readListEnd(); } @@ -134684,9 +137390,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter1039 : struct.success) + for (Role _iter1087 : struct.success) { - _iter1039.write(oprot); + _iter1087.write(oprot); } oprot.writeListEnd(); } @@ -134725,9 +137431,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter1040 : struct.success) + for (Role _iter1088 : struct.success) { - 
_iter1040.write(oprot); + _iter1088.write(oprot); } } } @@ -134742,14 +137448,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1041 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1041.size); - Role _elem1042; - for (int _i1043 = 0; _i1043 < _list1041.size; ++_i1043) + org.apache.thrift.protocol.TList _list1089 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1089.size); + Role _elem1090; + for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091) { - _elem1042 = new Role(); - _elem1042.read(iprot); - struct.success.add(_elem1042); + _elem1090 = new Role(); + _elem1090.read(iprot); + struct.success.add(_elem1090); } } struct.setSuccessIsSet(true); @@ -137754,13 +140460,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1044 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1044.size); - String _elem1045; - for (int _i1046 = 0; _i1046 < _list1044.size; ++_i1046) + org.apache.thrift.protocol.TList _list1092 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1092.size); + String _elem1093; + for (int _i1094 = 0; _i1094 < _list1092.size; ++_i1094) { - _elem1045 = iprot.readString(); - struct.group_names.add(_elem1045); + _elem1093 = iprot.readString(); + struct.group_names.add(_elem1093); } iprot.readListEnd(); } @@ -137796,9 +140502,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter1047 : struct.group_names) + for (String _iter1095 : struct.group_names) { - oprot.writeString(_iter1047); + oprot.writeString(_iter1095); } oprot.writeListEnd(); } @@ -137841,9 +140547,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1048 : struct.group_names) + for (String _iter1096 : struct.group_names) { - oprot.writeString(_iter1048); + oprot.writeString(_iter1096); } } } @@ -137864,13 +140570,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list1049 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1049.size); - String _elem1050; - for (int _i1051 = 0; _i1051 < _list1049.size; ++_i1051) + org.apache.thrift.protocol.TList _list1097 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1097.size); + String _elem1098; + for (int _i1099 = 0; _i1099 < _list1097.size; ++_i1099) { - _elem1050 = iprot.readString(); - struct.group_names.add(_elem1050); + _elem1098 = iprot.readString(); + struct.group_names.add(_elem1098); } } struct.setGroup_namesIsSet(true); @@ -139328,14 +142034,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // 
SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1052 = iprot.readListBegin(); - struct.success = new ArrayList(_list1052.size); - HiveObjectPrivilege _elem1053; - for (int _i1054 = 0; _i1054 < _list1052.size; ++_i1054) + org.apache.thrift.protocol.TList _list1100 = iprot.readListBegin(); + struct.success = new ArrayList(_list1100.size); + HiveObjectPrivilege _elem1101; + for (int _i1102 = 0; _i1102 < _list1100.size; ++_i1102) { - _elem1053 = new HiveObjectPrivilege(); - _elem1053.read(iprot); - struct.success.add(_elem1053); + _elem1101 = new HiveObjectPrivilege(); + _elem1101.read(iprot); + struct.success.add(_elem1101); } iprot.readListEnd(); } @@ -139370,9 +142076,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter1055 : struct.success) + for (HiveObjectPrivilege _iter1103 : struct.success) { - _iter1055.write(oprot); + _iter1103.write(oprot); } oprot.writeListEnd(); } @@ -139411,9 +142117,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter1056 : struct.success) + for (HiveObjectPrivilege _iter1104 : struct.success) { - _iter1056.write(oprot); + _iter1104.write(oprot); } } } @@ -139428,14 +142134,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1057 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list1057.size); - HiveObjectPrivilege _elem1058; - for (int _i1059 = 0; _i1059 < _list1057.size; ++_i1059) + org.apache.thrift.protocol.TList _list1105 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list1105.size); + HiveObjectPrivilege _elem1106; + for (int _i1107 = 0; _i1107 < _list1105.size; ++_i1107) { - _elem1058 = new HiveObjectPrivilege(); - _elem1058.read(iprot); - struct.success.add(_elem1058); + _elem1106 = new HiveObjectPrivilege(); + _elem1106.read(iprot); + struct.success.add(_elem1106); } } struct.setSuccessIsSet(true); @@ -142337,13 +145043,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1060 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list1060.size); - String _elem1061; - for (int _i1062 = 0; _i1062 < _list1060.size; ++_i1062) + org.apache.thrift.protocol.TList _list1108 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list1108.size); + String _elem1109; + for (int _i1110 = 0; _i1110 < _list1108.size; ++_i1110) { - _elem1061 = iprot.readString(); - struct.group_names.add(_elem1061); + _elem1109 = iprot.readString(); + struct.group_names.add(_elem1109); } iprot.readListEnd(); } @@ -142374,9 +145080,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.group_names.size())); - for (String _iter1063 : struct.group_names) + for (String _iter1111 : struct.group_names) { - oprot.writeString(_iter1063); + oprot.writeString(_iter1111); } oprot.writeListEnd(); } @@ -142413,9 +145119,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter1064 : struct.group_names) + for (String _iter1112 : struct.group_names) { - oprot.writeString(_iter1064); + oprot.writeString(_iter1112); } } } @@ -142431,13 +145137,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list1065 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list1065.size); - String _elem1066; - for (int _i1067 = 0; _i1067 < _list1065.size; ++_i1067) + org.apache.thrift.protocol.TList _list1113 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list1113.size); + String _elem1114; + for (int _i1115 = 0; _i1115 < _list1113.size; ++_i1115) { - _elem1066 = iprot.readString(); - struct.group_names.add(_elem1066); + _elem1114 = iprot.readString(); + struct.group_names.add(_elem1114); } } struct.setGroup_namesIsSet(true); @@ -142840,13 +145546,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1068 = iprot.readListBegin(); - struct.success = new ArrayList(_list1068.size); - String _elem1069; - for (int _i1070 = 0; _i1070 < _list1068.size; ++_i1070) + org.apache.thrift.protocol.TList _list1116 = iprot.readListBegin(); + struct.success = new ArrayList(_list1116.size); + String _elem1117; + for (int _i1118 = 0; _i1118 < _list1116.size; ++_i1118) { - _elem1069 = iprot.readString(); - struct.success.add(_elem1069); + _elem1117 = iprot.readString(); + struct.success.add(_elem1117); } iprot.readListEnd(); } @@ -142881,9 +145587,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1071 : struct.success) + for (String _iter1119 : struct.success) { - oprot.writeString(_iter1071); + oprot.writeString(_iter1119); } oprot.writeListEnd(); } @@ -142922,9 +145628,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1072 : struct.success) + for (String _iter1120 : struct.success) { - oprot.writeString(_iter1072); + oprot.writeString(_iter1120); } } } @@ -142939,13 +145645,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1073 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1073.size); - String _elem1074; - for (int _i1075 = 0; _i1075 < _list1073.size; ++_i1075) + org.apache.thrift.protocol.TList _list1121 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); 
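The wholesale renumbering in these hunks (_list1028 -> _list1076, _iter1031 -> _iter1079, and so on) is mechanical: the Thrift compiler draws temporary names from a single counter per generated file, so the new primary-key and foreign-key calls added earlier in the service shift every later temporary. The serialization logic itself is unchanged. A minimal hand-written Java sketch of the two read paths these hunks implement, with illustrative class and method names that are not part of the generated code:

import java.util.ArrayList;
import java.util.List;
import org.apache.thrift.TException;
import org.apache.thrift.protocol.TList;
import org.apache.thrift.protocol.TProtocol;
import org.apache.thrift.protocol.TTupleProtocol;
import org.apache.thrift.protocol.TType;

final class ThriftListReadPatterns {

  // StandardScheme path: the element type and size travel on the wire,
  // so the list header comes from readListBegin()/readListEnd().
  static List<String> readStringListStandard(TProtocol iprot) throws TException {
    TList header = iprot.readListBegin();
    List<String> out = new ArrayList<>(header.size);
    for (int i = 0; i < header.size; ++i) {
      out.add(iprot.readString());
    }
    iprot.readListEnd();
    return out;
  }

  // TupleScheme path: presence is tracked in a bit set read elsewhere,
  // and the header is synthesized from a bare i32 count, mirroring the
  // "new TList(TType.STRING, iprot.readI32())" lines in these hunks.
  static List<String> readStringListTuple(TTupleProtocol iprot) throws TException {
    TList header = new TList(TType.STRING, iprot.readI32());
    List<String> out = new ArrayList<>(header.size);
    for (int i = 0; i < header.size; ++i) {
      out.add(iprot.readString());
    }
    return out;
  }
}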
+ struct.success = new ArrayList(_list1121.size); + String _elem1122; + for (int _i1123 = 0; _i1123 < _list1121.size; ++_i1123) { - _elem1074 = iprot.readString(); - struct.success.add(_elem1074); + _elem1122 = iprot.readString(); + struct.success.add(_elem1122); } } struct.setSuccessIsSet(true); @@ -148236,13 +150942,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_token_ident case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1076 = iprot.readListBegin(); - struct.success = new ArrayList(_list1076.size); - String _elem1077; - for (int _i1078 = 0; _i1078 < _list1076.size; ++_i1078) + org.apache.thrift.protocol.TList _list1124 = iprot.readListBegin(); + struct.success = new ArrayList(_list1124.size); + String _elem1125; + for (int _i1126 = 0; _i1126 < _list1124.size; ++_i1126) { - _elem1077 = iprot.readString(); - struct.success.add(_elem1077); + _elem1125 = iprot.readString(); + struct.success.add(_elem1125); } iprot.readListEnd(); } @@ -148268,9 +150974,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_token_iden oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1079 : struct.success) + for (String _iter1127 : struct.success) { - oprot.writeString(_iter1079); + oprot.writeString(_iter1127); } oprot.writeListEnd(); } @@ -148301,9 +151007,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_token_ident if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1080 : struct.success) + for (String _iter1128 : struct.success) { - oprot.writeString(_iter1080); + oprot.writeString(_iter1128); } } } @@ -148315,13 +151021,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_token_identi BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1081 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1081.size); - String _elem1082; - for (int _i1083 = 0; _i1083 < _list1081.size; ++_i1083) + org.apache.thrift.protocol.TList _list1129 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1129.size); + String _elem1130; + for (int _i1131 = 0; _i1131 < _list1129.size; ++_i1131) { - _elem1082 = iprot.readString(); - struct.success.add(_elem1082); + _elem1130 = iprot.readString(); + struct.success.add(_elem1130); } } struct.setSuccessIsSet(true); @@ -151351,13 +154057,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_master_keys_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list1084 = iprot.readListBegin(); - struct.success = new ArrayList(_list1084.size); - String _elem1085; - for (int _i1086 = 0; _i1086 < _list1084.size; ++_i1086) + org.apache.thrift.protocol.TList _list1132 = iprot.readListBegin(); + struct.success = new ArrayList(_list1132.size); + String _elem1133; + for (int _i1134 = 0; _i1134 < _list1132.size; ++_i1134) { - _elem1085 = iprot.readString(); - struct.success.add(_elem1085); + _elem1133 = iprot.readString(); + struct.success.add(_elem1133); } iprot.readListEnd(); } @@ -151383,9 +154089,9 @@ public void write(org.apache.thrift.protocol.TProtocol 
oprot, get_master_keys_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter1087 : struct.success) + for (String _iter1135 : struct.success) { - oprot.writeString(_iter1087); + oprot.writeString(_iter1135); } oprot.writeListEnd(); } @@ -151416,9 +154122,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_master_keys_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter1088 : struct.success) + for (String _iter1136 : struct.success) { - oprot.writeString(_iter1088); + oprot.writeString(_iter1136); } } } @@ -151430,13 +154136,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_master_keys_resu BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list1089 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list1089.size); - String _elem1090; - for (int _i1091 = 0; _i1091 < _list1089.size; ++_i1091) + org.apache.thrift.protocol.TList _list1137 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list1137.size); + String _elem1138; + for (int _i1139 = 0; _i1139 < _list1137.size; ++_i1139) { - _elem1090 = iprot.readString(); - struct.success.add(_elem1090); + _elem1138 = iprot.readString(); + struct.success.add(_elem1138); } } struct.setSuccessIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java index 14cd42e..892c6af 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Type.java @@ -618,14 +618,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Type struct) throws case 4: // FIELDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); - struct.fields = new ArrayList(_list0.size); - FieldSchema _elem1; - for (int _i2 = 0; _i2 < _list0.size; ++_i2) + org.apache.thrift.protocol.TList _list16 = iprot.readListBegin(); + struct.fields = new ArrayList(_list16.size); + FieldSchema _elem17; + for (int _i18 = 0; _i18 < _list16.size; ++_i18) { - _elem1 = new FieldSchema(); - _elem1.read(iprot); - struct.fields.add(_elem1); + _elem17 = new FieldSchema(); + _elem17.read(iprot); + struct.fields.add(_elem17); } iprot.readListEnd(); } @@ -671,9 +671,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Type struct) throw oprot.writeFieldBegin(FIELDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fields.size())); - for (FieldSchema _iter3 : struct.fields) + for (FieldSchema _iter19 : struct.fields) { - _iter3.write(oprot); + _iter19.write(oprot); } oprot.writeListEnd(); } @@ -723,9 +723,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Type struct) throws if (struct.isSetFields()) { { oprot.writeI32(struct.fields.size()); - for (FieldSchema _iter4 : struct.fields) + for (FieldSchema _iter20 : struct.fields) { - _iter4.write(oprot); + _iter20.write(oprot); } } } @@ -749,14 +749,14 @@ public void read(org.apache.thrift.protocol.TProtocol 
prot, Type struct) throws } if (incoming.get(3)) { { - org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.fields = new ArrayList(_list5.size); - FieldSchema _elem6; - for (int _i7 = 0; _i7 < _list5.size; ++_i7) + org.apache.thrift.protocol.TList _list21 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.fields = new ArrayList(_list21.size); + FieldSchema _elem22; + for (int _i23 = 0; _i23 < _list21.size; ++_i23) { - _elem6 = new FieldSchema(); - _elem6.read(iprot); - struct.fields.add(_elem6); + _elem22 = new FieldSchema(); + _elem22.read(iprot); + struct.fields.add(_elem22); } } struct.setFieldsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 05a0749..5e29469 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -699,6 +699,24 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { */ public function get_index_names($db_name, $tbl_name, $max_indexes); /** + * @param string $db_name + * @param string $tbl_name + * @return \metastore\SQLPrimaryKey[] + * @throws \metastore\MetaException + * @throws \metastore\NoSuchObjectException + */ + public function get_primary_keys($db_name, $tbl_name); + /** + * @param string $parent_db_name + * @param string $parent_tbl_name + * @param string $foreign_db_name + * @param string $foreign_tbl_name + * @return \metastore\SQLForeignKey[] + * @throws \metastore\MetaException + * @throws \metastore\NoSuchObjectException + */ + public function get_foreign_keys($parent_db_name, $parent_tbl_name, $foreign_db_name, $foreign_tbl_name); + /** * @param \metastore\ColumnStatistics $stats_obj * @return bool * @throws \metastore\NoSuchObjectException @@ -5662,6 +5680,124 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_index_names failed: unknown result"); } + public function get_primary_keys($db_name, $tbl_name) + { + $this->send_get_primary_keys($db_name, $tbl_name); + return $this->recv_get_primary_keys(); + } + + public function send_get_primary_keys($db_name, $tbl_name) + { + $args = new \metastore\ThriftHiveMetastore_get_primary_keys_args(); + $args->db_name = $db_name; + $args->tbl_name = $tbl_name; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_primary_keys', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_primary_keys', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_primary_keys() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_primary_keys_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + 
$x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_primary_keys_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_primary_keys failed: unknown result"); + } + + public function get_foreign_keys($parent_db_name, $parent_tbl_name, $foreign_db_name, $foreign_tbl_name) + { + $this->send_get_foreign_keys($parent_db_name, $parent_tbl_name, $foreign_db_name, $foreign_tbl_name); + return $this->recv_get_foreign_keys(); + } + + public function send_get_foreign_keys($parent_db_name, $parent_tbl_name, $foreign_db_name, $foreign_tbl_name) + { + $args = new \metastore\ThriftHiveMetastore_get_foreign_keys_args(); + $args->parent_db_name = $parent_db_name; + $args->parent_tbl_name = $parent_tbl_name; + $args->foreign_db_name = $foreign_db_name; + $args->foreign_tbl_name = $foreign_tbl_name; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'get_foreign_keys', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('get_foreign_keys', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_get_foreign_keys() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_get_foreign_keys_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_get_foreign_keys_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + throw new \Exception("get_foreign_keys failed: unknown result"); + } + public function update_table_column_statistics(\metastore\ColumnStatistics $stats_obj) { $this->send_update_table_column_statistics($stats_obj); @@ -10412,14 +10548,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size525 = 0; - $_etype528 = 0; - $xfer += $input->readListBegin($_etype528, $_size525); - for ($_i529 = 0; $_i529 < $_size525; ++$_i529) + $_size553 = 0; + $_etype556 = 0; + $xfer += $input->readListBegin($_etype556, $_size553); + for ($_i557 = 0; $_i557 < $_size553; ++$_i557) { - $elem530 = null; - $xfer += $input->readString($elem530); - $this->success []= $elem530; + $elem558 = null; + $xfer += $input->readString($elem558); + $this->success []= $elem558; } $xfer += $input->readListEnd(); } else { @@ -10455,9 +10591,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach 
($this->success as $iter531) + foreach ($this->success as $iter559) { - $xfer += $output->writeString($iter531); + $xfer += $output->writeString($iter559); } } $output->writeListEnd(); @@ -10588,14 +10724,14 @@ class ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size532 = 0; - $_etype535 = 0; - $xfer += $input->readListBegin($_etype535, $_size532); - for ($_i536 = 0; $_i536 < $_size532; ++$_i536) + $_size560 = 0; + $_etype563 = 0; + $xfer += $input->readListBegin($_etype563, $_size560); + for ($_i564 = 0; $_i564 < $_size560; ++$_i564) { - $elem537 = null; - $xfer += $input->readString($elem537); - $this->success []= $elem537; + $elem565 = null; + $xfer += $input->readString($elem565); + $this->success []= $elem565; } $xfer += $input->readListEnd(); } else { @@ -10631,9 +10767,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter538) + foreach ($this->success as $iter566) { - $xfer += $output->writeString($iter538); + $xfer += $output->writeString($iter566); } } $output->writeListEnd(); @@ -11634,18 +11770,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size539 = 0; - $_ktype540 = 0; - $_vtype541 = 0; - $xfer += $input->readMapBegin($_ktype540, $_vtype541, $_size539); - for ($_i543 = 0; $_i543 < $_size539; ++$_i543) + $_size567 = 0; + $_ktype568 = 0; + $_vtype569 = 0; + $xfer += $input->readMapBegin($_ktype568, $_vtype569, $_size567); + for ($_i571 = 0; $_i571 < $_size567; ++$_i571) { - $key544 = ''; - $val545 = new \metastore\Type(); - $xfer += $input->readString($key544); - $val545 = new \metastore\Type(); - $xfer += $val545->read($input); - $this->success[$key544] = $val545; + $key572 = ''; + $val573 = new \metastore\Type(); + $xfer += $input->readString($key572); + $val573 = new \metastore\Type(); + $xfer += $val573->read($input); + $this->success[$key572] = $val573; } $xfer += $input->readMapEnd(); } else { @@ -11681,10 +11817,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter546 => $viter547) + foreach ($this->success as $kiter574 => $viter575) { - $xfer += $output->writeString($kiter546); - $xfer += $viter547->write($output); + $xfer += $output->writeString($kiter574); + $xfer += $viter575->write($output); } } $output->writeMapEnd(); @@ -11888,15 +12024,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size548 = 0; - $_etype551 = 0; - $xfer += $input->readListBegin($_etype551, $_size548); - for ($_i552 = 0; $_i552 < $_size548; ++$_i552) + $_size576 = 0; + $_etype579 = 0; + $xfer += $input->readListBegin($_etype579, $_size576); + for ($_i580 = 0; $_i580 < $_size576; ++$_i580) { - $elem553 = null; - $elem553 = new \metastore\FieldSchema(); - $xfer += $elem553->read($input); - $this->success []= $elem553; + $elem581 = null; + $elem581 = new \metastore\FieldSchema(); + $xfer += $elem581->read($input); + $this->success []= $elem581; } $xfer += $input->readListEnd(); } else { @@ -11948,9 +12084,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter554) + foreach ($this->success as $iter582) { - $xfer += $iter554->write($output); + $xfer += $iter582->write($output); } } 
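The get_primary_keys and get_foreign_keys client methods added above follow the standard generated send/recv split: send_* frames the args struct with writeMessageBegin() and flushes the transport, while recv_* rethrows any TApplicationException and otherwise unpacks the result struct's success/o1/o2 slots. A minimal usage sketch under the signatures declared in the interface block; it assumes $client is an already-connected \metastore\ThriftHiveMetastoreClient and that the example database and tables exist:

try {
  // Returns \metastore\SQLPrimaryKey[] per the interface docblock.
  $pks = $client->get_primary_keys('default', 'orders');
  echo count($pks), " primary key column(s) on default.orders\n";

  // Parent table first, then the referencing (foreign) table.
  $fks = $client->get_foreign_keys('default', 'customers', 'default', 'orders');
  echo count($fks), " foreign key column(s) from default.orders\n";
} catch (\metastore\NoSuchObjectException $e) {
  // Raised when a named database or table is missing.
  echo "no such object: ", $e->getMessage(), "\n";
} catch (\metastore\MetaException $e) {
  echo "metastore error: ", $e->getMessage(), "\n";
}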
$output->writeListEnd(); @@ -12192,15 +12328,15 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size555 = 0; - $_etype558 = 0; - $xfer += $input->readListBegin($_etype558, $_size555); - for ($_i559 = 0; $_i559 < $_size555; ++$_i559) + $_size583 = 0; + $_etype586 = 0; + $xfer += $input->readListBegin($_etype586, $_size583); + for ($_i587 = 0; $_i587 < $_size583; ++$_i587) { - $elem560 = null; - $elem560 = new \metastore\FieldSchema(); - $xfer += $elem560->read($input); - $this->success []= $elem560; + $elem588 = null; + $elem588 = new \metastore\FieldSchema(); + $xfer += $elem588->read($input); + $this->success []= $elem588; } $xfer += $input->readListEnd(); } else { @@ -12252,9 +12388,9 @@ class ThriftHiveMetastore_get_fields_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter561) + foreach ($this->success as $iter589) { - $xfer += $iter561->write($output); + $xfer += $iter589->write($output); } } $output->writeListEnd(); @@ -12468,15 +12604,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size562 = 0; - $_etype565 = 0; - $xfer += $input->readListBegin($_etype565, $_size562); - for ($_i566 = 0; $_i566 < $_size562; ++$_i566) + $_size590 = 0; + $_etype593 = 0; + $xfer += $input->readListBegin($_etype593, $_size590); + for ($_i594 = 0; $_i594 < $_size590; ++$_i594) { - $elem567 = null; - $elem567 = new \metastore\FieldSchema(); - $xfer += $elem567->read($input); - $this->success []= $elem567; + $elem595 = null; + $elem595 = new \metastore\FieldSchema(); + $xfer += $elem595->read($input); + $this->success []= $elem595; } $xfer += $input->readListEnd(); } else { @@ -12528,9 +12664,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter568) + foreach ($this->success as $iter596) { - $xfer += $iter568->write($output); + $xfer += $iter596->write($output); } } $output->writeListEnd(); @@ -12772,15 +12908,15 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size569 = 0; - $_etype572 = 0; - $xfer += $input->readListBegin($_etype572, $_size569); - for ($_i573 = 0; $_i573 < $_size569; ++$_i573) + $_size597 = 0; + $_etype600 = 0; + $xfer += $input->readListBegin($_etype600, $_size597); + for ($_i601 = 0; $_i601 < $_size597; ++$_i601) { - $elem574 = null; - $elem574 = new \metastore\FieldSchema(); - $xfer += $elem574->read($input); - $this->success []= $elem574; + $elem602 = null; + $elem602 = new \metastore\FieldSchema(); + $xfer += $elem602->read($input); + $this->success []= $elem602; } $xfer += $input->readListEnd(); } else { @@ -12832,9 +12968,9 @@ class ThriftHiveMetastore_get_schema_with_environment_context_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter575) + foreach ($this->success as $iter603) { - $xfer += $iter575->write($output); + $xfer += $iter603->write($output); } } $output->writeListEnd(); @@ -13989,14 +14125,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size576 = 0; - $_etype579 = 0; - $xfer += $input->readListBegin($_etype579, $_size576); - for ($_i580 = 0; $_i580 < $_size576; ++$_i580) + $_size604 = 0; + $_etype607 = 0; + $xfer 
+= $input->readListBegin($_etype607, $_size604); + for ($_i608 = 0; $_i608 < $_size604; ++$_i608) { - $elem581 = null; - $xfer += $input->readString($elem581); - $this->success []= $elem581; + $elem609 = null; + $xfer += $input->readString($elem609); + $this->success []= $elem609; } $xfer += $input->readListEnd(); } else { @@ -14032,9 +14168,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter582) + foreach ($this->success as $iter610) { - $xfer += $output->writeString($iter582); + $xfer += $output->writeString($iter610); } } $output->writeListEnd(); @@ -14139,14 +14275,14 @@ class ThriftHiveMetastore_get_table_meta_args { case 3: if ($ftype == TType::LST) { $this->tbl_types = array(); - $_size583 = 0; - $_etype586 = 0; - $xfer += $input->readListBegin($_etype586, $_size583); - for ($_i587 = 0; $_i587 < $_size583; ++$_i587) + $_size611 = 0; + $_etype614 = 0; + $xfer += $input->readListBegin($_etype614, $_size611); + for ($_i615 = 0; $_i615 < $_size611; ++$_i615) { - $elem588 = null; - $xfer += $input->readString($elem588); - $this->tbl_types []= $elem588; + $elem616 = null; + $xfer += $input->readString($elem616); + $this->tbl_types []= $elem616; } $xfer += $input->readListEnd(); } else { @@ -14184,9 +14320,9 @@ class ThriftHiveMetastore_get_table_meta_args { { $output->writeListBegin(TType::STRING, count($this->tbl_types)); { - foreach ($this->tbl_types as $iter589) + foreach ($this->tbl_types as $iter617) { - $xfer += $output->writeString($iter589); + $xfer += $output->writeString($iter617); } } $output->writeListEnd(); @@ -14263,15 +14399,15 @@ class ThriftHiveMetastore_get_table_meta_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size590 = 0; - $_etype593 = 0; - $xfer += $input->readListBegin($_etype593, $_size590); - for ($_i594 = 0; $_i594 < $_size590; ++$_i594) + $_size618 = 0; + $_etype621 = 0; + $xfer += $input->readListBegin($_etype621, $_size618); + for ($_i622 = 0; $_i622 < $_size618; ++$_i622) { - $elem595 = null; - $elem595 = new \metastore\TableMeta(); - $xfer += $elem595->read($input); - $this->success []= $elem595; + $elem623 = null; + $elem623 = new \metastore\TableMeta(); + $xfer += $elem623->read($input); + $this->success []= $elem623; } $xfer += $input->readListEnd(); } else { @@ -14307,9 +14443,9 @@ class ThriftHiveMetastore_get_table_meta_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter596) + foreach ($this->success as $iter624) { - $xfer += $iter596->write($output); + $xfer += $iter624->write($output); } } $output->writeListEnd(); @@ -14465,14 +14601,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size597 = 0; - $_etype600 = 0; - $xfer += $input->readListBegin($_etype600, $_size597); - for ($_i601 = 0; $_i601 < $_size597; ++$_i601) + $_size625 = 0; + $_etype628 = 0; + $xfer += $input->readListBegin($_etype628, $_size625); + for ($_i629 = 0; $_i629 < $_size625; ++$_i629) { - $elem602 = null; - $xfer += $input->readString($elem602); - $this->success []= $elem602; + $elem630 = null; + $xfer += $input->readString($elem630); + $this->success []= $elem630; } $xfer += $input->readListEnd(); } else { @@ -14508,9 +14644,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter603) + foreach ($this->success as $iter631) 
{ - $xfer += $output->writeString($iter603); + $xfer += $output->writeString($iter631); } } $output->writeListEnd(); @@ -14825,14 +14961,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size604 = 0; - $_etype607 = 0; - $xfer += $input->readListBegin($_etype607, $_size604); - for ($_i608 = 0; $_i608 < $_size604; ++$_i608) + $_size632 = 0; + $_etype635 = 0; + $xfer += $input->readListBegin($_etype635, $_size632); + for ($_i636 = 0; $_i636 < $_size632; ++$_i636) { - $elem609 = null; - $xfer += $input->readString($elem609); - $this->tbl_names []= $elem609; + $elem637 = null; + $xfer += $input->readString($elem637); + $this->tbl_names []= $elem637; } $xfer += $input->readListEnd(); } else { @@ -14865,9 +15001,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter610) + foreach ($this->tbl_names as $iter638) { - $xfer += $output->writeString($iter610); + $xfer += $output->writeString($iter638); } } $output->writeListEnd(); @@ -14968,15 +15104,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size611 = 0; - $_etype614 = 0; - $xfer += $input->readListBegin($_etype614, $_size611); - for ($_i615 = 0; $_i615 < $_size611; ++$_i615) + $_size639 = 0; + $_etype642 = 0; + $xfer += $input->readListBegin($_etype642, $_size639); + for ($_i643 = 0; $_i643 < $_size639; ++$_i643) { - $elem616 = null; - $elem616 = new \metastore\Table(); - $xfer += $elem616->read($input); - $this->success []= $elem616; + $elem644 = null; + $elem644 = new \metastore\Table(); + $xfer += $elem644->read($input); + $this->success []= $elem644; } $xfer += $input->readListEnd(); } else { @@ -15028,9 +15164,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter617) + foreach ($this->success as $iter645) { - $xfer += $iter617->write($output); + $xfer += $iter645->write($output); } } $output->writeListEnd(); @@ -15266,14 +15402,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size618 = 0; - $_etype621 = 0; - $xfer += $input->readListBegin($_etype621, $_size618); - for ($_i622 = 0; $_i622 < $_size618; ++$_i622) + $_size646 = 0; + $_etype649 = 0; + $xfer += $input->readListBegin($_etype649, $_size646); + for ($_i650 = 0; $_i650 < $_size646; ++$_i650) { - $elem623 = null; - $xfer += $input->readString($elem623); - $this->success []= $elem623; + $elem651 = null; + $xfer += $input->readString($elem651); + $this->success []= $elem651; } $xfer += $input->readListEnd(); } else { @@ -15325,9 +15461,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter624) + foreach ($this->success as $iter652) { - $xfer += $output->writeString($iter624); + $xfer += $output->writeString($iter652); } } $output->writeListEnd(); @@ -16640,15 +16776,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size625 = 0; - $_etype628 = 0; - $xfer += $input->readListBegin($_etype628, $_size625); - for ($_i629 = 0; $_i629 < $_size625; ++$_i629) + $_size653 = 0; + $_etype656 = 0; + $xfer += $input->readListBegin($_etype656, 
$_size653); + for ($_i657 = 0; $_i657 < $_size653; ++$_i657) { - $elem630 = null; - $elem630 = new \metastore\Partition(); - $xfer += $elem630->read($input); - $this->new_parts []= $elem630; + $elem658 = null; + $elem658 = new \metastore\Partition(); + $xfer += $elem658->read($input); + $this->new_parts []= $elem658; } $xfer += $input->readListEnd(); } else { @@ -16676,9 +16812,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter631) + foreach ($this->new_parts as $iter659) { - $xfer += $iter631->write($output); + $xfer += $iter659->write($output); } } $output->writeListEnd(); @@ -16893,15 +17029,15 @@ class ThriftHiveMetastore_add_partitions_pspec_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size632 = 0; - $_etype635 = 0; - $xfer += $input->readListBegin($_etype635, $_size632); - for ($_i636 = 0; $_i636 < $_size632; ++$_i636) + $_size660 = 0; + $_etype663 = 0; + $xfer += $input->readListBegin($_etype663, $_size660); + for ($_i664 = 0; $_i664 < $_size660; ++$_i664) { - $elem637 = null; - $elem637 = new \metastore\PartitionSpec(); - $xfer += $elem637->read($input); - $this->new_parts []= $elem637; + $elem665 = null; + $elem665 = new \metastore\PartitionSpec(); + $xfer += $elem665->read($input); + $this->new_parts []= $elem665; } $xfer += $input->readListEnd(); } else { @@ -16929,9 +17065,9 @@ class ThriftHiveMetastore_add_partitions_pspec_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter638) + foreach ($this->new_parts as $iter666) { - $xfer += $iter638->write($output); + $xfer += $iter666->write($output); } } $output->writeListEnd(); @@ -17181,14 +17317,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size639 = 0; - $_etype642 = 0; - $xfer += $input->readListBegin($_etype642, $_size639); - for ($_i643 = 0; $_i643 < $_size639; ++$_i643) + $_size667 = 0; + $_etype670 = 0; + $xfer += $input->readListBegin($_etype670, $_size667); + for ($_i671 = 0; $_i671 < $_size667; ++$_i671) { - $elem644 = null; - $xfer += $input->readString($elem644); - $this->part_vals []= $elem644; + $elem672 = null; + $xfer += $input->readString($elem672); + $this->part_vals []= $elem672; } $xfer += $input->readListEnd(); } else { @@ -17226,9 +17362,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter645) + foreach ($this->part_vals as $iter673) { - $xfer += $output->writeString($iter645); + $xfer += $output->writeString($iter673); } } $output->writeListEnd(); @@ -17730,14 +17866,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size646 = 0; - $_etype649 = 0; - $xfer += $input->readListBegin($_etype649, $_size646); - for ($_i650 = 0; $_i650 < $_size646; ++$_i650) + $_size674 = 0; + $_etype677 = 0; + $xfer += $input->readListBegin($_etype677, $_size674); + for ($_i678 = 0; $_i678 < $_size674; ++$_i678) { - $elem651 = null; - $xfer += $input->readString($elem651); - $this->part_vals []= $elem651; + $elem679 = null; + $xfer += $input->readString($elem679); + $this->part_vals []= $elem679; } $xfer += $input->readListEnd(); } else { @@ -17783,9 +17919,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { 
$output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter652) + foreach ($this->part_vals as $iter680) { - $xfer += $output->writeString($iter652); + $xfer += $output->writeString($iter680); } } $output->writeListEnd(); @@ -18639,14 +18775,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size653 = 0; - $_etype656 = 0; - $xfer += $input->readListBegin($_etype656, $_size653); - for ($_i657 = 0; $_i657 < $_size653; ++$_i657) + $_size681 = 0; + $_etype684 = 0; + $xfer += $input->readListBegin($_etype684, $_size681); + for ($_i685 = 0; $_i685 < $_size681; ++$_i685) { - $elem658 = null; - $xfer += $input->readString($elem658); - $this->part_vals []= $elem658; + $elem686 = null; + $xfer += $input->readString($elem686); + $this->part_vals []= $elem686; } $xfer += $input->readListEnd(); } else { @@ -18691,9 +18827,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter659) + foreach ($this->part_vals as $iter687) { - $xfer += $output->writeString($iter659); + $xfer += $output->writeString($iter687); } } $output->writeListEnd(); @@ -18946,14 +19082,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size660 = 0; - $_etype663 = 0; - $xfer += $input->readListBegin($_etype663, $_size660); - for ($_i664 = 0; $_i664 < $_size660; ++$_i664) + $_size688 = 0; + $_etype691 = 0; + $xfer += $input->readListBegin($_etype691, $_size688); + for ($_i692 = 0; $_i692 < $_size688; ++$_i692) { - $elem665 = null; - $xfer += $input->readString($elem665); - $this->part_vals []= $elem665; + $elem693 = null; + $xfer += $input->readString($elem693); + $this->part_vals []= $elem693; } $xfer += $input->readListEnd(); } else { @@ -19006,9 +19142,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter666) + foreach ($this->part_vals as $iter694) { - $xfer += $output->writeString($iter666); + $xfer += $output->writeString($iter694); } } $output->writeListEnd(); @@ -20022,14 +20158,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size667 = 0; - $_etype670 = 0; - $xfer += $input->readListBegin($_etype670, $_size667); - for ($_i671 = 0; $_i671 < $_size667; ++$_i671) + $_size695 = 0; + $_etype698 = 0; + $xfer += $input->readListBegin($_etype698, $_size695); + for ($_i699 = 0; $_i699 < $_size695; ++$_i699) { - $elem672 = null; - $xfer += $input->readString($elem672); - $this->part_vals []= $elem672; + $elem700 = null; + $xfer += $input->readString($elem700); + $this->part_vals []= $elem700; } $xfer += $input->readListEnd(); } else { @@ -20067,9 +20203,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter673) + foreach ($this->part_vals as $iter701) { - $xfer += $output->writeString($iter673); + $xfer += $output->writeString($iter701); } } $output->writeListEnd(); @@ -20311,17 +20447,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size674 = 0; - $_ktype675 = 0; - $_vtype676 = 0; - $xfer += $input->readMapBegin($_ktype675, $_vtype676, $_size674); 
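The exchange_partition and exchange_partitions hunks here renumber the map temporaries ($_ktype, $_vtype, $key, $val) in the same mechanical way. The underlying pattern is a string-to-string map read; a stripped-down sketch with an illustrative helper name, using the protocol calls exactly as the generated code does:

// Mirrors the generated readMapBegin()/readString() loop for
// map<string,string> fields such as partitionSpecs.
function readStringMap($input) {
  $map = array();
  $size = 0;
  $ktype = 0;
  $vtype = 0;
  $input->readMapBegin($ktype, $vtype, $size);
  for ($i = 0; $i < $size; ++$i) {
    $key = '';
    $val = '';
    $input->readString($key);
    $input->readString($val);
    $map[$key] = $val;
  }
  $input->readMapEnd();
  return $map;
}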
- for ($_i678 = 0; $_i678 < $_size674; ++$_i678) + $_size702 = 0; + $_ktype703 = 0; + $_vtype704 = 0; + $xfer += $input->readMapBegin($_ktype703, $_vtype704, $_size702); + for ($_i706 = 0; $_i706 < $_size702; ++$_i706) { - $key679 = ''; - $val680 = ''; - $xfer += $input->readString($key679); - $xfer += $input->readString($val680); - $this->partitionSpecs[$key679] = $val680; + $key707 = ''; + $val708 = ''; + $xfer += $input->readString($key707); + $xfer += $input->readString($val708); + $this->partitionSpecs[$key707] = $val708; } $xfer += $input->readMapEnd(); } else { @@ -20377,10 +20513,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter681 => $viter682) + foreach ($this->partitionSpecs as $kiter709 => $viter710) { - $xfer += $output->writeString($kiter681); - $xfer += $output->writeString($viter682); + $xfer += $output->writeString($kiter709); + $xfer += $output->writeString($viter710); } } $output->writeMapEnd(); @@ -20692,17 +20828,17 @@ class ThriftHiveMetastore_exchange_partitions_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size683 = 0; - $_ktype684 = 0; - $_vtype685 = 0; - $xfer += $input->readMapBegin($_ktype684, $_vtype685, $_size683); - for ($_i687 = 0; $_i687 < $_size683; ++$_i687) + $_size711 = 0; + $_ktype712 = 0; + $_vtype713 = 0; + $xfer += $input->readMapBegin($_ktype712, $_vtype713, $_size711); + for ($_i715 = 0; $_i715 < $_size711; ++$_i715) { - $key688 = ''; - $val689 = ''; - $xfer += $input->readString($key688); - $xfer += $input->readString($val689); - $this->partitionSpecs[$key688] = $val689; + $key716 = ''; + $val717 = ''; + $xfer += $input->readString($key716); + $xfer += $input->readString($val717); + $this->partitionSpecs[$key716] = $val717; } $xfer += $input->readMapEnd(); } else { @@ -20758,10 +20894,10 @@ class ThriftHiveMetastore_exchange_partitions_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter690 => $viter691) + foreach ($this->partitionSpecs as $kiter718 => $viter719) { - $xfer += $output->writeString($kiter690); - $xfer += $output->writeString($viter691); + $xfer += $output->writeString($kiter718); + $xfer += $output->writeString($viter719); } } $output->writeMapEnd(); @@ -20894,15 +21030,15 @@ class ThriftHiveMetastore_exchange_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size692 = 0; - $_etype695 = 0; - $xfer += $input->readListBegin($_etype695, $_size692); - for ($_i696 = 0; $_i696 < $_size692; ++$_i696) + $_size720 = 0; + $_etype723 = 0; + $xfer += $input->readListBegin($_etype723, $_size720); + for ($_i724 = 0; $_i724 < $_size720; ++$_i724) { - $elem697 = null; - $elem697 = new \metastore\Partition(); - $xfer += $elem697->read($input); - $this->success []= $elem697; + $elem725 = null; + $elem725 = new \metastore\Partition(); + $xfer += $elem725->read($input); + $this->success []= $elem725; } $xfer += $input->readListEnd(); } else { @@ -20962,9 +21098,9 @@ class ThriftHiveMetastore_exchange_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter698) + foreach ($this->success as $iter726) { - $xfer += $iter698->write($output); + $xfer += $iter726->write($output); } } $output->writeListEnd(); @@ -21110,14 +21246,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if 
($ftype == TType::LST) { $this->part_vals = array(); - $_size699 = 0; - $_etype702 = 0; - $xfer += $input->readListBegin($_etype702, $_size699); - for ($_i703 = 0; $_i703 < $_size699; ++$_i703) + $_size727 = 0; + $_etype730 = 0; + $xfer += $input->readListBegin($_etype730, $_size727); + for ($_i731 = 0; $_i731 < $_size727; ++$_i731) { - $elem704 = null; - $xfer += $input->readString($elem704); - $this->part_vals []= $elem704; + $elem732 = null; + $xfer += $input->readString($elem732); + $this->part_vals []= $elem732; } $xfer += $input->readListEnd(); } else { @@ -21134,14 +21270,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size705 = 0; - $_etype708 = 0; - $xfer += $input->readListBegin($_etype708, $_size705); - for ($_i709 = 0; $_i709 < $_size705; ++$_i709) + $_size733 = 0; + $_etype736 = 0; + $xfer += $input->readListBegin($_etype736, $_size733); + for ($_i737 = 0; $_i737 < $_size733; ++$_i737) { - $elem710 = null; - $xfer += $input->readString($elem710); - $this->group_names []= $elem710; + $elem738 = null; + $xfer += $input->readString($elem738); + $this->group_names []= $elem738; } $xfer += $input->readListEnd(); } else { @@ -21179,9 +21315,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter711) + foreach ($this->part_vals as $iter739) { - $xfer += $output->writeString($iter711); + $xfer += $output->writeString($iter739); } } $output->writeListEnd(); @@ -21201,9 +21337,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter712) + foreach ($this->group_names as $iter740) { - $xfer += $output->writeString($iter712); + $xfer += $output->writeString($iter740); } } $output->writeListEnd(); @@ -21794,15 +21930,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size713 = 0; - $_etype716 = 0; - $xfer += $input->readListBegin($_etype716, $_size713); - for ($_i717 = 0; $_i717 < $_size713; ++$_i717) + $_size741 = 0; + $_etype744 = 0; + $xfer += $input->readListBegin($_etype744, $_size741); + for ($_i745 = 0; $_i745 < $_size741; ++$_i745) { - $elem718 = null; - $elem718 = new \metastore\Partition(); - $xfer += $elem718->read($input); - $this->success []= $elem718; + $elem746 = null; + $elem746 = new \metastore\Partition(); + $xfer += $elem746->read($input); + $this->success []= $elem746; } $xfer += $input->readListEnd(); } else { @@ -21846,9 +21982,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter719) + foreach ($this->success as $iter747) { - $xfer += $iter719->write($output); + $xfer += $iter747->write($output); } } $output->writeListEnd(); @@ -21994,14 +22130,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size720 = 0; - $_etype723 = 0; - $xfer += $input->readListBegin($_etype723, $_size720); - for ($_i724 = 0; $_i724 < $_size720; ++$_i724) + $_size748 = 0; + $_etype751 = 0; + $xfer += $input->readListBegin($_etype751, $_size748); + for ($_i752 = 0; $_i752 < $_size748; ++$_i752) { - $elem725 = null; - $xfer += $input->readString($elem725); - $this->group_names []= $elem725; + $elem753 = null; + $xfer += 
$input->readString($elem753); + $this->group_names []= $elem753; } $xfer += $input->readListEnd(); } else { @@ -22049,9 +22185,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter726) + foreach ($this->group_names as $iter754) { - $xfer += $output->writeString($iter726); + $xfer += $output->writeString($iter754); } } $output->writeListEnd(); @@ -22140,15 +22276,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size727 = 0; - $_etype730 = 0; - $xfer += $input->readListBegin($_etype730, $_size727); - for ($_i731 = 0; $_i731 < $_size727; ++$_i731) + $_size755 = 0; + $_etype758 = 0; + $xfer += $input->readListBegin($_etype758, $_size755); + for ($_i759 = 0; $_i759 < $_size755; ++$_i759) { - $elem732 = null; - $elem732 = new \metastore\Partition(); - $xfer += $elem732->read($input); - $this->success []= $elem732; + $elem760 = null; + $elem760 = new \metastore\Partition(); + $xfer += $elem760->read($input); + $this->success []= $elem760; } $xfer += $input->readListEnd(); } else { @@ -22192,9 +22328,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter733) + foreach ($this->success as $iter761) { - $xfer += $iter733->write($output); + $xfer += $iter761->write($output); } } $output->writeListEnd(); @@ -22414,15 +22550,15 @@ class ThriftHiveMetastore_get_partitions_pspec_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size734 = 0; - $_etype737 = 0; - $xfer += $input->readListBegin($_etype737, $_size734); - for ($_i738 = 0; $_i738 < $_size734; ++$_i738) + $_size762 = 0; + $_etype765 = 0; + $xfer += $input->readListBegin($_etype765, $_size762); + for ($_i766 = 0; $_i766 < $_size762; ++$_i766) { - $elem739 = null; - $elem739 = new \metastore\PartitionSpec(); - $xfer += $elem739->read($input); - $this->success []= $elem739; + $elem767 = null; + $elem767 = new \metastore\PartitionSpec(); + $xfer += $elem767->read($input); + $this->success []= $elem767; } $xfer += $input->readListEnd(); } else { @@ -22466,9 +22602,9 @@ class ThriftHiveMetastore_get_partitions_pspec_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter740) + foreach ($this->success as $iter768) { - $xfer += $iter740->write($output); + $xfer += $iter768->write($output); } } $output->writeListEnd(); @@ -22675,14 +22811,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size741 = 0; - $_etype744 = 0; - $xfer += $input->readListBegin($_etype744, $_size741); - for ($_i745 = 0; $_i745 < $_size741; ++$_i745) + $_size769 = 0; + $_etype772 = 0; + $xfer += $input->readListBegin($_etype772, $_size769); + for ($_i773 = 0; $_i773 < $_size769; ++$_i773) { - $elem746 = null; - $xfer += $input->readString($elem746); - $this->success []= $elem746; + $elem774 = null; + $xfer += $input->readString($elem774); + $this->success []= $elem774; } $xfer += $input->readListEnd(); } else { @@ -22718,9 +22854,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter747) + foreach ($this->success as $iter775) { - $xfer += $output->writeString($iter747); + $xfer += $output->writeString($iter775); } } 
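Every string-list field in these PHP hunks (part_vals, group_names, success) goes through the same generated read and write loops; only the temporary numbers differ from hunk to hunk. A condensed sketch of that pair, with illustrative helper names that do not appear in the patch:

function readStringList($input) {
  $list = array();
  $size = 0;
  $etype = 0;
  $input->readListBegin($etype, $size);
  for ($i = 0; $i < $size; ++$i) {
    $elem = null;
    $input->readString($elem);
    $list[] = $elem;
  }
  $input->readListEnd();
  return $list;
}

function writeStringList($output, array $list) {
  $output->writeListBegin(TType::STRING, count($list));
  foreach ($list as $elem) {
    $output->writeString($elem);
  }
  $output->writeListEnd();
}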
$output->writeListEnd(); @@ -22836,14 +22972,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size748 = 0; - $_etype751 = 0; - $xfer += $input->readListBegin($_etype751, $_size748); - for ($_i752 = 0; $_i752 < $_size748; ++$_i752) + $_size776 = 0; + $_etype779 = 0; + $xfer += $input->readListBegin($_etype779, $_size776); + for ($_i780 = 0; $_i780 < $_size776; ++$_i780) { - $elem753 = null; - $xfer += $input->readString($elem753); - $this->part_vals []= $elem753; + $elem781 = null; + $xfer += $input->readString($elem781); + $this->part_vals []= $elem781; } $xfer += $input->readListEnd(); } else { @@ -22888,9 +23024,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter754) + foreach ($this->part_vals as $iter782) { - $xfer += $output->writeString($iter754); + $xfer += $output->writeString($iter782); } } $output->writeListEnd(); @@ -22984,15 +23120,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size755 = 0; - $_etype758 = 0; - $xfer += $input->readListBegin($_etype758, $_size755); - for ($_i759 = 0; $_i759 < $_size755; ++$_i759) + $_size783 = 0; + $_etype786 = 0; + $xfer += $input->readListBegin($_etype786, $_size783); + for ($_i787 = 0; $_i787 < $_size783; ++$_i787) { - $elem760 = null; - $elem760 = new \metastore\Partition(); - $xfer += $elem760->read($input); - $this->success []= $elem760; + $elem788 = null; + $elem788 = new \metastore\Partition(); + $xfer += $elem788->read($input); + $this->success []= $elem788; } $xfer += $input->readListEnd(); } else { @@ -23036,9 +23172,9 @@ class ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter761) + foreach ($this->success as $iter789) { - $xfer += $iter761->write($output); + $xfer += $iter789->write($output); } } $output->writeListEnd(); @@ -23185,14 +23321,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size762 = 0; - $_etype765 = 0; - $xfer += $input->readListBegin($_etype765, $_size762); - for ($_i766 = 0; $_i766 < $_size762; ++$_i766) + $_size790 = 0; + $_etype793 = 0; + $xfer += $input->readListBegin($_etype793, $_size790); + for ($_i794 = 0; $_i794 < $_size790; ++$_i794) { - $elem767 = null; - $xfer += $input->readString($elem767); - $this->part_vals []= $elem767; + $elem795 = null; + $xfer += $input->readString($elem795); + $this->part_vals []= $elem795; } $xfer += $input->readListEnd(); } else { @@ -23216,14 +23352,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size768 = 0; - $_etype771 = 0; - $xfer += $input->readListBegin($_etype771, $_size768); - for ($_i772 = 0; $_i772 < $_size768; ++$_i772) + $_size796 = 0; + $_etype799 = 0; + $xfer += $input->readListBegin($_etype799, $_size796); + for ($_i800 = 0; $_i800 < $_size796; ++$_i800) { - $elem773 = null; - $xfer += $input->readString($elem773); - $this->group_names []= $elem773; + $elem801 = null; + $xfer += $input->readString($elem801); + $this->group_names []= $elem801; } $xfer += $input->readListEnd(); } else { @@ -23261,9 +23397,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); 
{ - foreach ($this->part_vals as $iter774) + foreach ($this->part_vals as $iter802) { - $xfer += $output->writeString($iter774); + $xfer += $output->writeString($iter802); } } $output->writeListEnd(); @@ -23288,9 +23424,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter775) + foreach ($this->group_names as $iter803) { - $xfer += $output->writeString($iter775); + $xfer += $output->writeString($iter803); } } $output->writeListEnd(); @@ -23379,15 +23515,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size776 = 0; - $_etype779 = 0; - $xfer += $input->readListBegin($_etype779, $_size776); - for ($_i780 = 0; $_i780 < $_size776; ++$_i780) + $_size804 = 0; + $_etype807 = 0; + $xfer += $input->readListBegin($_etype807, $_size804); + for ($_i808 = 0; $_i808 < $_size804; ++$_i808) { - $elem781 = null; - $elem781 = new \metastore\Partition(); - $xfer += $elem781->read($input); - $this->success []= $elem781; + $elem809 = null; + $elem809 = new \metastore\Partition(); + $xfer += $elem809->read($input); + $this->success []= $elem809; } $xfer += $input->readListEnd(); } else { @@ -23431,9 +23567,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter782) + foreach ($this->success as $iter810) { - $xfer += $iter782->write($output); + $xfer += $iter810->write($output); } } $output->writeListEnd(); @@ -23554,14 +23690,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size783 = 0; - $_etype786 = 0; - $xfer += $input->readListBegin($_etype786, $_size783); - for ($_i787 = 0; $_i787 < $_size783; ++$_i787) + $_size811 = 0; + $_etype814 = 0; + $xfer += $input->readListBegin($_etype814, $_size811); + for ($_i815 = 0; $_i815 < $_size811; ++$_i815) { - $elem788 = null; - $xfer += $input->readString($elem788); - $this->part_vals []= $elem788; + $elem816 = null; + $xfer += $input->readString($elem816); + $this->part_vals []= $elem816; } $xfer += $input->readListEnd(); } else { @@ -23606,9 +23742,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter789) + foreach ($this->part_vals as $iter817) { - $xfer += $output->writeString($iter789); + $xfer += $output->writeString($iter817); } } $output->writeListEnd(); @@ -23701,14 +23837,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size790 = 0; - $_etype793 = 0; - $xfer += $input->readListBegin($_etype793, $_size790); - for ($_i794 = 0; $_i794 < $_size790; ++$_i794) + $_size818 = 0; + $_etype821 = 0; + $xfer += $input->readListBegin($_etype821, $_size818); + for ($_i822 = 0; $_i822 < $_size818; ++$_i822) { - $elem795 = null; - $xfer += $input->readString($elem795); - $this->success []= $elem795; + $elem823 = null; + $xfer += $input->readString($elem823); + $this->success []= $elem823; } $xfer += $input->readListEnd(); } else { @@ -23752,9 +23888,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter796) + foreach ($this->success as $iter824) { - $xfer += 
$output->writeString($iter796); + $xfer += $output->writeString($iter824); } } $output->writeListEnd(); @@ -23997,15 +24133,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size797 = 0; - $_etype800 = 0; - $xfer += $input->readListBegin($_etype800, $_size797); - for ($_i801 = 0; $_i801 < $_size797; ++$_i801) + $_size825 = 0; + $_etype828 = 0; + $xfer += $input->readListBegin($_etype828, $_size825); + for ($_i829 = 0; $_i829 < $_size825; ++$_i829) { - $elem802 = null; - $elem802 = new \metastore\Partition(); - $xfer += $elem802->read($input); - $this->success []= $elem802; + $elem830 = null; + $elem830 = new \metastore\Partition(); + $xfer += $elem830->read($input); + $this->success []= $elem830; } $xfer += $input->readListEnd(); } else { @@ -24049,9 +24185,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter803) + foreach ($this->success as $iter831) { - $xfer += $iter803->write($output); + $xfer += $iter831->write($output); } } $output->writeListEnd(); @@ -24294,15 +24430,15 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size804 = 0; - $_etype807 = 0; - $xfer += $input->readListBegin($_etype807, $_size804); - for ($_i808 = 0; $_i808 < $_size804; ++$_i808) + $_size832 = 0; + $_etype835 = 0; + $xfer += $input->readListBegin($_etype835, $_size832); + for ($_i836 = 0; $_i836 < $_size832; ++$_i836) { - $elem809 = null; - $elem809 = new \metastore\PartitionSpec(); - $xfer += $elem809->read($input); - $this->success []= $elem809; + $elem837 = null; + $elem837 = new \metastore\PartitionSpec(); + $xfer += $elem837->read($input); + $this->success []= $elem837; } $xfer += $input->readListEnd(); } else { @@ -24346,9 +24482,9 @@ class ThriftHiveMetastore_get_part_specs_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter810) + foreach ($this->success as $iter838) { - $xfer += $iter810->write($output); + $xfer += $iter838->write($output); } } $output->writeListEnd(); @@ -24914,14 +25050,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size811 = 0; - $_etype814 = 0; - $xfer += $input->readListBegin($_etype814, $_size811); - for ($_i815 = 0; $_i815 < $_size811; ++$_i815) + $_size839 = 0; + $_etype842 = 0; + $xfer += $input->readListBegin($_etype842, $_size839); + for ($_i843 = 0; $_i843 < $_size839; ++$_i843) { - $elem816 = null; - $xfer += $input->readString($elem816); - $this->names []= $elem816; + $elem844 = null; + $xfer += $input->readString($elem844); + $this->names []= $elem844; } $xfer += $input->readListEnd(); } else { @@ -24959,9 +25095,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter817) + foreach ($this->names as $iter845) { - $xfer += $output->writeString($iter817); + $xfer += $output->writeString($iter845); } } $output->writeListEnd(); @@ -25050,15 +25186,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size818 = 0; - $_etype821 = 0; - $xfer += $input->readListBegin($_etype821, $_size818); - for ($_i822 = 0; $_i822 < $_size818; ++$_i822) + $_size846 = 0; + $_etype849 = 0; + $xfer += 
$input->readListBegin($_etype849, $_size846); + for ($_i850 = 0; $_i850 < $_size846; ++$_i850) { - $elem823 = null; - $elem823 = new \metastore\Partition(); - $xfer += $elem823->read($input); - $this->success []= $elem823; + $elem851 = null; + $elem851 = new \metastore\Partition(); + $xfer += $elem851->read($input); + $this->success []= $elem851; } $xfer += $input->readListEnd(); } else { @@ -25102,9 +25238,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter824) + foreach ($this->success as $iter852) { - $xfer += $iter824->write($output); + $xfer += $iter852->write($output); } } $output->writeListEnd(); @@ -25443,15 +25579,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size825 = 0; - $_etype828 = 0; - $xfer += $input->readListBegin($_etype828, $_size825); - for ($_i829 = 0; $_i829 < $_size825; ++$_i829) + $_size853 = 0; + $_etype856 = 0; + $xfer += $input->readListBegin($_etype856, $_size853); + for ($_i857 = 0; $_i857 < $_size853; ++$_i857) { - $elem830 = null; - $elem830 = new \metastore\Partition(); - $xfer += $elem830->read($input); - $this->new_parts []= $elem830; + $elem858 = null; + $elem858 = new \metastore\Partition(); + $xfer += $elem858->read($input); + $this->new_parts []= $elem858; } $xfer += $input->readListEnd(); } else { @@ -25489,9 +25625,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter831) + foreach ($this->new_parts as $iter859) { - $xfer += $iter831->write($output); + $xfer += $iter859->write($output); } } $output->writeListEnd(); @@ -25706,15 +25842,15 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size832 = 0; - $_etype835 = 0; - $xfer += $input->readListBegin($_etype835, $_size832); - for ($_i836 = 0; $_i836 < $_size832; ++$_i836) + $_size860 = 0; + $_etype863 = 0; + $xfer += $input->readListBegin($_etype863, $_size860); + for ($_i864 = 0; $_i864 < $_size860; ++$_i864) { - $elem837 = null; - $elem837 = new \metastore\Partition(); - $xfer += $elem837->read($input); - $this->new_parts []= $elem837; + $elem865 = null; + $elem865 = new \metastore\Partition(); + $xfer += $elem865->read($input); + $this->new_parts []= $elem865; } $xfer += $input->readListEnd(); } else { @@ -25760,9 +25896,9 @@ class ThriftHiveMetastore_alter_partitions_with_environment_context_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter838) + foreach ($this->new_parts as $iter866) { - $xfer += $iter838->write($output); + $xfer += $iter866->write($output); } } $output->writeListEnd(); @@ -26240,14 +26376,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size839 = 0; - $_etype842 = 0; - $xfer += $input->readListBegin($_etype842, $_size839); - for ($_i843 = 0; $_i843 < $_size839; ++$_i843) + $_size867 = 0; + $_etype870 = 0; + $xfer += $input->readListBegin($_etype870, $_size867); + for ($_i871 = 0; $_i871 < $_size867; ++$_i871) { - $elem844 = null; - $xfer += $input->readString($elem844); - $this->part_vals []= $elem844; + $elem872 = null; + $xfer += $input->readString($elem872); + $this->part_vals []= $elem872; } $xfer += $input->readListEnd(); } else { @@ -26293,9 +26429,9 
@@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter845) + foreach ($this->part_vals as $iter873) { - $xfer += $output->writeString($iter845); + $xfer += $output->writeString($iter873); } } $output->writeListEnd(); @@ -26480,14 +26616,14 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size846 = 0; - $_etype849 = 0; - $xfer += $input->readListBegin($_etype849, $_size846); - for ($_i850 = 0; $_i850 < $_size846; ++$_i850) + $_size874 = 0; + $_etype877 = 0; + $xfer += $input->readListBegin($_etype877, $_size874); + for ($_i878 = 0; $_i878 < $_size874; ++$_i878) { - $elem851 = null; - $xfer += $input->readString($elem851); - $this->part_vals []= $elem851; + $elem879 = null; + $xfer += $input->readString($elem879); + $this->part_vals []= $elem879; } $xfer += $input->readListEnd(); } else { @@ -26522,9 +26658,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter852) + foreach ($this->part_vals as $iter880) { - $xfer += $output->writeString($iter852); + $xfer += $output->writeString($iter880); } } $output->writeListEnd(); @@ -26978,14 +27114,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size853 = 0; - $_etype856 = 0; - $xfer += $input->readListBegin($_etype856, $_size853); - for ($_i857 = 0; $_i857 < $_size853; ++$_i857) + $_size881 = 0; + $_etype884 = 0; + $xfer += $input->readListBegin($_etype884, $_size881); + for ($_i885 = 0; $_i885 < $_size881; ++$_i885) { - $elem858 = null; - $xfer += $input->readString($elem858); - $this->success []= $elem858; + $elem886 = null; + $xfer += $input->readString($elem886); + $this->success []= $elem886; } $xfer += $input->readListEnd(); } else { @@ -27021,9 +27157,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter859) + foreach ($this->success as $iter887) { - $xfer += $output->writeString($iter859); + $xfer += $output->writeString($iter887); } } $output->writeListEnd(); @@ -27183,17 +27319,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size860 = 0; - $_ktype861 = 0; - $_vtype862 = 0; - $xfer += $input->readMapBegin($_ktype861, $_vtype862, $_size860); - for ($_i864 = 0; $_i864 < $_size860; ++$_i864) + $_size888 = 0; + $_ktype889 = 0; + $_vtype890 = 0; + $xfer += $input->readMapBegin($_ktype889, $_vtype890, $_size888); + for ($_i892 = 0; $_i892 < $_size888; ++$_i892) { - $key865 = ''; - $val866 = ''; - $xfer += $input->readString($key865); - $xfer += $input->readString($val866); - $this->success[$key865] = $val866; + $key893 = ''; + $val894 = ''; + $xfer += $input->readString($key893); + $xfer += $input->readString($val894); + $this->success[$key893] = $val894; } $xfer += $input->readMapEnd(); } else { @@ -27229,10 +27365,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter867 => $viter868) + foreach ($this->success as $kiter895 => $viter896) { - $xfer += $output->writeString($kiter867); - $xfer += $output->writeString($viter868); + $xfer += 
$output->writeString($kiter895); + $xfer += $output->writeString($viter896); } } $output->writeMapEnd(); @@ -27352,17 +27488,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size869 = 0; - $_ktype870 = 0; - $_vtype871 = 0; - $xfer += $input->readMapBegin($_ktype870, $_vtype871, $_size869); - for ($_i873 = 0; $_i873 < $_size869; ++$_i873) + $_size897 = 0; + $_ktype898 = 0; + $_vtype899 = 0; + $xfer += $input->readMapBegin($_ktype898, $_vtype899, $_size897); + for ($_i901 = 0; $_i901 < $_size897; ++$_i901) { - $key874 = ''; - $val875 = ''; - $xfer += $input->readString($key874); - $xfer += $input->readString($val875); - $this->part_vals[$key874] = $val875; + $key902 = ''; + $val903 = ''; + $xfer += $input->readString($key902); + $xfer += $input->readString($val903); + $this->part_vals[$key902] = $val903; } $xfer += $input->readMapEnd(); } else { @@ -27407,10 +27543,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter876 => $viter877) + foreach ($this->part_vals as $kiter904 => $viter905) { - $xfer += $output->writeString($kiter876); - $xfer += $output->writeString($viter877); + $xfer += $output->writeString($kiter904); + $xfer += $output->writeString($viter905); } } $output->writeMapEnd(); @@ -27732,17 +27868,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size878 = 0; - $_ktype879 = 0; - $_vtype880 = 0; - $xfer += $input->readMapBegin($_ktype879, $_vtype880, $_size878); - for ($_i882 = 0; $_i882 < $_size878; ++$_i882) + $_size906 = 0; + $_ktype907 = 0; + $_vtype908 = 0; + $xfer += $input->readMapBegin($_ktype907, $_vtype908, $_size906); + for ($_i910 = 0; $_i910 < $_size906; ++$_i910) { - $key883 = ''; - $val884 = ''; - $xfer += $input->readString($key883); - $xfer += $input->readString($val884); - $this->part_vals[$key883] = $val884; + $key911 = ''; + $val912 = ''; + $xfer += $input->readString($key911); + $xfer += $input->readString($val912); + $this->part_vals[$key911] = $val912; } $xfer += $input->readMapEnd(); } else { @@ -27787,10 +27923,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter885 => $viter886) + foreach ($this->part_vals as $kiter913 => $viter914) { - $xfer += $output->writeString($kiter885); - $xfer += $output->writeString($viter886); + $xfer += $output->writeString($kiter913); + $xfer += $output->writeString($viter914); } } $output->writeMapEnd(); @@ -29264,15 +29400,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size887 = 0; - $_etype890 = 0; - $xfer += $input->readListBegin($_etype890, $_size887); - for ($_i891 = 0; $_i891 < $_size887; ++$_i891) + $_size915 = 0; + $_etype918 = 0; + $xfer += $input->readListBegin($_etype918, $_size915); + for ($_i919 = 0; $_i919 < $_size915; ++$_i919) { - $elem892 = null; - $elem892 = new \metastore\Index(); - $xfer += $elem892->read($input); - $this->success []= $elem892; + $elem920 = null; + $elem920 = new \metastore\Index(); + $xfer += $elem920->read($input); + $this->success []= $elem920; } $xfer += $input->readListEnd(); } else { @@ -29316,9 +29452,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, 
count($this->success)); { - foreach ($this->success as $iter893) + foreach ($this->success as $iter921) { - $xfer += $iter893->write($output); + $xfer += $iter921->write($output); } } $output->writeListEnd(); @@ -29525,14 +29661,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size894 = 0; - $_etype897 = 0; - $xfer += $input->readListBegin($_etype897, $_size894); - for ($_i898 = 0; $_i898 < $_size894; ++$_i898) + $_size922 = 0; + $_etype925 = 0; + $xfer += $input->readListBegin($_etype925, $_size922); + for ($_i926 = 0; $_i926 < $_size922; ++$_i926) { - $elem899 = null; - $xfer += $input->readString($elem899); - $this->success []= $elem899; + $elem927 = null; + $xfer += $input->readString($elem927); + $this->success []= $elem927; } $xfer += $input->readListEnd(); } else { @@ -29568,9 +29704,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter900) + foreach ($this->success as $iter928) { - $xfer += $output->writeString($iter900); + $xfer += $output->writeString($iter928); } } $output->writeListEnd(); @@ -29589,6 +29725,554 @@ class ThriftHiveMetastore_get_index_names_result { } +class ThriftHiveMetastore_get_primary_keys_args { + static $_TSPEC; + + /** + * @var string + */ + public $db_name = null; + /** + * @var string + */ + public $tbl_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_primary_keys_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_primary_keys_args'); + if ($this->db_name !== null) { + $xfer += $output->writeFieldBegin('db_name', TType::STRING, 1); + $xfer += $output->writeString($this->db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->tbl_name !== null) { + $xfer += $output->writeFieldBegin('tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->tbl_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_primary_keys_result { + static $_TSPEC; + + /** + * @var \metastore\SQLPrimaryKey[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + /** + * @var 
\metastore\NoSuchObjectException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\SQLPrimaryKey', + ), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_primary_keys_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size929 = 0; + $_etype932 = 0; + $xfer += $input->readListBegin($_etype932, $_size929); + for ($_i933 = 0; $_i933 < $_size929; ++$_i933) + { + $elem934 = null; + $elem934 = new \metastore\SQLPrimaryKey(); + $xfer += $elem934->read($input); + $this->success []= $elem934; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\NoSuchObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_primary_keys_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRUCT, count($this->success)); + { + foreach ($this->success as $iter935) + { + $xfer += $iter935->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_foreign_keys_args { + static $_TSPEC; + + /** + * @var string + */ + public $parent_db_name = null; + /** + * @var string + */ + public $parent_tbl_name = null; + /** + * @var string + */ + public $foreign_db_name = null; + /** + * @var string + */ + public $foreign_tbl_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + 
self::$_TSPEC = array( + 1 => array( + 'var' => 'parent_db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'parent_tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'foreign_db_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'foreign_tbl_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['parent_db_name'])) { + $this->parent_db_name = $vals['parent_db_name']; + } + if (isset($vals['parent_tbl_name'])) { + $this->parent_tbl_name = $vals['parent_tbl_name']; + } + if (isset($vals['foreign_db_name'])) { + $this->foreign_db_name = $vals['foreign_db_name']; + } + if (isset($vals['foreign_tbl_name'])) { + $this->foreign_tbl_name = $vals['foreign_tbl_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_foreign_keys_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parent_db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parent_tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->foreign_db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->foreign_tbl_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_foreign_keys_args'); + if ($this->parent_db_name !== null) { + $xfer += $output->writeFieldBegin('parent_db_name', TType::STRING, 1); + $xfer += $output->writeString($this->parent_db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->parent_tbl_name !== null) { + $xfer += $output->writeFieldBegin('parent_tbl_name', TType::STRING, 2); + $xfer += $output->writeString($this->parent_tbl_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->foreign_db_name !== null) { + $xfer += $output->writeFieldBegin('foreign_db_name', TType::STRING, 3); + $xfer += $output->writeString($this->foreign_db_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->foreign_tbl_name !== null) { + $xfer += $output->writeFieldBegin('foreign_tbl_name', TType::STRING, 4); + $xfer += $output->writeString($this->foreign_tbl_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_get_foreign_keys_result { + static $_TSPEC; + + /** + * @var \metastore\SQLForeignKey[] + */ + public $success = null; + /** + * @var \metastore\MetaException + */ + public $o1 = null; + /** + * @var \metastore\NoSuchObjectException + */ + public $o2 = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\SQLForeignKey', + 
), + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_get_foreign_keys_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size936 = 0; + $_etype939 = 0; + $xfer += $input->readListBegin($_etype939, $_size936); + for ($_i940 = 0; $_i940 < $_size936; ++$_i940) + { + $elem941 = null; + $elem941 = new \metastore\SQLForeignKey(); + $xfer += $elem941->read($input); + $this->success []= $elem941; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\MetaException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\NoSuchObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_get_foreign_keys_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRUCT, count($this->success)); + { + foreach ($this->success as $iter942) + { + $xfer += $iter942->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class ThriftHiveMetastore_update_table_column_statistics_args { static $_TSPEC; @@ -33044,14 +33728,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size901 = 0; - $_etype904 = 0; - $xfer += $input->readListBegin($_etype904, $_size901); - for ($_i905 = 0; $_i905 < $_size901; ++$_i905) + $_size943 = 0; + $_etype946 = 0; + $xfer += $input->readListBegin($_etype946, $_size943); + for ($_i947 = 0; $_i947 < $_size943; ++$_i947) { - $elem906 = null; - $xfer += $input->readString($elem906); - $this->success []= $elem906; + $elem948 = null; + $xfer += $input->readString($elem948); + $this->success []= $elem948; } $xfer += $input->readListEnd(); } else { @@ 
-33087,9 +33771,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter907) + foreach ($this->success as $iter949) { - $xfer += $output->writeString($iter907); + $xfer += $output->writeString($iter949); } } $output->writeListEnd(); @@ -33958,14 +34642,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size908 = 0; - $_etype911 = 0; - $xfer += $input->readListBegin($_etype911, $_size908); - for ($_i912 = 0; $_i912 < $_size908; ++$_i912) + $_size950 = 0; + $_etype953 = 0; + $xfer += $input->readListBegin($_etype953, $_size950); + for ($_i954 = 0; $_i954 < $_size950; ++$_i954) { - $elem913 = null; - $xfer += $input->readString($elem913); - $this->success []= $elem913; + $elem955 = null; + $xfer += $input->readString($elem955); + $this->success []= $elem955; } $xfer += $input->readListEnd(); } else { @@ -34001,9 +34685,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter914) + foreach ($this->success as $iter956) { - $xfer += $output->writeString($iter914); + $xfer += $output->writeString($iter956); } } $output->writeListEnd(); @@ -34694,15 +35378,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size915 = 0; - $_etype918 = 0; - $xfer += $input->readListBegin($_etype918, $_size915); - for ($_i919 = 0; $_i919 < $_size915; ++$_i919) + $_size957 = 0; + $_etype960 = 0; + $xfer += $input->readListBegin($_etype960, $_size957); + for ($_i961 = 0; $_i961 < $_size957; ++$_i961) { - $elem920 = null; - $elem920 = new \metastore\Role(); - $xfer += $elem920->read($input); - $this->success []= $elem920; + $elem962 = null; + $elem962 = new \metastore\Role(); + $xfer += $elem962->read($input); + $this->success []= $elem962; } $xfer += $input->readListEnd(); } else { @@ -34738,9 +35422,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter921) + foreach ($this->success as $iter963) { - $xfer += $iter921->write($output); + $xfer += $iter963->write($output); } } $output->writeListEnd(); @@ -35402,14 +36086,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size922 = 0; - $_etype925 = 0; - $xfer += $input->readListBegin($_etype925, $_size922); - for ($_i926 = 0; $_i926 < $_size922; ++$_i926) + $_size964 = 0; + $_etype967 = 0; + $xfer += $input->readListBegin($_etype967, $_size964); + for ($_i968 = 0; $_i968 < $_size964; ++$_i968) { - $elem927 = null; - $xfer += $input->readString($elem927); - $this->group_names []= $elem927; + $elem969 = null; + $xfer += $input->readString($elem969); + $this->group_names []= $elem969; } $xfer += $input->readListEnd(); } else { @@ -35450,9 +36134,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter928) + foreach ($this->group_names as $iter970) { - $xfer += $output->writeString($iter928); + $xfer += $output->writeString($iter970); } } $output->writeListEnd(); @@ -35760,15 +36444,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size929 = 0; - $_etype932 = 0; - $xfer += 
$input->readListBegin($_etype932, $_size929); - for ($_i933 = 0; $_i933 < $_size929; ++$_i933) + $_size971 = 0; + $_etype974 = 0; + $xfer += $input->readListBegin($_etype974, $_size971); + for ($_i975 = 0; $_i975 < $_size971; ++$_i975) { - $elem934 = null; - $elem934 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem934->read($input); - $this->success []= $elem934; + $elem976 = null; + $elem976 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem976->read($input); + $this->success []= $elem976; } $xfer += $input->readListEnd(); } else { @@ -35804,9 +36488,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter935) + foreach ($this->success as $iter977) { - $xfer += $iter935->write($output); + $xfer += $iter977->write($output); } } $output->writeListEnd(); @@ -36438,14 +37122,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size936 = 0; - $_etype939 = 0; - $xfer += $input->readListBegin($_etype939, $_size936); - for ($_i940 = 0; $_i940 < $_size936; ++$_i940) + $_size978 = 0; + $_etype981 = 0; + $xfer += $input->readListBegin($_etype981, $_size978); + for ($_i982 = 0; $_i982 < $_size978; ++$_i982) { - $elem941 = null; - $xfer += $input->readString($elem941); - $this->group_names []= $elem941; + $elem983 = null; + $xfer += $input->readString($elem983); + $this->group_names []= $elem983; } $xfer += $input->readListEnd(); } else { @@ -36478,9 +37162,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter942) + foreach ($this->group_names as $iter984) { - $xfer += $output->writeString($iter942); + $xfer += $output->writeString($iter984); } } $output->writeListEnd(); @@ -36556,14 +37240,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size943 = 0; - $_etype946 = 0; - $xfer += $input->readListBegin($_etype946, $_size943); - for ($_i947 = 0; $_i947 < $_size943; ++$_i947) + $_size985 = 0; + $_etype988 = 0; + $xfer += $input->readListBegin($_etype988, $_size985); + for ($_i989 = 0; $_i989 < $_size985; ++$_i989) { - $elem948 = null; - $xfer += $input->readString($elem948); - $this->success []= $elem948; + $elem990 = null; + $xfer += $input->readString($elem990); + $this->success []= $elem990; } $xfer += $input->readListEnd(); } else { @@ -36599,9 +37283,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter949) + foreach ($this->success as $iter991) { - $xfer += $output->writeString($iter949); + $xfer += $output->writeString($iter991); } } $output->writeListEnd(); @@ -37718,14 +38402,14 @@ class ThriftHiveMetastore_get_all_token_identifiers_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size950 = 0; - $_etype953 = 0; - $xfer += $input->readListBegin($_etype953, $_size950); - for ($_i954 = 0; $_i954 < $_size950; ++$_i954) + $_size992 = 0; + $_etype995 = 0; + $xfer += $input->readListBegin($_etype995, $_size992); + for ($_i996 = 0; $_i996 < $_size992; ++$_i996) { - $elem955 = null; - $xfer += $input->readString($elem955); - $this->success []= $elem955; + $elem997 = null; + $xfer += $input->readString($elem997); + $this->success []= $elem997; } $xfer += $input->readListEnd(); } else { @@ -37753,9 +38437,9 @@ class 
ThriftHiveMetastore_get_all_token_identifiers_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter956) + foreach ($this->success as $iter998) { - $xfer += $output->writeString($iter956); + $xfer += $output->writeString($iter998); } } $output->writeListEnd(); @@ -38394,14 +39078,14 @@ class ThriftHiveMetastore_get_master_keys_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size957 = 0; - $_etype960 = 0; - $xfer += $input->readListBegin($_etype960, $_size957); - for ($_i961 = 0; $_i961 < $_size957; ++$_i961) + $_size999 = 0; + $_etype1002 = 0; + $xfer += $input->readListBegin($_etype1002, $_size999); + for ($_i1003 = 0; $_i1003 < $_size999; ++$_i1003) { - $elem962 = null; - $xfer += $input->readString($elem962); - $this->success []= $elem962; + $elem1004 = null; + $xfer += $input->readString($elem1004); + $this->success []= $elem1004; } $xfer += $input->readListEnd(); } else { @@ -38429,9 +39113,9 @@ class ThriftHiveMetastore_get_master_keys_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter963) + foreach ($this->success as $iter1005) { - $xfer += $output->writeString($iter963); + $xfer += $output->writeString($iter1005); } } $output->writeListEnd(); diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 488a920..2846ccb 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -369,6 +369,701 @@ class FieldSchema { } +class ForeignKeyRel { + static $_TSPEC; + + /** + * @var \metastore\FieldSchema[] + */ + public $foreignCols = null; + /** + * @var \metastore\FieldSchema[] + */ + public $parentCols = null; + /** + * @var string + */ + public $foreignKeyName = null; + /** + * @var string + */ + public $parentTableName = null; + /** + * @var string + */ + public $parentTableSchema = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'foreignCols', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\FieldSchema', + ), + ), + 2 => array( + 'var' => 'parentCols', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\FieldSchema', + ), + ), + 3 => array( + 'var' => 'foreignKeyName', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'parentTableName', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'parentTableSchema', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['foreignCols'])) { + $this->foreignCols = $vals['foreignCols']; + } + if (isset($vals['parentCols'])) { + $this->parentCols = $vals['parentCols']; + } + if (isset($vals['foreignKeyName'])) { + $this->foreignKeyName = $vals['foreignKeyName']; + } + if (isset($vals['parentTableName'])) { + $this->parentTableName = $vals['parentTableName']; + } + if (isset($vals['parentTableSchema'])) { + $this->parentTableSchema = $vals['parentTableSchema']; + } + } + } + + public function getName() { + return 'ForeignKeyRel'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + 
case 1: + if ($ftype == TType::LST) { + $this->foreignCols = array(); + $_size0 = 0; + $_etype3 = 0; + $xfer += $input->readListBegin($_etype3, $_size0); + for ($_i4 = 0; $_i4 < $_size0; ++$_i4) + { + $elem5 = null; + $elem5 = new \metastore\FieldSchema(); + $xfer += $elem5->read($input); + $this->foreignCols []= $elem5; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::LST) { + $this->parentCols = array(); + $_size6 = 0; + $_etype9 = 0; + $xfer += $input->readListBegin($_etype9, $_size6); + for ($_i10 = 0; $_i10 < $_size6; ++$_i10) + { + $elem11 = null; + $elem11 = new \metastore\FieldSchema(); + $xfer += $elem11->read($input); + $this->parentCols []= $elem11; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->foreignKeyName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parentTableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parentTableSchema); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ForeignKeyRel'); + if ($this->foreignCols !== null) { + if (!is_array($this->foreignCols)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('foreignCols', TType::LST, 1); + { + $output->writeListBegin(TType::STRUCT, count($this->foreignCols)); + { + foreach ($this->foreignCols as $iter12) + { + $xfer += $iter12->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->parentCols !== null) { + if (!is_array($this->parentCols)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('parentCols', TType::LST, 2); + { + $output->writeListBegin(TType::STRUCT, count($this->parentCols)); + { + foreach ($this->parentCols as $iter13) + { + $xfer += $iter13->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->foreignKeyName !== null) { + $xfer += $output->writeFieldBegin('foreignKeyName', TType::STRING, 3); + $xfer += $output->writeString($this->foreignKeyName); + $xfer += $output->writeFieldEnd(); + } + if ($this->parentTableName !== null) { + $xfer += $output->writeFieldBegin('parentTableName', TType::STRING, 4); + $xfer += $output->writeString($this->parentTableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->parentTableSchema !== null) { + $xfer += $output->writeFieldBegin('parentTableSchema', TType::STRING, 5); + $xfer += $output->writeString($this->parentTableSchema); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class SQLPrimaryKey { + static $_TSPEC; + + /** + * @var string + */ + public $table_schem = null; + /** + * @var string + */ + public $table_name = null; + /** + * @var string + */ + public $column_name = null; + /** + * @var int + */ + public $key_seq = null; + /** + * @var string + */ + 
public $pk_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'table_schem', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'table_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'column_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'key_seq', + 'type' => TType::I32, + ), + 5 => array( + 'var' => 'pk_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['table_schem'])) { + $this->table_schem = $vals['table_schem']; + } + if (isset($vals['table_name'])) { + $this->table_name = $vals['table_name']; + } + if (isset($vals['column_name'])) { + $this->column_name = $vals['column_name']; + } + if (isset($vals['key_seq'])) { + $this->key_seq = $vals['key_seq']; + } + if (isset($vals['pk_name'])) { + $this->pk_name = $vals['pk_name']; + } + } + } + + public function getName() { + return 'SQLPrimaryKey'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->table_schem); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->table_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->column_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->key_seq); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pk_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SQLPrimaryKey'); + if ($this->table_schem !== null) { + $xfer += $output->writeFieldBegin('table_schem', TType::STRING, 1); + $xfer += $output->writeString($this->table_schem); + $xfer += $output->writeFieldEnd(); + } + if ($this->table_name !== null) { + $xfer += $output->writeFieldBegin('table_name', TType::STRING, 2); + $xfer += $output->writeString($this->table_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->column_name !== null) { + $xfer += $output->writeFieldBegin('column_name', TType::STRING, 3); + $xfer += $output->writeString($this->column_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->key_seq !== null) { + $xfer += $output->writeFieldBegin('key_seq', TType::I32, 4); + $xfer += $output->writeI32($this->key_seq); + $xfer += $output->writeFieldEnd(); + } + if ($this->pk_name !== null) { + $xfer += $output->writeFieldBegin('pk_name', TType::STRING, 5); + $xfer += $output->writeString($this->pk_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class SQLForeignKey { + static $_TSPEC; + + /** + * @var string + */ + public $pktable_schem = null; + /** + * @var string + */ + public $pktable_name = null; + /** + * @var string + */ + public $pkcolumn_name = null; + /** 
+ * @var string + */ + public $fktable_schem = null; + /** + * @var string + */ + public $fktable_name = null; + /** + * @var string + */ + public $fkcolumn_name = null; + /** + * @var int + */ + public $key_seq = null; + /** + * @var int + */ + public $update_rule = null; + /** + * @var int + */ + public $delete_rule = null; + /** + * @var string + */ + public $fk_name = null; + /** + * @var string + */ + public $pk_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'pktable_schem', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'pktable_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'pkcolumn_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'fktable_schem', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'fktable_name', + 'type' => TType::STRING, + ), + 6 => array( + 'var' => 'fkcolumn_name', + 'type' => TType::STRING, + ), + 7 => array( + 'var' => 'key_seq', + 'type' => TType::I32, + ), + 8 => array( + 'var' => 'update_rule', + 'type' => TType::I32, + ), + 9 => array( + 'var' => 'delete_rule', + 'type' => TType::I32, + ), + 10 => array( + 'var' => 'fk_name', + 'type' => TType::STRING, + ), + 11 => array( + 'var' => 'pk_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['pktable_schem'])) { + $this->pktable_schem = $vals['pktable_schem']; + } + if (isset($vals['pktable_name'])) { + $this->pktable_name = $vals['pktable_name']; + } + if (isset($vals['pkcolumn_name'])) { + $this->pkcolumn_name = $vals['pkcolumn_name']; + } + if (isset($vals['fktable_schem'])) { + $this->fktable_schem = $vals['fktable_schem']; + } + if (isset($vals['fktable_name'])) { + $this->fktable_name = $vals['fktable_name']; + } + if (isset($vals['fkcolumn_name'])) { + $this->fkcolumn_name = $vals['fkcolumn_name']; + } + if (isset($vals['key_seq'])) { + $this->key_seq = $vals['key_seq']; + } + if (isset($vals['update_rule'])) { + $this->update_rule = $vals['update_rule']; + } + if (isset($vals['delete_rule'])) { + $this->delete_rule = $vals['delete_rule']; + } + if (isset($vals['fk_name'])) { + $this->fk_name = $vals['fk_name']; + } + if (isset($vals['pk_name'])) { + $this->pk_name = $vals['pk_name']; + } + } + } + + public function getName() { + return 'SQLForeignKey'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pktable_schem); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pktable_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pkcolumn_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->fktable_schem); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->fktable_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->fkcolumn_name); + } else { + $xfer += $input->skip($ftype); + } + 
break; + case 7: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->key_seq); + } else { + $xfer += $input->skip($ftype); + } + break; + case 8: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->update_rule); + } else { + $xfer += $input->skip($ftype); + } + break; + case 9: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->delete_rule); + } else { + $xfer += $input->skip($ftype); + } + break; + case 10: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->fk_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 11: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->pk_name); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SQLForeignKey'); + if ($this->pktable_schem !== null) { + $xfer += $output->writeFieldBegin('pktable_schem', TType::STRING, 1); + $xfer += $output->writeString($this->pktable_schem); + $xfer += $output->writeFieldEnd(); + } + if ($this->pktable_name !== null) { + $xfer += $output->writeFieldBegin('pktable_name', TType::STRING, 2); + $xfer += $output->writeString($this->pktable_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->pkcolumn_name !== null) { + $xfer += $output->writeFieldBegin('pkcolumn_name', TType::STRING, 3); + $xfer += $output->writeString($this->pkcolumn_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->fktable_schem !== null) { + $xfer += $output->writeFieldBegin('fktable_schem', TType::STRING, 4); + $xfer += $output->writeString($this->fktable_schem); + $xfer += $output->writeFieldEnd(); + } + if ($this->fktable_name !== null) { + $xfer += $output->writeFieldBegin('fktable_name', TType::STRING, 5); + $xfer += $output->writeString($this->fktable_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->fkcolumn_name !== null) { + $xfer += $output->writeFieldBegin('fkcolumn_name', TType::STRING, 6); + $xfer += $output->writeString($this->fkcolumn_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->key_seq !== null) { + $xfer += $output->writeFieldBegin('key_seq', TType::I32, 7); + $xfer += $output->writeI32($this->key_seq); + $xfer += $output->writeFieldEnd(); + } + if ($this->update_rule !== null) { + $xfer += $output->writeFieldBegin('update_rule', TType::I32, 8); + $xfer += $output->writeI32($this->update_rule); + $xfer += $output->writeFieldEnd(); + } + if ($this->delete_rule !== null) { + $xfer += $output->writeFieldBegin('delete_rule', TType::I32, 9); + $xfer += $output->writeI32($this->delete_rule); + $xfer += $output->writeFieldEnd(); + } + if ($this->fk_name !== null) { + $xfer += $output->writeFieldBegin('fk_name', TType::STRING, 10); + $xfer += $output->writeString($this->fk_name); + $xfer += $output->writeFieldEnd(); + } + if ($this->pk_name !== null) { + $xfer += $output->writeFieldBegin('pk_name', TType::STRING, 11); + $xfer += $output->writeString($this->pk_name); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class Type { static $_TSPEC; @@ -474,15 +1169,15 @@ class Type { case 4: if ($ftype == TType::LST) { $this->fields = array(); - $_size0 = 0; - $_etype3 = 0; - $xfer += $input->readListBegin($_etype3, $_size0); - for ($_i4 = 0; $_i4 < $_size0; ++$_i4) + $_size14 = 0; + 
$_etype17 = 0; + $xfer += $input->readListBegin($_etype17, $_size14); + for ($_i18 = 0; $_i18 < $_size14; ++$_i18) { - $elem5 = null; - $elem5 = new \metastore\FieldSchema(); - $xfer += $elem5->read($input); - $this->fields []= $elem5; + $elem19 = null; + $elem19 = new \metastore\FieldSchema(); + $xfer += $elem19->read($input); + $this->fields []= $elem19; } $xfer += $input->readListEnd(); } else { @@ -525,9 +1220,9 @@ class Type { { $output->writeListBegin(TType::STRUCT, count($this->fields)); { - foreach ($this->fields as $iter6) + foreach ($this->fields as $iter20) { - $xfer += $iter6->write($output); + $xfer += $iter20->write($output); } } $output->writeListEnd(); @@ -656,14 +1351,14 @@ class HiveObjectRef { case 4: if ($ftype == TType::LST) { $this->partValues = array(); - $_size7 = 0; - $_etype10 = 0; - $xfer += $input->readListBegin($_etype10, $_size7); - for ($_i11 = 0; $_i11 < $_size7; ++$_i11) + $_size21 = 0; + $_etype24 = 0; + $xfer += $input->readListBegin($_etype24, $_size21); + for ($_i25 = 0; $_i25 < $_size21; ++$_i25) { - $elem12 = null; - $xfer += $input->readString($elem12); - $this->partValues []= $elem12; + $elem26 = null; + $xfer += $input->readString($elem26); + $this->partValues []= $elem26; } $xfer += $input->readListEnd(); } else { @@ -713,9 +1408,9 @@ class HiveObjectRef { { $output->writeListBegin(TType::STRING, count($this->partValues)); { - foreach ($this->partValues as $iter13) + foreach ($this->partValues as $iter27) { - $xfer += $output->writeString($iter13); + $xfer += $output->writeString($iter27); } } $output->writeListEnd(); @@ -1106,15 +1801,15 @@ class PrivilegeBag { case 1: if ($ftype == TType::LST) { $this->privileges = array(); - $_size14 = 0; - $_etype17 = 0; - $xfer += $input->readListBegin($_etype17, $_size14); - for ($_i18 = 0; $_i18 < $_size14; ++$_i18) + $_size28 = 0; + $_etype31 = 0; + $xfer += $input->readListBegin($_etype31, $_size28); + for ($_i32 = 0; $_i32 < $_size28; ++$_i32) { - $elem19 = null; - $elem19 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem19->read($input); - $this->privileges []= $elem19; + $elem33 = null; + $elem33 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem33->read($input); + $this->privileges []= $elem33; } $xfer += $input->readListEnd(); } else { @@ -1142,9 +1837,9 @@ class PrivilegeBag { { $output->writeListBegin(TType::STRUCT, count($this->privileges)); { - foreach ($this->privileges as $iter20) + foreach ($this->privileges as $iter34) { - $xfer += $iter20->write($output); + $xfer += $iter34->write($output); } } $output->writeListEnd(); @@ -1265,28 +1960,28 @@ class PrincipalPrivilegeSet { case 1: if ($ftype == TType::MAP) { $this->userPrivileges = array(); - $_size21 = 0; - $_ktype22 = 0; - $_vtype23 = 0; - $xfer += $input->readMapBegin($_ktype22, $_vtype23, $_size21); - for ($_i25 = 0; $_i25 < $_size21; ++$_i25) + $_size35 = 0; + $_ktype36 = 0; + $_vtype37 = 0; + $xfer += $input->readMapBegin($_ktype36, $_vtype37, $_size35); + for ($_i39 = 0; $_i39 < $_size35; ++$_i39) { - $key26 = ''; - $val27 = array(); - $xfer += $input->readString($key26); - $val27 = array(); - $_size28 = 0; - $_etype31 = 0; - $xfer += $input->readListBegin($_etype31, $_size28); - for ($_i32 = 0; $_i32 < $_size28; ++$_i32) + $key40 = ''; + $val41 = array(); + $xfer += $input->readString($key40); + $val41 = array(); + $_size42 = 0; + $_etype45 = 0; + $xfer += $input->readListBegin($_etype45, $_size42); + for ($_i46 = 0; $_i46 < $_size42; ++$_i46) { - $elem33 = null; - $elem33 = new \metastore\PrivilegeGrantInfo(); - $xfer += 
$elem33->read($input); - $val27 []= $elem33; + $elem47 = null; + $elem47 = new \metastore\PrivilegeGrantInfo(); + $xfer += $elem47->read($input); + $val41 []= $elem47; } $xfer += $input->readListEnd(); - $this->userPrivileges[$key26] = $val27; + $this->userPrivileges[$key40] = $val41; } $xfer += $input->readMapEnd(); } else { @@ -1296,28 +1991,28 @@ class PrincipalPrivilegeSet { case 2: if ($ftype == TType::MAP) { $this->groupPrivileges = array(); - $_size34 = 0; - $_ktype35 = 0; - $_vtype36 = 0; - $xfer += $input->readMapBegin($_ktype35, $_vtype36, $_size34); - for ($_i38 = 0; $_i38 < $_size34; ++$_i38) + $_size48 = 0; + $_ktype49 = 0; + $_vtype50 = 0; + $xfer += $input->readMapBegin($_ktype49, $_vtype50, $_size48); + for ($_i52 = 0; $_i52 < $_size48; ++$_i52) { - $key39 = ''; - $val40 = array(); - $xfer += $input->readString($key39); - $val40 = array(); - $_size41 = 0; - $_etype44 = 0; - $xfer += $input->readListBegin($_etype44, $_size41); - for ($_i45 = 0; $_i45 < $_size41; ++$_i45) + $key53 = ''; + $val54 = array(); + $xfer += $input->readString($key53); + $val54 = array(); + $_size55 = 0; + $_etype58 = 0; + $xfer += $input->readListBegin($_etype58, $_size55); + for ($_i59 = 0; $_i59 < $_size55; ++$_i59) { - $elem46 = null; - $elem46 = new \metastore\PrivilegeGrantInfo(); - $xfer += $elem46->read($input); - $val40 []= $elem46; + $elem60 = null; + $elem60 = new \metastore\PrivilegeGrantInfo(); + $xfer += $elem60->read($input); + $val54 []= $elem60; } $xfer += $input->readListEnd(); - $this->groupPrivileges[$key39] = $val40; + $this->groupPrivileges[$key53] = $val54; } $xfer += $input->readMapEnd(); } else { @@ -1327,28 +2022,28 @@ class PrincipalPrivilegeSet { case 3: if ($ftype == TType::MAP) { $this->rolePrivileges = array(); - $_size47 = 0; - $_ktype48 = 0; - $_vtype49 = 0; - $xfer += $input->readMapBegin($_ktype48, $_vtype49, $_size47); - for ($_i51 = 0; $_i51 < $_size47; ++$_i51) + $_size61 = 0; + $_ktype62 = 0; + $_vtype63 = 0; + $xfer += $input->readMapBegin($_ktype62, $_vtype63, $_size61); + for ($_i65 = 0; $_i65 < $_size61; ++$_i65) { - $key52 = ''; - $val53 = array(); - $xfer += $input->readString($key52); - $val53 = array(); - $_size54 = 0; - $_etype57 = 0; - $xfer += $input->readListBegin($_etype57, $_size54); - for ($_i58 = 0; $_i58 < $_size54; ++$_i58) + $key66 = ''; + $val67 = array(); + $xfer += $input->readString($key66); + $val67 = array(); + $_size68 = 0; + $_etype71 = 0; + $xfer += $input->readListBegin($_etype71, $_size68); + for ($_i72 = 0; $_i72 < $_size68; ++$_i72) { - $elem59 = null; - $elem59 = new \metastore\PrivilegeGrantInfo(); - $xfer += $elem59->read($input); - $val53 []= $elem59; + $elem73 = null; + $elem73 = new \metastore\PrivilegeGrantInfo(); + $xfer += $elem73->read($input); + $val67 []= $elem73; } $xfer += $input->readListEnd(); - $this->rolePrivileges[$key52] = $val53; + $this->rolePrivileges[$key66] = $val67; } $xfer += $input->readMapEnd(); } else { @@ -1376,15 +2071,15 @@ class PrincipalPrivilegeSet { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->userPrivileges)); { - foreach ($this->userPrivileges as $kiter60 => $viter61) + foreach ($this->userPrivileges as $kiter74 => $viter75) { - $xfer += $output->writeString($kiter60); + $xfer += $output->writeString($kiter74); { - $output->writeListBegin(TType::STRUCT, count($viter61)); + $output->writeListBegin(TType::STRUCT, count($viter75)); { - foreach ($viter61 as $iter62) + foreach ($viter75 as $iter76) { - $xfer += $iter62->write($output); + $xfer += $iter76->write($output); } } 
$output->writeListEnd(); @@ -1403,15 +2098,15 @@ class PrincipalPrivilegeSet { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->groupPrivileges)); { - foreach ($this->groupPrivileges as $kiter63 => $viter64) + foreach ($this->groupPrivileges as $kiter77 => $viter78) { - $xfer += $output->writeString($kiter63); + $xfer += $output->writeString($kiter77); { - $output->writeListBegin(TType::STRUCT, count($viter64)); + $output->writeListBegin(TType::STRUCT, count($viter78)); { - foreach ($viter64 as $iter65) + foreach ($viter78 as $iter79) { - $xfer += $iter65->write($output); + $xfer += $iter79->write($output); } } $output->writeListEnd(); @@ -1430,15 +2125,15 @@ class PrincipalPrivilegeSet { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->rolePrivileges)); { - foreach ($this->rolePrivileges as $kiter66 => $viter67) + foreach ($this->rolePrivileges as $kiter80 => $viter81) { - $xfer += $output->writeString($kiter66); + $xfer += $output->writeString($kiter80); { - $output->writeListBegin(TType::STRUCT, count($viter67)); + $output->writeListBegin(TType::STRUCT, count($viter81)); { - foreach ($viter67 as $iter68) + foreach ($viter81 as $iter82) { - $xfer += $iter68->write($output); + $xfer += $iter82->write($output); } } $output->writeListEnd(); @@ -2140,15 +2835,15 @@ class GetRoleGrantsForPrincipalResponse { case 1: if ($ftype == TType::LST) { $this->principalGrants = array(); - $_size69 = 0; - $_etype72 = 0; - $xfer += $input->readListBegin($_etype72, $_size69); - for ($_i73 = 0; $_i73 < $_size69; ++$_i73) + $_size83 = 0; + $_etype86 = 0; + $xfer += $input->readListBegin($_etype86, $_size83); + for ($_i87 = 0; $_i87 < $_size83; ++$_i87) { - $elem74 = null; - $elem74 = new \metastore\RolePrincipalGrant(); - $xfer += $elem74->read($input); - $this->principalGrants []= $elem74; + $elem88 = null; + $elem88 = new \metastore\RolePrincipalGrant(); + $xfer += $elem88->read($input); + $this->principalGrants []= $elem88; } $xfer += $input->readListEnd(); } else { @@ -2176,9 +2871,9 @@ class GetRoleGrantsForPrincipalResponse { { $output->writeListBegin(TType::STRUCT, count($this->principalGrants)); { - foreach ($this->principalGrants as $iter75) + foreach ($this->principalGrants as $iter89) { - $xfer += $iter75->write($output); + $xfer += $iter89->write($output); } } $output->writeListEnd(); @@ -2318,15 +3013,15 @@ class GetPrincipalsInRoleResponse { case 1: if ($ftype == TType::LST) { $this->principalGrants = array(); - $_size76 = 0; - $_etype79 = 0; - $xfer += $input->readListBegin($_etype79, $_size76); - for ($_i80 = 0; $_i80 < $_size76; ++$_i80) + $_size90 = 0; + $_etype93 = 0; + $xfer += $input->readListBegin($_etype93, $_size90); + for ($_i94 = 0; $_i94 < $_size90; ++$_i94) { - $elem81 = null; - $elem81 = new \metastore\RolePrincipalGrant(); - $xfer += $elem81->read($input); - $this->principalGrants []= $elem81; + $elem95 = null; + $elem95 = new \metastore\RolePrincipalGrant(); + $xfer += $elem95->read($input); + $this->principalGrants []= $elem95; } $xfer += $input->readListEnd(); } else { @@ -2354,9 +3049,9 @@ class GetPrincipalsInRoleResponse { { $output->writeListBegin(TType::STRUCT, count($this->principalGrants)); { - foreach ($this->principalGrants as $iter82) + foreach ($this->principalGrants as $iter96) { - $xfer += $iter82->write($output); + $xfer += $iter96->write($output); } } $output->writeListEnd(); @@ -2800,17 +3495,17 @@ class Database { case 4: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size83 = 0; - $_ktype84 = 0; - $_vtype85 = 0; 
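// --- Editor's note (illustrative sketch, not part of the generated patch) ---
// PrincipalPrivilegeSet above shows the generator's pattern for a Thrift
// map<string, list<PrivilegeGrantInfo>>: a plain PHP associative array of
// arrays, written as writeMapBegin(TType::STRING, TType::LST, ...) with a
// nested writeListBegin(TType::STRUCT, ...) per key. Building such a value
// by hand follows the same shape; the empty PrivilegeGrantInfo instances
// here are placeholders, since that struct's members are not shown in this hunk.
$userPrivileges = array(
  'alice' => array(new \metastore\PrivilegeGrantInfo()),
  'bob'   => array(new \metastore\PrivilegeGrantInfo()),
);
$set = new \metastore\PrincipalPrivilegeSet(array(
  'userPrivileges' => $userPrivileges,
));
// The renumbering in this hunk ($key26/$val27 -> $key40/$val41, and so on)
// is a side effect of regenerating the whole file; the wire format of these
// maps is unchanged.
// ----------------------------------------------------------------------------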
- $xfer += $input->readMapBegin($_ktype84, $_vtype85, $_size83); - for ($_i87 = 0; $_i87 < $_size83; ++$_i87) + $_size97 = 0; + $_ktype98 = 0; + $_vtype99 = 0; + $xfer += $input->readMapBegin($_ktype98, $_vtype99, $_size97); + for ($_i101 = 0; $_i101 < $_size97; ++$_i101) { - $key88 = ''; - $val89 = ''; - $xfer += $input->readString($key88); - $xfer += $input->readString($val89); - $this->parameters[$key88] = $val89; + $key102 = ''; + $val103 = ''; + $xfer += $input->readString($key102); + $xfer += $input->readString($val103); + $this->parameters[$key102] = $val103; } $xfer += $input->readMapEnd(); } else { @@ -2875,10 +3570,10 @@ class Database { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter90 => $viter91) + foreach ($this->parameters as $kiter104 => $viter105) { - $xfer += $output->writeString($kiter90); - $xfer += $output->writeString($viter91); + $xfer += $output->writeString($kiter104); + $xfer += $output->writeString($viter105); } } $output->writeMapEnd(); @@ -3000,17 +3695,17 @@ class SerDeInfo { case 3: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size92 = 0; - $_ktype93 = 0; - $_vtype94 = 0; - $xfer += $input->readMapBegin($_ktype93, $_vtype94, $_size92); - for ($_i96 = 0; $_i96 < $_size92; ++$_i96) + $_size106 = 0; + $_ktype107 = 0; + $_vtype108 = 0; + $xfer += $input->readMapBegin($_ktype107, $_vtype108, $_size106); + for ($_i110 = 0; $_i110 < $_size106; ++$_i110) { - $key97 = ''; - $val98 = ''; - $xfer += $input->readString($key97); - $xfer += $input->readString($val98); - $this->parameters[$key97] = $val98; + $key111 = ''; + $val112 = ''; + $xfer += $input->readString($key111); + $xfer += $input->readString($val112); + $this->parameters[$key111] = $val112; } $xfer += $input->readMapEnd(); } else { @@ -3048,10 +3743,10 @@ class SerDeInfo { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter99 => $viter100) + foreach ($this->parameters as $kiter113 => $viter114) { - $xfer += $output->writeString($kiter99); - $xfer += $output->writeString($viter100); + $xfer += $output->writeString($kiter113); + $xfer += $output->writeString($viter114); } } $output->writeMapEnd(); @@ -3255,14 +3950,14 @@ class SkewedInfo { case 1: if ($ftype == TType::LST) { $this->skewedColNames = array(); - $_size101 = 0; - $_etype104 = 0; - $xfer += $input->readListBegin($_etype104, $_size101); - for ($_i105 = 0; $_i105 < $_size101; ++$_i105) + $_size115 = 0; + $_etype118 = 0; + $xfer += $input->readListBegin($_etype118, $_size115); + for ($_i119 = 0; $_i119 < $_size115; ++$_i119) { - $elem106 = null; - $xfer += $input->readString($elem106); - $this->skewedColNames []= $elem106; + $elem120 = null; + $xfer += $input->readString($elem120); + $this->skewedColNames []= $elem120; } $xfer += $input->readListEnd(); } else { @@ -3272,24 +3967,24 @@ class SkewedInfo { case 2: if ($ftype == TType::LST) { $this->skewedColValues = array(); - $_size107 = 0; - $_etype110 = 0; - $xfer += $input->readListBegin($_etype110, $_size107); - for ($_i111 = 0; $_i111 < $_size107; ++$_i111) + $_size121 = 0; + $_etype124 = 0; + $xfer += $input->readListBegin($_etype124, $_size121); + for ($_i125 = 0; $_i125 < $_size121; ++$_i125) { - $elem112 = null; - $elem112 = array(); - $_size113 = 0; - $_etype116 = 0; - $xfer += $input->readListBegin($_etype116, $_size113); - for ($_i117 = 0; $_i117 < $_size113; ++$_i117) + $elem126 = null; + $elem126 = array(); + $_size127 = 0; + 
$_etype130 = 0; + $xfer += $input->readListBegin($_etype130, $_size127); + for ($_i131 = 0; $_i131 < $_size127; ++$_i131) { - $elem118 = null; - $xfer += $input->readString($elem118); - $elem112 []= $elem118; + $elem132 = null; + $xfer += $input->readString($elem132); + $elem126 []= $elem132; } $xfer += $input->readListEnd(); - $this->skewedColValues []= $elem112; + $this->skewedColValues []= $elem126; } $xfer += $input->readListEnd(); } else { @@ -3299,27 +3994,27 @@ class SkewedInfo { case 3: if ($ftype == TType::MAP) { $this->skewedColValueLocationMaps = array(); - $_size119 = 0; - $_ktype120 = 0; - $_vtype121 = 0; - $xfer += $input->readMapBegin($_ktype120, $_vtype121, $_size119); - for ($_i123 = 0; $_i123 < $_size119; ++$_i123) + $_size133 = 0; + $_ktype134 = 0; + $_vtype135 = 0; + $xfer += $input->readMapBegin($_ktype134, $_vtype135, $_size133); + for ($_i137 = 0; $_i137 < $_size133; ++$_i137) { - $key124 = array(); - $val125 = ''; - $key124 = array(); - $_size126 = 0; - $_etype129 = 0; - $xfer += $input->readListBegin($_etype129, $_size126); - for ($_i130 = 0; $_i130 < $_size126; ++$_i130) + $key138 = array(); + $val139 = ''; + $key138 = array(); + $_size140 = 0; + $_etype143 = 0; + $xfer += $input->readListBegin($_etype143, $_size140); + for ($_i144 = 0; $_i144 < $_size140; ++$_i144) { - $elem131 = null; - $xfer += $input->readString($elem131); - $key124 []= $elem131; + $elem145 = null; + $xfer += $input->readString($elem145); + $key138 []= $elem145; } $xfer += $input->readListEnd(); - $xfer += $input->readString($val125); - $this->skewedColValueLocationMaps[$key124] = $val125; + $xfer += $input->readString($val139); + $this->skewedColValueLocationMaps[$key138] = $val139; } $xfer += $input->readMapEnd(); } else { @@ -3347,9 +4042,9 @@ class SkewedInfo { { $output->writeListBegin(TType::STRING, count($this->skewedColNames)); { - foreach ($this->skewedColNames as $iter132) + foreach ($this->skewedColNames as $iter146) { - $xfer += $output->writeString($iter132); + $xfer += $output->writeString($iter146); } } $output->writeListEnd(); @@ -3364,14 +4059,14 @@ class SkewedInfo { { $output->writeListBegin(TType::LST, count($this->skewedColValues)); { - foreach ($this->skewedColValues as $iter133) + foreach ($this->skewedColValues as $iter147) { { - $output->writeListBegin(TType::STRING, count($iter133)); + $output->writeListBegin(TType::STRING, count($iter147)); { - foreach ($iter133 as $iter134) + foreach ($iter147 as $iter148) { - $xfer += $output->writeString($iter134); + $xfer += $output->writeString($iter148); } } $output->writeListEnd(); @@ -3390,19 +4085,19 @@ class SkewedInfo { { $output->writeMapBegin(TType::LST, TType::STRING, count($this->skewedColValueLocationMaps)); { - foreach ($this->skewedColValueLocationMaps as $kiter135 => $viter136) + foreach ($this->skewedColValueLocationMaps as $kiter149 => $viter150) { { - $output->writeListBegin(TType::STRING, count($kiter135)); + $output->writeListBegin(TType::STRING, count($kiter149)); { - foreach ($kiter135 as $iter137) + foreach ($kiter149 as $iter151) { - $xfer += $output->writeString($iter137); + $xfer += $output->writeString($iter151); } } $output->writeListEnd(); } - $xfer += $output->writeString($viter136); + $xfer += $output->writeString($viter150); } } $output->writeMapEnd(); @@ -3607,15 +4302,15 @@ class StorageDescriptor { case 1: if ($ftype == TType::LST) { $this->cols = array(); - $_size138 = 0; - $_etype141 = 0; - $xfer += $input->readListBegin($_etype141, $_size138); - for ($_i142 = 0; $_i142 < $_size138; ++$_i142) 
+ $_size152 = 0; + $_etype155 = 0; + $xfer += $input->readListBegin($_etype155, $_size152); + for ($_i156 = 0; $_i156 < $_size152; ++$_i156) { - $elem143 = null; - $elem143 = new \metastore\FieldSchema(); - $xfer += $elem143->read($input); - $this->cols []= $elem143; + $elem157 = null; + $elem157 = new \metastore\FieldSchema(); + $xfer += $elem157->read($input); + $this->cols []= $elem157; } $xfer += $input->readListEnd(); } else { @@ -3668,14 +4363,14 @@ class StorageDescriptor { case 8: if ($ftype == TType::LST) { $this->bucketCols = array(); - $_size144 = 0; - $_etype147 = 0; - $xfer += $input->readListBegin($_etype147, $_size144); - for ($_i148 = 0; $_i148 < $_size144; ++$_i148) + $_size158 = 0; + $_etype161 = 0; + $xfer += $input->readListBegin($_etype161, $_size158); + for ($_i162 = 0; $_i162 < $_size158; ++$_i162) { - $elem149 = null; - $xfer += $input->readString($elem149); - $this->bucketCols []= $elem149; + $elem163 = null; + $xfer += $input->readString($elem163); + $this->bucketCols []= $elem163; } $xfer += $input->readListEnd(); } else { @@ -3685,15 +4380,15 @@ class StorageDescriptor { case 9: if ($ftype == TType::LST) { $this->sortCols = array(); - $_size150 = 0; - $_etype153 = 0; - $xfer += $input->readListBegin($_etype153, $_size150); - for ($_i154 = 0; $_i154 < $_size150; ++$_i154) + $_size164 = 0; + $_etype167 = 0; + $xfer += $input->readListBegin($_etype167, $_size164); + for ($_i168 = 0; $_i168 < $_size164; ++$_i168) { - $elem155 = null; - $elem155 = new \metastore\Order(); - $xfer += $elem155->read($input); - $this->sortCols []= $elem155; + $elem169 = null; + $elem169 = new \metastore\Order(); + $xfer += $elem169->read($input); + $this->sortCols []= $elem169; } $xfer += $input->readListEnd(); } else { @@ -3703,17 +4398,17 @@ class StorageDescriptor { case 10: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size156 = 0; - $_ktype157 = 0; - $_vtype158 = 0; - $xfer += $input->readMapBegin($_ktype157, $_vtype158, $_size156); - for ($_i160 = 0; $_i160 < $_size156; ++$_i160) + $_size170 = 0; + $_ktype171 = 0; + $_vtype172 = 0; + $xfer += $input->readMapBegin($_ktype171, $_vtype172, $_size170); + for ($_i174 = 0; $_i174 < $_size170; ++$_i174) { - $key161 = ''; - $val162 = ''; - $xfer += $input->readString($key161); - $xfer += $input->readString($val162); - $this->parameters[$key161] = $val162; + $key175 = ''; + $val176 = ''; + $xfer += $input->readString($key175); + $xfer += $input->readString($val176); + $this->parameters[$key175] = $val176; } $xfer += $input->readMapEnd(); } else { @@ -3756,9 +4451,9 @@ class StorageDescriptor { { $output->writeListBegin(TType::STRUCT, count($this->cols)); { - foreach ($this->cols as $iter163) + foreach ($this->cols as $iter177) { - $xfer += $iter163->write($output); + $xfer += $iter177->write($output); } } $output->writeListEnd(); @@ -3806,9 +4501,9 @@ class StorageDescriptor { { $output->writeListBegin(TType::STRING, count($this->bucketCols)); { - foreach ($this->bucketCols as $iter164) + foreach ($this->bucketCols as $iter178) { - $xfer += $output->writeString($iter164); + $xfer += $output->writeString($iter178); } } $output->writeListEnd(); @@ -3823,9 +4518,9 @@ class StorageDescriptor { { $output->writeListBegin(TType::STRUCT, count($this->sortCols)); { - foreach ($this->sortCols as $iter165) + foreach ($this->sortCols as $iter179) { - $xfer += $iter165->write($output); + $xfer += $iter179->write($output); } } $output->writeListEnd(); @@ -3840,10 +4535,10 @@ class StorageDescriptor { { $output->writeMapBegin(TType::STRING, 
TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter166 => $viter167) + foreach ($this->parameters as $kiter180 => $viter181) { - $xfer += $output->writeString($kiter166); - $xfer += $output->writeString($viter167); + $xfer += $output->writeString($kiter180); + $xfer += $output->writeString($viter181); } } $output->writeMapEnd(); @@ -3929,6 +4624,14 @@ class Table { * @var bool */ public $temporary = false; + /** + * @var \metastore\FieldSchema[] + */ + public $primaryKeys = null; + /** + * @var \metastore\ForeignKeyRel[] + */ + public $foreignKeys = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { @@ -4004,6 +4707,24 @@ class Table { 'var' => 'temporary', 'type' => TType::BOOL, ), + 15 => array( + 'var' => 'primaryKeys', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\FieldSchema', + ), + ), + 16 => array( + 'var' => 'foreignKeys', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\ForeignKeyRel', + ), + ), ); } if (is_array($vals)) { @@ -4049,6 +4770,12 @@ class Table { if (isset($vals['temporary'])) { $this->temporary = $vals['temporary']; } + if (isset($vals['primaryKeys'])) { + $this->primaryKeys = $vals['primaryKeys']; + } + if (isset($vals['foreignKeys'])) { + $this->foreignKeys = $vals['foreignKeys']; + } } } @@ -4124,15 +4851,15 @@ class Table { case 8: if ($ftype == TType::LST) { $this->partitionKeys = array(); - $_size168 = 0; - $_etype171 = 0; - $xfer += $input->readListBegin($_etype171, $_size168); - for ($_i172 = 0; $_i172 < $_size168; ++$_i172) + $_size182 = 0; + $_etype185 = 0; + $xfer += $input->readListBegin($_etype185, $_size182); + for ($_i186 = 0; $_i186 < $_size182; ++$_i186) { - $elem173 = null; - $elem173 = new \metastore\FieldSchema(); - $xfer += $elem173->read($input); - $this->partitionKeys []= $elem173; + $elem187 = null; + $elem187 = new \metastore\FieldSchema(); + $xfer += $elem187->read($input); + $this->partitionKeys []= $elem187; } $xfer += $input->readListEnd(); } else { @@ -4142,17 +4869,17 @@ class Table { case 9: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size174 = 0; - $_ktype175 = 0; - $_vtype176 = 0; - $xfer += $input->readMapBegin($_ktype175, $_vtype176, $_size174); - for ($_i178 = 0; $_i178 < $_size174; ++$_i178) + $_size188 = 0; + $_ktype189 = 0; + $_vtype190 = 0; + $xfer += $input->readMapBegin($_ktype189, $_vtype190, $_size188); + for ($_i192 = 0; $_i192 < $_size188; ++$_i192) { - $key179 = ''; - $val180 = ''; - $xfer += $input->readString($key179); - $xfer += $input->readString($val180); - $this->parameters[$key179] = $val180; + $key193 = ''; + $val194 = ''; + $xfer += $input->readString($key193); + $xfer += $input->readString($val194); + $this->parameters[$key193] = $val194; } $xfer += $input->readMapEnd(); } else { @@ -4195,6 +4922,42 @@ class Table { $xfer += $input->skip($ftype); } break; + case 15: + if ($ftype == TType::LST) { + $this->primaryKeys = array(); + $_size195 = 0; + $_etype198 = 0; + $xfer += $input->readListBegin($_etype198, $_size195); + for ($_i199 = 0; $_i199 < $_size195; ++$_i199) + { + $elem200 = null; + $elem200 = new \metastore\FieldSchema(); + $xfer += $elem200->read($input); + $this->primaryKeys []= $elem200; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 16: + if ($ftype == TType::LST) { + $this->foreignKeys = array(); + $_size201 = 0; + 
$_etype204 = 0; + $xfer += $input->readListBegin($_etype204, $_size201); + for ($_i205 = 0; $_i205 < $_size201; ++$_i205) + { + $elem206 = null; + $elem206 = new \metastore\ForeignKeyRel(); + $xfer += $elem206->read($input); + $this->foreignKeys []= $elem206; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; default: $xfer += $input->skip($ftype); break; @@ -4254,9 +5017,9 @@ class Table { { $output->writeListBegin(TType::STRUCT, count($this->partitionKeys)); { - foreach ($this->partitionKeys as $iter181) + foreach ($this->partitionKeys as $iter207) { - $xfer += $iter181->write($output); + $xfer += $iter207->write($output); } } $output->writeListEnd(); @@ -4271,10 +5034,10 @@ class Table { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter182 => $viter183) + foreach ($this->parameters as $kiter208 => $viter209) { - $xfer += $output->writeString($kiter182); - $xfer += $output->writeString($viter183); + $xfer += $output->writeString($kiter208); + $xfer += $output->writeString($viter209); } } $output->writeMapEnd(); @@ -4309,6 +5072,40 @@ class Table { $xfer += $output->writeBool($this->temporary); $xfer += $output->writeFieldEnd(); } + if ($this->primaryKeys !== null) { + if (!is_array($this->primaryKeys)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('primaryKeys', TType::LST, 15); + { + $output->writeListBegin(TType::STRUCT, count($this->primaryKeys)); + { + foreach ($this->primaryKeys as $iter210) + { + $xfer += $iter210->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->foreignKeys !== null) { + if (!is_array($this->foreignKeys)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('foreignKeys', TType::LST, 16); + { + $output->writeListBegin(TType::STRUCT, count($this->foreignKeys)); + { + foreach ($this->foreignKeys as $iter211) + { + $xfer += $iter211->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } $xfer += $output->writeFieldStop(); $xfer += $output->writeStructEnd(); return $xfer; @@ -4453,14 +5250,14 @@ class Partition { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size184 = 0; - $_etype187 = 0; - $xfer += $input->readListBegin($_etype187, $_size184); - for ($_i188 = 0; $_i188 < $_size184; ++$_i188) + $_size212 = 0; + $_etype215 = 0; + $xfer += $input->readListBegin($_etype215, $_size212); + for ($_i216 = 0; $_i216 < $_size212; ++$_i216) { - $elem189 = null; - $xfer += $input->readString($elem189); - $this->values []= $elem189; + $elem217 = null; + $xfer += $input->readString($elem217); + $this->values []= $elem217; } $xfer += $input->readListEnd(); } else { @@ -4506,17 +5303,17 @@ class Partition { case 7: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size190 = 0; - $_ktype191 = 0; - $_vtype192 = 0; - $xfer += $input->readMapBegin($_ktype191, $_vtype192, $_size190); - for ($_i194 = 0; $_i194 < $_size190; ++$_i194) + $_size218 = 0; + $_ktype219 = 0; + $_vtype220 = 0; + $xfer += $input->readMapBegin($_ktype219, $_vtype220, $_size218); + for ($_i222 = 0; $_i222 < $_size218; ++$_i222) { - $key195 = ''; - $val196 = ''; - $xfer += $input->readString($key195); - $xfer += $input->readString($val196); - $this->parameters[$key195] = $val196; + $key223 = ''; + $val224 = 
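// --- Editor's note (illustrative sketch, not part of the generated patch) ---
// The substantive change in this regenerated file is that \metastore\Table
// gains two optional list fields: primaryKeys (field id 15, a list of
// FieldSchema structs) and foreignKeys (field id 16, a list of the new
// ForeignKeyRel structs). Everything else is mechanical renumbering of the
// generator's temporaries ($_sizeNN, $elemNN, $iterNN, ...). A minimal usage
// sketch follows; it assumes FieldSchema carries the usual name/type members
// and leaves ForeignKeyRel empty, since its member list is not shown here.
$pk = new \metastore\FieldSchema(array(
  'name' => 'id',       // assumed member names; verify against FieldSchema
  'type' => 'bigint',
));
$table = new \metastore\Table(array(
  'tableName'   => 'orders',
  'dbName'      => 'default',
  'primaryKeys' => array($pk),                             // new field 15
  'foreignKeys' => array(new \metastore\ForeignKeyRel()),  // new field 16
));
// write()/read() round-trip these through field ids 15 and 16 exactly as the
// generated code above shows: writeFieldBegin('primaryKeys', TType::LST, 15)
// followed by writeListBegin(TType::STRUCT, ...) over each element, and the
// matching case 15/16 branches on the read side.
// ----------------------------------------------------------------------------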
''; + $xfer += $input->readString($key223); + $xfer += $input->readString($val224); + $this->parameters[$key223] = $val224; } $xfer += $input->readMapEnd(); } else { @@ -4552,9 +5349,9 @@ class Partition { { $output->writeListBegin(TType::STRING, count($this->values)); { - foreach ($this->values as $iter197) + foreach ($this->values as $iter225) { - $xfer += $output->writeString($iter197); + $xfer += $output->writeString($iter225); } } $output->writeListEnd(); @@ -4597,10 +5394,10 @@ class Partition { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter198 => $viter199) + foreach ($this->parameters as $kiter226 => $viter227) { - $xfer += $output->writeString($kiter198); - $xfer += $output->writeString($viter199); + $xfer += $output->writeString($kiter226); + $xfer += $output->writeString($viter227); } } $output->writeMapEnd(); @@ -4736,14 +5533,14 @@ class PartitionWithoutSD { case 1: if ($ftype == TType::LST) { $this->values = array(); - $_size200 = 0; - $_etype203 = 0; - $xfer += $input->readListBegin($_etype203, $_size200); - for ($_i204 = 0; $_i204 < $_size200; ++$_i204) + $_size228 = 0; + $_etype231 = 0; + $xfer += $input->readListBegin($_etype231, $_size228); + for ($_i232 = 0; $_i232 < $_size228; ++$_i232) { - $elem205 = null; - $xfer += $input->readString($elem205); - $this->values []= $elem205; + $elem233 = null; + $xfer += $input->readString($elem233); + $this->values []= $elem233; } $xfer += $input->readListEnd(); } else { @@ -4774,17 +5571,17 @@ class PartitionWithoutSD { case 5: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size206 = 0; - $_ktype207 = 0; - $_vtype208 = 0; - $xfer += $input->readMapBegin($_ktype207, $_vtype208, $_size206); - for ($_i210 = 0; $_i210 < $_size206; ++$_i210) + $_size234 = 0; + $_ktype235 = 0; + $_vtype236 = 0; + $xfer += $input->readMapBegin($_ktype235, $_vtype236, $_size234); + for ($_i238 = 0; $_i238 < $_size234; ++$_i238) { - $key211 = ''; - $val212 = ''; - $xfer += $input->readString($key211); - $xfer += $input->readString($val212); - $this->parameters[$key211] = $val212; + $key239 = ''; + $val240 = ''; + $xfer += $input->readString($key239); + $xfer += $input->readString($val240); + $this->parameters[$key239] = $val240; } $xfer += $input->readMapEnd(); } else { @@ -4820,9 +5617,9 @@ class PartitionWithoutSD { { $output->writeListBegin(TType::STRING, count($this->values)); { - foreach ($this->values as $iter213) + foreach ($this->values as $iter241) { - $xfer += $output->writeString($iter213); + $xfer += $output->writeString($iter241); } } $output->writeListEnd(); @@ -4852,10 +5649,10 @@ class PartitionWithoutSD { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter214 => $viter215) + foreach ($this->parameters as $kiter242 => $viter243) { - $xfer += $output->writeString($kiter214); - $xfer += $output->writeString($viter215); + $xfer += $output->writeString($kiter242); + $xfer += $output->writeString($viter243); } } $output->writeMapEnd(); @@ -4940,15 +5737,15 @@ class PartitionSpecWithSharedSD { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size216 = 0; - $_etype219 = 0; - $xfer += $input->readListBegin($_etype219, $_size216); - for ($_i220 = 0; $_i220 < $_size216; ++$_i220) + $_size244 = 0; + $_etype247 = 0; + $xfer += $input->readListBegin($_etype247, $_size244); + for ($_i248 = 0; $_i248 < $_size244; ++$_i248) { - $elem221 = null; - $elem221 = new 
\metastore\PartitionWithoutSD(); - $xfer += $elem221->read($input); - $this->partitions []= $elem221; + $elem249 = null; + $elem249 = new \metastore\PartitionWithoutSD(); + $xfer += $elem249->read($input); + $this->partitions []= $elem249; } $xfer += $input->readListEnd(); } else { @@ -4984,9 +5781,9 @@ class PartitionSpecWithSharedSD { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter222) + foreach ($this->partitions as $iter250) { - $xfer += $iter222->write($output); + $xfer += $iter250->write($output); } } $output->writeListEnd(); @@ -5059,15 +5856,15 @@ class PartitionListComposingSpec { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size223 = 0; - $_etype226 = 0; - $xfer += $input->readListBegin($_etype226, $_size223); - for ($_i227 = 0; $_i227 < $_size223; ++$_i227) + $_size251 = 0; + $_etype254 = 0; + $xfer += $input->readListBegin($_etype254, $_size251); + for ($_i255 = 0; $_i255 < $_size251; ++$_i255) { - $elem228 = null; - $elem228 = new \metastore\Partition(); - $xfer += $elem228->read($input); - $this->partitions []= $elem228; + $elem256 = null; + $elem256 = new \metastore\Partition(); + $xfer += $elem256->read($input); + $this->partitions []= $elem256; } $xfer += $input->readListEnd(); } else { @@ -5095,9 +5892,9 @@ class PartitionListComposingSpec { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter229) + foreach ($this->partitions as $iter257) { - $xfer += $iter229->write($output); + $xfer += $iter257->write($output); } } $output->writeListEnd(); @@ -5499,17 +6296,17 @@ class Index { case 9: if ($ftype == TType::MAP) { $this->parameters = array(); - $_size230 = 0; - $_ktype231 = 0; - $_vtype232 = 0; - $xfer += $input->readMapBegin($_ktype231, $_vtype232, $_size230); - for ($_i234 = 0; $_i234 < $_size230; ++$_i234) + $_size258 = 0; + $_ktype259 = 0; + $_vtype260 = 0; + $xfer += $input->readMapBegin($_ktype259, $_vtype260, $_size258); + for ($_i262 = 0; $_i262 < $_size258; ++$_i262) { - $key235 = ''; - $val236 = ''; - $xfer += $input->readString($key235); - $xfer += $input->readString($val236); - $this->parameters[$key235] = $val236; + $key263 = ''; + $val264 = ''; + $xfer += $input->readString($key263); + $xfer += $input->readString($val264); + $this->parameters[$key263] = $val264; } $xfer += $input->readMapEnd(); } else { @@ -5587,10 +6384,10 @@ class Index { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->parameters)); { - foreach ($this->parameters as $kiter237 => $viter238) + foreach ($this->parameters as $kiter265 => $viter266) { - $xfer += $output->writeString($kiter237); - $xfer += $output->writeString($viter238); + $xfer += $output->writeString($kiter265); + $xfer += $output->writeString($viter266); } } $output->writeMapEnd(); @@ -7537,15 +8334,15 @@ class ColumnStatistics { case 2: if ($ftype == TType::LST) { $this->statsObj = array(); - $_size239 = 0; - $_etype242 = 0; - $xfer += $input->readListBegin($_etype242, $_size239); - for ($_i243 = 0; $_i243 < $_size239; ++$_i243) + $_size267 = 0; + $_etype270 = 0; + $xfer += $input->readListBegin($_etype270, $_size267); + for ($_i271 = 0; $_i271 < $_size267; ++$_i271) { - $elem244 = null; - $elem244 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem244->read($input); - $this->statsObj []= $elem244; + $elem272 = null; + $elem272 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem272->read($input); + $this->statsObj []= $elem272; } $xfer += 
$input->readListEnd(); } else { @@ -7581,9 +8378,9 @@ class ColumnStatistics { { $output->writeListBegin(TType::STRUCT, count($this->statsObj)); { - foreach ($this->statsObj as $iter245) + foreach ($this->statsObj as $iter273) { - $xfer += $iter245->write($output); + $xfer += $iter273->write($output); } } $output->writeListEnd(); @@ -7659,15 +8456,15 @@ class AggrStats { case 1: if ($ftype == TType::LST) { $this->colStats = array(); - $_size246 = 0; - $_etype249 = 0; - $xfer += $input->readListBegin($_etype249, $_size246); - for ($_i250 = 0; $_i250 < $_size246; ++$_i250) + $_size274 = 0; + $_etype277 = 0; + $xfer += $input->readListBegin($_etype277, $_size274); + for ($_i278 = 0; $_i278 < $_size274; ++$_i278) { - $elem251 = null; - $elem251 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem251->read($input); - $this->colStats []= $elem251; + $elem279 = null; + $elem279 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem279->read($input); + $this->colStats []= $elem279; } $xfer += $input->readListEnd(); } else { @@ -7702,9 +8499,9 @@ class AggrStats { { $output->writeListBegin(TType::STRUCT, count($this->colStats)); { - foreach ($this->colStats as $iter252) + foreach ($this->colStats as $iter280) { - $xfer += $iter252->write($output); + $xfer += $iter280->write($output); } } $output->writeListEnd(); @@ -7774,15 +8571,15 @@ class SetPartitionsStatsRequest { case 1: if ($ftype == TType::LST) { $this->colStats = array(); - $_size253 = 0; - $_etype256 = 0; - $xfer += $input->readListBegin($_etype256, $_size253); - for ($_i257 = 0; $_i257 < $_size253; ++$_i257) + $_size281 = 0; + $_etype284 = 0; + $xfer += $input->readListBegin($_etype284, $_size281); + for ($_i285 = 0; $_i285 < $_size281; ++$_i285) { - $elem258 = null; - $elem258 = new \metastore\ColumnStatistics(); - $xfer += $elem258->read($input); - $this->colStats []= $elem258; + $elem286 = null; + $elem286 = new \metastore\ColumnStatistics(); + $xfer += $elem286->read($input); + $this->colStats []= $elem286; } $xfer += $input->readListEnd(); } else { @@ -7810,9 +8607,9 @@ class SetPartitionsStatsRequest { { $output->writeListBegin(TType::STRUCT, count($this->colStats)); { - foreach ($this->colStats as $iter259) + foreach ($this->colStats as $iter287) { - $xfer += $iter259->write($output); + $xfer += $iter287->write($output); } } $output->writeListEnd(); @@ -7896,15 +8693,15 @@ class Schema { case 1: if ($ftype == TType::LST) { $this->fieldSchemas = array(); - $_size260 = 0; - $_etype263 = 0; - $xfer += $input->readListBegin($_etype263, $_size260); - for ($_i264 = 0; $_i264 < $_size260; ++$_i264) + $_size288 = 0; + $_etype291 = 0; + $xfer += $input->readListBegin($_etype291, $_size288); + for ($_i292 = 0; $_i292 < $_size288; ++$_i292) { - $elem265 = null; - $elem265 = new \metastore\FieldSchema(); - $xfer += $elem265->read($input); - $this->fieldSchemas []= $elem265; + $elem293 = null; + $elem293 = new \metastore\FieldSchema(); + $xfer += $elem293->read($input); + $this->fieldSchemas []= $elem293; } $xfer += $input->readListEnd(); } else { @@ -7914,17 +8711,17 @@ class Schema { case 2: if ($ftype == TType::MAP) { $this->properties = array(); - $_size266 = 0; - $_ktype267 = 0; - $_vtype268 = 0; - $xfer += $input->readMapBegin($_ktype267, $_vtype268, $_size266); - for ($_i270 = 0; $_i270 < $_size266; ++$_i270) + $_size294 = 0; + $_ktype295 = 0; + $_vtype296 = 0; + $xfer += $input->readMapBegin($_ktype295, $_vtype296, $_size294); + for ($_i298 = 0; $_i298 < $_size294; ++$_i298) { - $key271 = ''; - $val272 = ''; - $xfer += 
$input->readString($key271); - $xfer += $input->readString($val272); - $this->properties[$key271] = $val272; + $key299 = ''; + $val300 = ''; + $xfer += $input->readString($key299); + $xfer += $input->readString($val300); + $this->properties[$key299] = $val300; } $xfer += $input->readMapEnd(); } else { @@ -7952,9 +8749,9 @@ class Schema { { $output->writeListBegin(TType::STRUCT, count($this->fieldSchemas)); { - foreach ($this->fieldSchemas as $iter273) + foreach ($this->fieldSchemas as $iter301) { - $xfer += $iter273->write($output); + $xfer += $iter301->write($output); } } $output->writeListEnd(); @@ -7969,10 +8766,10 @@ class Schema { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter274 => $viter275) + foreach ($this->properties as $kiter302 => $viter303) { - $xfer += $output->writeString($kiter274); - $xfer += $output->writeString($viter275); + $xfer += $output->writeString($kiter302); + $xfer += $output->writeString($viter303); } } $output->writeMapEnd(); @@ -8040,17 +8837,17 @@ class EnvironmentContext { case 1: if ($ftype == TType::MAP) { $this->properties = array(); - $_size276 = 0; - $_ktype277 = 0; - $_vtype278 = 0; - $xfer += $input->readMapBegin($_ktype277, $_vtype278, $_size276); - for ($_i280 = 0; $_i280 < $_size276; ++$_i280) + $_size304 = 0; + $_ktype305 = 0; + $_vtype306 = 0; + $xfer += $input->readMapBegin($_ktype305, $_vtype306, $_size304); + for ($_i308 = 0; $_i308 < $_size304; ++$_i308) { - $key281 = ''; - $val282 = ''; - $xfer += $input->readString($key281); - $xfer += $input->readString($val282); - $this->properties[$key281] = $val282; + $key309 = ''; + $val310 = ''; + $xfer += $input->readString($key309); + $xfer += $input->readString($val310); + $this->properties[$key309] = $val310; } $xfer += $input->readMapEnd(); } else { @@ -8078,10 +8875,10 @@ class EnvironmentContext { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter283 => $viter284) + foreach ($this->properties as $kiter311 => $viter312) { - $xfer += $output->writeString($kiter283); - $xfer += $output->writeString($viter284); + $xfer += $output->writeString($kiter311); + $xfer += $output->writeString($viter312); } } $output->writeMapEnd(); @@ -8157,15 +8954,15 @@ class PartitionsByExprResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size285 = 0; - $_etype288 = 0; - $xfer += $input->readListBegin($_etype288, $_size285); - for ($_i289 = 0; $_i289 < $_size285; ++$_i289) + $_size313 = 0; + $_etype316 = 0; + $xfer += $input->readListBegin($_etype316, $_size313); + for ($_i317 = 0; $_i317 < $_size313; ++$_i317) { - $elem290 = null; - $elem290 = new \metastore\Partition(); - $xfer += $elem290->read($input); - $this->partitions []= $elem290; + $elem318 = null; + $elem318 = new \metastore\Partition(); + $xfer += $elem318->read($input); + $this->partitions []= $elem318; } $xfer += $input->readListEnd(); } else { @@ -8200,9 +8997,9 @@ class PartitionsByExprResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter291) + foreach ($this->partitions as $iter319) { - $xfer += $iter291->write($output); + $xfer += $iter319->write($output); } } $output->writeListEnd(); @@ -8439,15 +9236,15 @@ class TableStatsResult { case 1: if ($ftype == TType::LST) { $this->tableStats = array(); - $_size292 = 0; - $_etype295 = 0; - $xfer += $input->readListBegin($_etype295, $_size292); - for ($_i296 = 0; $_i296 < 
$_size292; ++$_i296) + $_size320 = 0; + $_etype323 = 0; + $xfer += $input->readListBegin($_etype323, $_size320); + for ($_i324 = 0; $_i324 < $_size320; ++$_i324) { - $elem297 = null; - $elem297 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem297->read($input); - $this->tableStats []= $elem297; + $elem325 = null; + $elem325 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem325->read($input); + $this->tableStats []= $elem325; } $xfer += $input->readListEnd(); } else { @@ -8475,9 +9272,9 @@ class TableStatsResult { { $output->writeListBegin(TType::STRUCT, count($this->tableStats)); { - foreach ($this->tableStats as $iter298) + foreach ($this->tableStats as $iter326) { - $xfer += $iter298->write($output); + $xfer += $iter326->write($output); } } $output->writeListEnd(); @@ -8550,28 +9347,28 @@ class PartitionsStatsResult { case 1: if ($ftype == TType::MAP) { $this->partStats = array(); - $_size299 = 0; - $_ktype300 = 0; - $_vtype301 = 0; - $xfer += $input->readMapBegin($_ktype300, $_vtype301, $_size299); - for ($_i303 = 0; $_i303 < $_size299; ++$_i303) + $_size327 = 0; + $_ktype328 = 0; + $_vtype329 = 0; + $xfer += $input->readMapBegin($_ktype328, $_vtype329, $_size327); + for ($_i331 = 0; $_i331 < $_size327; ++$_i331) { - $key304 = ''; - $val305 = array(); - $xfer += $input->readString($key304); - $val305 = array(); - $_size306 = 0; - $_etype309 = 0; - $xfer += $input->readListBegin($_etype309, $_size306); - for ($_i310 = 0; $_i310 < $_size306; ++$_i310) + $key332 = ''; + $val333 = array(); + $xfer += $input->readString($key332); + $val333 = array(); + $_size334 = 0; + $_etype337 = 0; + $xfer += $input->readListBegin($_etype337, $_size334); + for ($_i338 = 0; $_i338 < $_size334; ++$_i338) { - $elem311 = null; - $elem311 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem311->read($input); - $val305 []= $elem311; + $elem339 = null; + $elem339 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem339->read($input); + $val333 []= $elem339; } $xfer += $input->readListEnd(); - $this->partStats[$key304] = $val305; + $this->partStats[$key332] = $val333; } $xfer += $input->readMapEnd(); } else { @@ -8599,15 +9396,15 @@ class PartitionsStatsResult { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->partStats)); { - foreach ($this->partStats as $kiter312 => $viter313) + foreach ($this->partStats as $kiter340 => $viter341) { - $xfer += $output->writeString($kiter312); + $xfer += $output->writeString($kiter340); { - $output->writeListBegin(TType::STRUCT, count($viter313)); + $output->writeListBegin(TType::STRUCT, count($viter341)); { - foreach ($viter313 as $iter314) + foreach ($viter341 as $iter342) { - $xfer += $iter314->write($output); + $xfer += $iter342->write($output); } } $output->writeListEnd(); @@ -8711,14 +9508,14 @@ class TableStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size315 = 0; - $_etype318 = 0; - $xfer += $input->readListBegin($_etype318, $_size315); - for ($_i319 = 0; $_i319 < $_size315; ++$_i319) + $_size343 = 0; + $_etype346 = 0; + $xfer += $input->readListBegin($_etype346, $_size343); + for ($_i347 = 0; $_i347 < $_size343; ++$_i347) { - $elem320 = null; - $xfer += $input->readString($elem320); - $this->colNames []= $elem320; + $elem348 = null; + $xfer += $input->readString($elem348); + $this->colNames []= $elem348; } $xfer += $input->readListEnd(); } else { @@ -8756,9 +9553,9 @@ class TableStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter321) 
+ foreach ($this->colNames as $iter349) { - $xfer += $output->writeString($iter321); + $xfer += $output->writeString($iter349); } } $output->writeListEnd(); @@ -8873,14 +9670,14 @@ class PartitionsStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size322 = 0; - $_etype325 = 0; - $xfer += $input->readListBegin($_etype325, $_size322); - for ($_i326 = 0; $_i326 < $_size322; ++$_i326) + $_size350 = 0; + $_etype353 = 0; + $xfer += $input->readListBegin($_etype353, $_size350); + for ($_i354 = 0; $_i354 < $_size350; ++$_i354) { - $elem327 = null; - $xfer += $input->readString($elem327); - $this->colNames []= $elem327; + $elem355 = null; + $xfer += $input->readString($elem355); + $this->colNames []= $elem355; } $xfer += $input->readListEnd(); } else { @@ -8890,14 +9687,14 @@ class PartitionsStatsRequest { case 4: if ($ftype == TType::LST) { $this->partNames = array(); - $_size328 = 0; - $_etype331 = 0; - $xfer += $input->readListBegin($_etype331, $_size328); - for ($_i332 = 0; $_i332 < $_size328; ++$_i332) + $_size356 = 0; + $_etype359 = 0; + $xfer += $input->readListBegin($_etype359, $_size356); + for ($_i360 = 0; $_i360 < $_size356; ++$_i360) { - $elem333 = null; - $xfer += $input->readString($elem333); - $this->partNames []= $elem333; + $elem361 = null; + $xfer += $input->readString($elem361); + $this->partNames []= $elem361; } $xfer += $input->readListEnd(); } else { @@ -8935,9 +9732,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter334) + foreach ($this->colNames as $iter362) { - $xfer += $output->writeString($iter334); + $xfer += $output->writeString($iter362); } } $output->writeListEnd(); @@ -8952,9 +9749,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter335) + foreach ($this->partNames as $iter363) { - $xfer += $output->writeString($iter335); + $xfer += $output->writeString($iter363); } } $output->writeListEnd(); @@ -9019,15 +9816,15 @@ class AddPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size336 = 0; - $_etype339 = 0; - $xfer += $input->readListBegin($_etype339, $_size336); - for ($_i340 = 0; $_i340 < $_size336; ++$_i340) + $_size364 = 0; + $_etype367 = 0; + $xfer += $input->readListBegin($_etype367, $_size364); + for ($_i368 = 0; $_i368 < $_size364; ++$_i368) { - $elem341 = null; - $elem341 = new \metastore\Partition(); - $xfer += $elem341->read($input); - $this->partitions []= $elem341; + $elem369 = null; + $elem369 = new \metastore\Partition(); + $xfer += $elem369->read($input); + $this->partitions []= $elem369; } $xfer += $input->readListEnd(); } else { @@ -9055,9 +9852,9 @@ class AddPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter342) + foreach ($this->partitions as $iter370) { - $xfer += $iter342->write($output); + $xfer += $iter370->write($output); } } $output->writeListEnd(); @@ -9180,15 +9977,15 @@ class AddPartitionsRequest { case 3: if ($ftype == TType::LST) { $this->parts = array(); - $_size343 = 0; - $_etype346 = 0; - $xfer += $input->readListBegin($_etype346, $_size343); - for ($_i347 = 0; $_i347 < $_size343; ++$_i347) + $_size371 = 0; + $_etype374 = 0; + $xfer += $input->readListBegin($_etype374, $_size371); + for ($_i375 = 0; $_i375 < $_size371; ++$_i375) { - $elem348 = null; - $elem348 = new \metastore\Partition(); - $xfer += 
$elem348->read($input); - $this->parts []= $elem348; + $elem376 = null; + $elem376 = new \metastore\Partition(); + $xfer += $elem376->read($input); + $this->parts []= $elem376; } $xfer += $input->readListEnd(); } else { @@ -9240,9 +10037,9 @@ class AddPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->parts)); { - foreach ($this->parts as $iter349) + foreach ($this->parts as $iter377) { - $xfer += $iter349->write($output); + $xfer += $iter377->write($output); } } $output->writeListEnd(); @@ -9317,15 +10114,15 @@ class DropPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size350 = 0; - $_etype353 = 0; - $xfer += $input->readListBegin($_etype353, $_size350); - for ($_i354 = 0; $_i354 < $_size350; ++$_i354) + $_size378 = 0; + $_etype381 = 0; + $xfer += $input->readListBegin($_etype381, $_size378); + for ($_i382 = 0; $_i382 < $_size378; ++$_i382) { - $elem355 = null; - $elem355 = new \metastore\Partition(); - $xfer += $elem355->read($input); - $this->partitions []= $elem355; + $elem383 = null; + $elem383 = new \metastore\Partition(); + $xfer += $elem383->read($input); + $this->partitions []= $elem383; } $xfer += $input->readListEnd(); } else { @@ -9353,9 +10150,9 @@ class DropPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter356) + foreach ($this->partitions as $iter384) { - $xfer += $iter356->write($output); + $xfer += $iter384->write($output); } } $output->writeListEnd(); @@ -9533,14 +10330,14 @@ class RequestPartsSpec { case 1: if ($ftype == TType::LST) { $this->names = array(); - $_size357 = 0; - $_etype360 = 0; - $xfer += $input->readListBegin($_etype360, $_size357); - for ($_i361 = 0; $_i361 < $_size357; ++$_i361) + $_size385 = 0; + $_etype388 = 0; + $xfer += $input->readListBegin($_etype388, $_size385); + for ($_i389 = 0; $_i389 < $_size385; ++$_i389) { - $elem362 = null; - $xfer += $input->readString($elem362); - $this->names []= $elem362; + $elem390 = null; + $xfer += $input->readString($elem390); + $this->names []= $elem390; } $xfer += $input->readListEnd(); } else { @@ -9550,15 +10347,15 @@ class RequestPartsSpec { case 2: if ($ftype == TType::LST) { $this->exprs = array(); - $_size363 = 0; - $_etype366 = 0; - $xfer += $input->readListBegin($_etype366, $_size363); - for ($_i367 = 0; $_i367 < $_size363; ++$_i367) + $_size391 = 0; + $_etype394 = 0; + $xfer += $input->readListBegin($_etype394, $_size391); + for ($_i395 = 0; $_i395 < $_size391; ++$_i395) { - $elem368 = null; - $elem368 = new \metastore\DropPartitionsExpr(); - $xfer += $elem368->read($input); - $this->exprs []= $elem368; + $elem396 = null; + $elem396 = new \metastore\DropPartitionsExpr(); + $xfer += $elem396->read($input); + $this->exprs []= $elem396; } $xfer += $input->readListEnd(); } else { @@ -9586,9 +10383,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter369) + foreach ($this->names as $iter397) { - $xfer += $output->writeString($iter369); + $xfer += $output->writeString($iter397); } } $output->writeListEnd(); @@ -9603,9 +10400,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRUCT, count($this->exprs)); { - foreach ($this->exprs as $iter370) + foreach ($this->exprs as $iter398) { - $xfer += $iter370->write($output); + $xfer += $iter398->write($output); } } $output->writeListEnd(); @@ -10140,15 +10937,15 @@ class Function { case 8: if ($ftype == TType::LST) { $this->resourceUris = array(); 
- $_size371 = 0; - $_etype374 = 0; - $xfer += $input->readListBegin($_etype374, $_size371); - for ($_i375 = 0; $_i375 < $_size371; ++$_i375) + $_size399 = 0; + $_etype402 = 0; + $xfer += $input->readListBegin($_etype402, $_size399); + for ($_i403 = 0; $_i403 < $_size399; ++$_i403) { - $elem376 = null; - $elem376 = new \metastore\ResourceUri(); - $xfer += $elem376->read($input); - $this->resourceUris []= $elem376; + $elem404 = null; + $elem404 = new \metastore\ResourceUri(); + $xfer += $elem404->read($input); + $this->resourceUris []= $elem404; } $xfer += $input->readListEnd(); } else { @@ -10211,9 +11008,9 @@ class Function { { $output->writeListBegin(TType::STRUCT, count($this->resourceUris)); { - foreach ($this->resourceUris as $iter377) + foreach ($this->resourceUris as $iter405) { - $xfer += $iter377->write($output); + $xfer += $iter405->write($output); } } $output->writeListEnd(); @@ -10509,15 +11306,15 @@ class GetOpenTxnsInfoResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size378 = 0; - $_etype381 = 0; - $xfer += $input->readListBegin($_etype381, $_size378); - for ($_i382 = 0; $_i382 < $_size378; ++$_i382) + $_size406 = 0; + $_etype409 = 0; + $xfer += $input->readListBegin($_etype409, $_size406); + for ($_i410 = 0; $_i410 < $_size406; ++$_i410) { - $elem383 = null; - $elem383 = new \metastore\TxnInfo(); - $xfer += $elem383->read($input); - $this->open_txns []= $elem383; + $elem411 = null; + $elem411 = new \metastore\TxnInfo(); + $xfer += $elem411->read($input); + $this->open_txns []= $elem411; } $xfer += $input->readListEnd(); } else { @@ -10550,9 +11347,9 @@ class GetOpenTxnsInfoResponse { { $output->writeListBegin(TType::STRUCT, count($this->open_txns)); { - foreach ($this->open_txns as $iter384) + foreach ($this->open_txns as $iter412) { - $xfer += $iter384->write($output); + $xfer += $iter412->write($output); } } $output->writeListEnd(); @@ -10634,17 +11431,17 @@ class GetOpenTxnsResponse { case 2: if ($ftype == TType::SET) { $this->open_txns = array(); - $_size385 = 0; - $_etype388 = 0; - $xfer += $input->readSetBegin($_etype388, $_size385); - for ($_i389 = 0; $_i389 < $_size385; ++$_i389) + $_size413 = 0; + $_etype416 = 0; + $xfer += $input->readSetBegin($_etype416, $_size413); + for ($_i417 = 0; $_i417 < $_size413; ++$_i417) { - $elem390 = null; - $xfer += $input->readI64($elem390); - if (is_scalar($elem390)) { - $this->open_txns[$elem390] = true; + $elem418 = null; + $xfer += $input->readI64($elem418); + if (is_scalar($elem418)) { + $this->open_txns[$elem418] = true; } else { - $this->open_txns []= $elem390; + $this->open_txns []= $elem418; } } $xfer += $input->readSetEnd(); @@ -10678,12 +11475,12 @@ class GetOpenTxnsResponse { { $output->writeSetBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter391 => $iter392) + foreach ($this->open_txns as $iter419 => $iter420) { - if (is_scalar($iter392)) { - $xfer += $output->writeI64($iter391); + if (is_scalar($iter420)) { + $xfer += $output->writeI64($iter419); } else { - $xfer += $output->writeI64($iter392); + $xfer += $output->writeI64($iter420); } } } @@ -10892,14 +11689,14 @@ class OpenTxnsResponse { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size393 = 0; - $_etype396 = 0; - $xfer += $input->readListBegin($_etype396, $_size393); - for ($_i397 = 0; $_i397 < $_size393; ++$_i397) + $_size421 = 0; + $_etype424 = 0; + $xfer += $input->readListBegin($_etype424, $_size421); + for ($_i425 = 0; $_i425 < $_size421; ++$_i425) { - $elem398 = null; - $xfer += 
$input->readI64($elem398); - $this->txn_ids []= $elem398; + $elem426 = null; + $xfer += $input->readI64($elem426); + $this->txn_ids []= $elem426; } $xfer += $input->readListEnd(); } else { @@ -10927,9 +11724,9 @@ class OpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter399) + foreach ($this->txn_ids as $iter427) { - $xfer += $output->writeI64($iter399); + $xfer += $output->writeI64($iter427); } } $output->writeListEnd(); @@ -11355,15 +12152,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size400 = 0; - $_etype403 = 0; - $xfer += $input->readListBegin($_etype403, $_size400); - for ($_i404 = 0; $_i404 < $_size400; ++$_i404) + $_size428 = 0; + $_etype431 = 0; + $xfer += $input->readListBegin($_etype431, $_size428); + for ($_i432 = 0; $_i432 < $_size428; ++$_i432) { - $elem405 = null; - $elem405 = new \metastore\LockComponent(); - $xfer += $elem405->read($input); - $this->component []= $elem405; + $elem433 = null; + $elem433 = new \metastore\LockComponent(); + $xfer += $elem433->read($input); + $this->component []= $elem433; } $xfer += $input->readListEnd(); } else { @@ -11419,9 +12216,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter406) + foreach ($this->component as $iter434) { - $xfer += $iter406->write($output); + $xfer += $iter434->write($output); } } $output->writeListEnd(); @@ -12364,15 +13161,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size407 = 0; - $_etype410 = 0; - $xfer += $input->readListBegin($_etype410, $_size407); - for ($_i411 = 0; $_i411 < $_size407; ++$_i411) + $_size435 = 0; + $_etype438 = 0; + $xfer += $input->readListBegin($_etype438, $_size435); + for ($_i439 = 0; $_i439 < $_size435; ++$_i439) { - $elem412 = null; - $elem412 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem412->read($input); - $this->locks []= $elem412; + $elem440 = null; + $elem440 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem440->read($input); + $this->locks []= $elem440; } $xfer += $input->readListEnd(); } else { @@ -12400,9 +13197,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter413) + foreach ($this->locks as $iter441) { - $xfer += $iter413->write($output); + $xfer += $iter441->write($output); } } $output->writeListEnd(); @@ -12677,17 +13474,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size414 = 0; - $_etype417 = 0; - $xfer += $input->readSetBegin($_etype417, $_size414); - for ($_i418 = 0; $_i418 < $_size414; ++$_i418) + $_size442 = 0; + $_etype445 = 0; + $xfer += $input->readSetBegin($_etype445, $_size442); + for ($_i446 = 0; $_i446 < $_size442; ++$_i446) { - $elem419 = null; - $xfer += $input->readI64($elem419); - if (is_scalar($elem419)) { - $this->aborted[$elem419] = true; + $elem447 = null; + $xfer += $input->readI64($elem447); + if (is_scalar($elem447)) { + $this->aborted[$elem447] = true; } else { - $this->aborted []= $elem419; + $this->aborted []= $elem447; } } $xfer += $input->readSetEnd(); @@ -12698,17 +13495,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size420 = 0; - $_etype423 = 0; - $xfer += $input->readSetBegin($_etype423, $_size420); - for ($_i424 = 0; $_i424 < $_size420; ++$_i424) + $_size448 = 0; + $_etype451 = 0; + 
$xfer += $input->readSetBegin($_etype451, $_size448); + for ($_i452 = 0; $_i452 < $_size448; ++$_i452) { - $elem425 = null; - $xfer += $input->readI64($elem425); - if (is_scalar($elem425)) { - $this->nosuch[$elem425] = true; + $elem453 = null; + $xfer += $input->readI64($elem453); + if (is_scalar($elem453)) { + $this->nosuch[$elem453] = true; } else { - $this->nosuch []= $elem425; + $this->nosuch []= $elem453; } } $xfer += $input->readSetEnd(); @@ -12737,12 +13534,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter426 => $iter427) + foreach ($this->aborted as $iter454 => $iter455) { - if (is_scalar($iter427)) { - $xfer += $output->writeI64($iter426); + if (is_scalar($iter455)) { + $xfer += $output->writeI64($iter454); } else { - $xfer += $output->writeI64($iter427); + $xfer += $output->writeI64($iter455); } } } @@ -12758,12 +13555,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter428 => $iter429) + foreach ($this->nosuch as $iter456 => $iter457) { - if (is_scalar($iter429)) { - $xfer += $output->writeI64($iter428); + if (is_scalar($iter457)) { + $xfer += $output->writeI64($iter456); } else { - $xfer += $output->writeI64($iter429); + $xfer += $output->writeI64($iter457); } } } @@ -13374,15 +14171,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size430 = 0; - $_etype433 = 0; - $xfer += $input->readListBegin($_etype433, $_size430); - for ($_i434 = 0; $_i434 < $_size430; ++$_i434) + $_size458 = 0; + $_etype461 = 0; + $xfer += $input->readListBegin($_etype461, $_size458); + for ($_i462 = 0; $_i462 < $_size458; ++$_i462) { - $elem435 = null; - $elem435 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem435->read($input); - $this->compacts []= $elem435; + $elem463 = null; + $elem463 = new \metastore\ShowCompactResponseElement(); + $xfer += $elem463->read($input); + $this->compacts []= $elem463; } $xfer += $input->readListEnd(); } else { @@ -13410,9 +14207,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter436) + foreach ($this->compacts as $iter464) { - $xfer += $iter436->write($output); + $xfer += $iter464->write($output); } } $output->writeListEnd(); @@ -13530,14 +14327,14 @@ class AddDynamicPartitions { case 4: if ($ftype == TType::LST) { $this->partitionnames = array(); - $_size437 = 0; - $_etype440 = 0; - $xfer += $input->readListBegin($_etype440, $_size437); - for ($_i441 = 0; $_i441 < $_size437; ++$_i441) + $_size465 = 0; + $_etype468 = 0; + $xfer += $input->readListBegin($_etype468, $_size465); + for ($_i469 = 0; $_i469 < $_size465; ++$_i469) { - $elem442 = null; - $xfer += $input->readString($elem442); - $this->partitionnames []= $elem442; + $elem470 = null; + $xfer += $input->readString($elem470); + $this->partitionnames []= $elem470; } $xfer += $input->readListEnd(); } else { @@ -13580,9 +14377,9 @@ class AddDynamicPartitions { { $output->writeListBegin(TType::STRING, count($this->partitionnames)); { - foreach ($this->partitionnames as $iter443) + foreach ($this->partitionnames as $iter471) { - $xfer += $output->writeString($iter443); + $xfer += $output->writeString($iter471); } } $output->writeListEnd(); @@ -13935,15 +14732,15 @@ class NotificationEventResponse { case 1: if ($ftype == TType::LST) { $this->events = array(); - $_size444 = 0; - $_etype447 = 0; - $xfer += 
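// --- Editor's note (illustrative sketch, not part of the generated patch) ---
// The is_scalar() branches above are how the PHP generator represents a
// Thrift set<i64> (GetOpenTxnsResponse::open_txns, and the aborted/nosuch
// sets of HeartbeatTxnRangeResponse): scalar elements are stored as array
// keys mapped to true, giving O(1) membership tests, while non-scalar
// elements fall back to being appended as values. The transaction ids below
// are made up for illustration.
$open_txns = array();
foreach (array(101, 102, 105) as $txnId) {
  $open_txns[$txnId] = true;          // scalar element => used as a key
}
var_dump(isset($open_txns[102]));     // bool(true): set membership check
// On write, the generated foreach ($set as $iterK => $iterV) pair then emits
// the key when the value is the scalar marker (true), and the value otherwise.
// ----------------------------------------------------------------------------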
$input->readListBegin($_etype447, $_size444); - for ($_i448 = 0; $_i448 < $_size444; ++$_i448) + $_size472 = 0; + $_etype475 = 0; + $xfer += $input->readListBegin($_etype475, $_size472); + for ($_i476 = 0; $_i476 < $_size472; ++$_i476) { - $elem449 = null; - $elem449 = new \metastore\NotificationEvent(); - $xfer += $elem449->read($input); - $this->events []= $elem449; + $elem477 = null; + $elem477 = new \metastore\NotificationEvent(); + $xfer += $elem477->read($input); + $this->events []= $elem477; } $xfer += $input->readListEnd(); } else { @@ -13971,9 +14768,9 @@ class NotificationEventResponse { { $output->writeListBegin(TType::STRUCT, count($this->events)); { - foreach ($this->events as $iter450) + foreach ($this->events as $iter478) { - $xfer += $iter450->write($output); + $xfer += $iter478->write($output); } } $output->writeListEnd(); @@ -14112,14 +14909,14 @@ class InsertEventRequestData { case 1: if ($ftype == TType::LST) { $this->filesAdded = array(); - $_size451 = 0; - $_etype454 = 0; - $xfer += $input->readListBegin($_etype454, $_size451); - for ($_i455 = 0; $_i455 < $_size451; ++$_i455) + $_size479 = 0; + $_etype482 = 0; + $xfer += $input->readListBegin($_etype482, $_size479); + for ($_i483 = 0; $_i483 < $_size479; ++$_i483) { - $elem456 = null; - $xfer += $input->readString($elem456); - $this->filesAdded []= $elem456; + $elem484 = null; + $xfer += $input->readString($elem484); + $this->filesAdded []= $elem484; } $xfer += $input->readListEnd(); } else { @@ -14147,9 +14944,9 @@ class InsertEventRequestData { { $output->writeListBegin(TType::STRING, count($this->filesAdded)); { - foreach ($this->filesAdded as $iter457) + foreach ($this->filesAdded as $iter485) { - $xfer += $output->writeString($iter457); + $xfer += $output->writeString($iter485); } } $output->writeListEnd(); @@ -14367,14 +15164,14 @@ class FireEventRequest { case 5: if ($ftype == TType::LST) { $this->partitionVals = array(); - $_size458 = 0; - $_etype461 = 0; - $xfer += $input->readListBegin($_etype461, $_size458); - for ($_i462 = 0; $_i462 < $_size458; ++$_i462) + $_size486 = 0; + $_etype489 = 0; + $xfer += $input->readListBegin($_etype489, $_size486); + for ($_i490 = 0; $_i490 < $_size486; ++$_i490) { - $elem463 = null; - $xfer += $input->readString($elem463); - $this->partitionVals []= $elem463; + $elem491 = null; + $xfer += $input->readString($elem491); + $this->partitionVals []= $elem491; } $xfer += $input->readListEnd(); } else { @@ -14425,9 +15222,9 @@ class FireEventRequest { { $output->writeListBegin(TType::STRING, count($this->partitionVals)); { - foreach ($this->partitionVals as $iter464) + foreach ($this->partitionVals as $iter492) { - $xfer += $output->writeString($iter464); + $xfer += $output->writeString($iter492); } } $output->writeListEnd(); @@ -14805,18 +15602,18 @@ class GetFileMetadataByExprResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size465 = 0; - $_ktype466 = 0; - $_vtype467 = 0; - $xfer += $input->readMapBegin($_ktype466, $_vtype467, $_size465); - for ($_i469 = 0; $_i469 < $_size465; ++$_i469) + $_size493 = 0; + $_ktype494 = 0; + $_vtype495 = 0; + $xfer += $input->readMapBegin($_ktype494, $_vtype495, $_size493); + for ($_i497 = 0; $_i497 < $_size493; ++$_i497) { - $key470 = 0; - $val471 = new \metastore\MetadataPpdResult(); - $xfer += $input->readI64($key470); - $val471 = new \metastore\MetadataPpdResult(); - $xfer += $val471->read($input); - $this->metadata[$key470] = $val471; + $key498 = 0; + $val499 = new \metastore\MetadataPpdResult(); + $xfer += 
$input->readI64($key498); + $val499 = new \metastore\MetadataPpdResult(); + $xfer += $val499->read($input); + $this->metadata[$key498] = $val499; } $xfer += $input->readMapEnd(); } else { @@ -14851,10 +15648,10 @@ class GetFileMetadataByExprResult { { $output->writeMapBegin(TType::I64, TType::STRUCT, count($this->metadata)); { - foreach ($this->metadata as $kiter472 => $viter473) + foreach ($this->metadata as $kiter500 => $viter501) { - $xfer += $output->writeI64($kiter472); - $xfer += $viter473->write($output); + $xfer += $output->writeI64($kiter500); + $xfer += $viter501->write($output); } } $output->writeMapEnd(); @@ -14956,14 +15753,14 @@ class GetFileMetadataByExprRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size474 = 0; - $_etype477 = 0; - $xfer += $input->readListBegin($_etype477, $_size474); - for ($_i478 = 0; $_i478 < $_size474; ++$_i478) + $_size502 = 0; + $_etype505 = 0; + $xfer += $input->readListBegin($_etype505, $_size502); + for ($_i506 = 0; $_i506 < $_size502; ++$_i506) { - $elem479 = null; - $xfer += $input->readI64($elem479); - $this->fileIds []= $elem479; + $elem507 = null; + $xfer += $input->readI64($elem507); + $this->fileIds []= $elem507; } $xfer += $input->readListEnd(); } else { @@ -15012,9 +15809,9 @@ class GetFileMetadataByExprRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter480) + foreach ($this->fileIds as $iter508) { - $xfer += $output->writeI64($iter480); + $xfer += $output->writeI64($iter508); } } $output->writeListEnd(); @@ -15108,17 +15905,17 @@ class GetFileMetadataResult { case 1: if ($ftype == TType::MAP) { $this->metadata = array(); - $_size481 = 0; - $_ktype482 = 0; - $_vtype483 = 0; - $xfer += $input->readMapBegin($_ktype482, $_vtype483, $_size481); - for ($_i485 = 0; $_i485 < $_size481; ++$_i485) + $_size509 = 0; + $_ktype510 = 0; + $_vtype511 = 0; + $xfer += $input->readMapBegin($_ktype510, $_vtype511, $_size509); + for ($_i513 = 0; $_i513 < $_size509; ++$_i513) { - $key486 = 0; - $val487 = ''; - $xfer += $input->readI64($key486); - $xfer += $input->readString($val487); - $this->metadata[$key486] = $val487; + $key514 = 0; + $val515 = ''; + $xfer += $input->readI64($key514); + $xfer += $input->readString($val515); + $this->metadata[$key514] = $val515; } $xfer += $input->readMapEnd(); } else { @@ -15153,10 +15950,10 @@ class GetFileMetadataResult { { $output->writeMapBegin(TType::I64, TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $kiter488 => $viter489) + foreach ($this->metadata as $kiter516 => $viter517) { - $xfer += $output->writeI64($kiter488); - $xfer += $output->writeString($viter489); + $xfer += $output->writeI64($kiter516); + $xfer += $output->writeString($viter517); } } $output->writeMapEnd(); @@ -15225,14 +16022,14 @@ class GetFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size490 = 0; - $_etype493 = 0; - $xfer += $input->readListBegin($_etype493, $_size490); - for ($_i494 = 0; $_i494 < $_size490; ++$_i494) + $_size518 = 0; + $_etype521 = 0; + $xfer += $input->readListBegin($_etype521, $_size518); + for ($_i522 = 0; $_i522 < $_size518; ++$_i522) { - $elem495 = null; - $xfer += $input->readI64($elem495); - $this->fileIds []= $elem495; + $elem523 = null; + $xfer += $input->readI64($elem523); + $this->fileIds []= $elem523; } $xfer += $input->readListEnd(); } else { @@ -15260,9 +16057,9 @@ class GetFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - 
foreach ($this->fileIds as $iter496) + foreach ($this->fileIds as $iter524) { - $xfer += $output->writeI64($iter496); + $xfer += $output->writeI64($iter524); } } $output->writeListEnd(); @@ -15402,14 +16199,14 @@ class PutFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size497 = 0; - $_etype500 = 0; - $xfer += $input->readListBegin($_etype500, $_size497); - for ($_i501 = 0; $_i501 < $_size497; ++$_i501) + $_size525 = 0; + $_etype528 = 0; + $xfer += $input->readListBegin($_etype528, $_size525); + for ($_i529 = 0; $_i529 < $_size525; ++$_i529) { - $elem502 = null; - $xfer += $input->readI64($elem502); - $this->fileIds []= $elem502; + $elem530 = null; + $xfer += $input->readI64($elem530); + $this->fileIds []= $elem530; } $xfer += $input->readListEnd(); } else { @@ -15419,14 +16216,14 @@ class PutFileMetadataRequest { case 2: if ($ftype == TType::LST) { $this->metadata = array(); - $_size503 = 0; - $_etype506 = 0; - $xfer += $input->readListBegin($_etype506, $_size503); - for ($_i507 = 0; $_i507 < $_size503; ++$_i507) + $_size531 = 0; + $_etype534 = 0; + $xfer += $input->readListBegin($_etype534, $_size531); + for ($_i535 = 0; $_i535 < $_size531; ++$_i535) { - $elem508 = null; - $xfer += $input->readString($elem508); - $this->metadata []= $elem508; + $elem536 = null; + $xfer += $input->readString($elem536); + $this->metadata []= $elem536; } $xfer += $input->readListEnd(); } else { @@ -15461,9 +16258,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter509) + foreach ($this->fileIds as $iter537) { - $xfer += $output->writeI64($iter509); + $xfer += $output->writeI64($iter537); } } $output->writeListEnd(); @@ -15478,9 +16275,9 @@ class PutFileMetadataRequest { { $output->writeListBegin(TType::STRING, count($this->metadata)); { - foreach ($this->metadata as $iter510) + foreach ($this->metadata as $iter538) { - $xfer += $output->writeString($iter510); + $xfer += $output->writeString($iter538); } } $output->writeListEnd(); @@ -15599,14 +16396,14 @@ class ClearFileMetadataRequest { case 1: if ($ftype == TType::LST) { $this->fileIds = array(); - $_size511 = 0; - $_etype514 = 0; - $xfer += $input->readListBegin($_etype514, $_size511); - for ($_i515 = 0; $_i515 < $_size511; ++$_i515) + $_size539 = 0; + $_etype542 = 0; + $xfer += $input->readListBegin($_etype542, $_size539); + for ($_i543 = 0; $_i543 < $_size539; ++$_i543) { - $elem516 = null; - $xfer += $input->readI64($elem516); - $this->fileIds []= $elem516; + $elem544 = null; + $xfer += $input->readI64($elem544); + $this->fileIds []= $elem544; } $xfer += $input->readListEnd(); } else { @@ -15634,9 +16431,9 @@ class ClearFileMetadataRequest { { $output->writeListBegin(TType::I64, count($this->fileIds)); { - foreach ($this->fileIds as $iter517) + foreach ($this->fileIds as $iter545) { - $xfer += $output->writeI64($iter517); + $xfer += $output->writeI64($iter545); } } $output->writeListEnd(); @@ -15920,15 +16717,15 @@ class GetAllFunctionsResponse { case 1: if ($ftype == TType::LST) { $this->functions = array(); - $_size518 = 0; - $_etype521 = 0; - $xfer += $input->readListBegin($_etype521, $_size518); - for ($_i522 = 0; $_i522 < $_size518; ++$_i522) + $_size546 = 0; + $_etype549 = 0; + $xfer += $input->readListBegin($_etype549, $_size546); + for ($_i550 = 0; $_i550 < $_size546; ++$_i550) { - $elem523 = null; - $elem523 = new \metastore\Function(); - $xfer += $elem523->read($input); - $this->functions []= $elem523; + $elem551 = null; 
+ $elem551 = new \metastore\Function(); + $xfer += $elem551->read($input); + $this->functions []= $elem551; } $xfer += $input->readListEnd(); } else { @@ -15956,9 +16753,9 @@ class GetAllFunctionsResponse { { $output->writeListBegin(TType::STRUCT, count($this->functions)); { - foreach ($this->functions as $iter524) + foreach ($this->functions as $iter552) { - $xfer += $iter524->write($output); + $xfer += $iter552->write($output); } } $output->writeListEnd(); diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index 516b926..0ae70ab 100755 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -101,6 +101,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' Index get_index_by_name(string db_name, string tbl_name, string index_name)') print(' get_indexes(string db_name, string tbl_name, i16 max_indexes)') print(' get_index_names(string db_name, string tbl_name, i16 max_indexes)') + print(' get_primary_keys(string db_name, string tbl_name)') + print(' get_foreign_keys(string parent_db_name, string parent_tbl_name, string foreign_db_name, string foreign_tbl_name)') print(' bool update_table_column_statistics(ColumnStatistics stats_obj)') print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)') print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') @@ -698,6 +700,18 @@ elif cmd == 'get_index_names': sys.exit(1) pp.pprint(client.get_index_names(args[0],args[1],eval(args[2]),)) +elif cmd == 'get_primary_keys': + if len(args) != 2: + print('get_primary_keys requires 2 args') + sys.exit(1) + pp.pprint(client.get_primary_keys(args[0],args[1],)) + +elif cmd == 'get_foreign_keys': + if len(args) != 4: + print('get_foreign_keys requires 4 args') + sys.exit(1) + pp.pprint(client.get_foreign_keys(args[0],args[1],args[2],args[3],)) + elif cmd == 'update_table_column_statistics': if len(args) != 1: print('update_table_column_statistics requires 1 args') diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index ac8d8a4..2e170a2 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -695,6 +695,24 @@ def get_index_names(self, db_name, tbl_name, max_indexes): """ pass + def get_primary_keys(self, db_name, tbl_name): + """ + Parameters: + - db_name + - tbl_name + """ + pass + + def get_foreign_keys(self, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name): + """ + Parameters: + - parent_db_name + - parent_tbl_name + - foreign_db_name + - foreign_tbl_name + """ + pass + def update_table_column_statistics(self, stats_obj): """ Parameters: @@ -4134,6 +4152,84 @@ def recv_get_index_names(self): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_index_names failed: unknown result") + def get_primary_keys(self, db_name, tbl_name): + """ + Parameters: + - db_name + - tbl_name + """ + self.send_get_primary_keys(db_name, tbl_name) + return self.recv_get_primary_keys() + + def send_get_primary_keys(self, db_name, tbl_name): + self._oprot.writeMessageBegin('get_primary_keys', TMessageType.CALL, self._seqid) + args = get_primary_keys_args() + args.db_name = db_name + 
args.tbl_name = tbl_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_primary_keys(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_primary_keys_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_primary_keys failed: unknown result") + + def get_foreign_keys(self, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name): + """ + Parameters: + - parent_db_name + - parent_tbl_name + - foreign_db_name + - foreign_tbl_name + """ + self.send_get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name) + return self.recv_get_foreign_keys() + + def send_get_foreign_keys(self, parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name): + self._oprot.writeMessageBegin('get_foreign_keys', TMessageType.CALL, self._seqid) + args = get_foreign_keys_args() + args.parent_db_name = parent_db_name + args.parent_tbl_name = parent_tbl_name + args.foreign_db_name = foreign_db_name + args.foreign_tbl_name = foreign_tbl_name + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_get_foreign_keys(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = get_foreign_keys_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + raise TApplicationException(TApplicationException.MISSING_RESULT, "get_foreign_keys failed: unknown result") + def update_table_column_statistics(self, stats_obj): """ Parameters: @@ -6423,6 +6519,8 @@ def __init__(self, handler): self._processMap["get_index_by_name"] = Processor.process_get_index_by_name self._processMap["get_indexes"] = Processor.process_get_indexes self._processMap["get_index_names"] = Processor.process_get_index_names + self._processMap["get_primary_keys"] = Processor.process_get_primary_keys + self._processMap["get_foreign_keys"] = Processor.process_get_foreign_keys self._processMap["update_table_column_statistics"] = Processor.process_update_table_column_statistics self._processMap["update_partition_column_statistics"] = Processor.process_update_partition_column_statistics self._processMap["get_table_column_statistics"] = Processor.process_get_table_column_statistics @@ -8493,6 +8591,56 @@ def process_get_index_names(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_get_primary_keys(self, seqid, iprot, oprot): + args = get_primary_keys_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_primary_keys_result() + try: + result.success = self._handler.get_primary_keys(args.db_name, args.tbl_name) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + 
msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_primary_keys", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_get_foreign_keys(self, seqid, iprot, oprot): + args = get_foreign_keys_args() + args.read(iprot) + iprot.readMessageEnd() + result = get_foreign_keys_result() + try: + result.success = self._handler.get_foreign_keys(args.parent_db_name, args.parent_tbl_name, args.foreign_db_name, args.foreign_tbl_name) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except MetaException as o1: + msg_type = TMessageType.REPLY + result.o1 = o1 + except NoSuchObjectException as o2: + msg_type = TMessageType.REPLY + result.o2 = o2 + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("get_foreign_keys", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_update_table_column_statistics(self, seqid, iprot, oprot): args = update_table_column_statistics_args() args.read(iprot) @@ -10879,10 +11027,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype525, _size522) = iprot.readListBegin() - for _i526 in xrange(_size522): - _elem527 = iprot.readString() - self.success.append(_elem527) + (_etype553, _size550) = iprot.readListBegin() + for _i554 in xrange(_size550): + _elem555 = iprot.readString() + self.success.append(_elem555) iprot.readListEnd() else: iprot.skip(ftype) @@ -10905,8 +11053,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter528 in self.success: - oprot.writeString(iter528) + for iter556 in self.success: + oprot.writeString(iter556) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11011,10 +11159,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype532, _size529) = iprot.readListBegin() - for _i533 in xrange(_size529): - _elem534 = iprot.readString() - self.success.append(_elem534) + (_etype560, _size557) = iprot.readListBegin() + for _i561 in xrange(_size557): + _elem562 = iprot.readString() + self.success.append(_elem562) iprot.readListEnd() else: iprot.skip(ftype) @@ -11037,8 +11185,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter535 in self.success: - oprot.writeString(iter535) + for iter563 in self.success: + oprot.writeString(iter563) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11808,12 +11956,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype537, _vtype538, _size536 ) = iprot.readMapBegin() - for _i540 in xrange(_size536): - _key541 = iprot.readString() - _val542 = Type() - _val542.read(iprot) - self.success[_key541] = _val542 + (_ktype565, _vtype566, _size564 ) = iprot.readMapBegin() + for _i568 in xrange(_size564): + _key569 = iprot.readString() + _val570 = Type() + _val570.read(iprot) + self.success[_key569] = _val570 iprot.readMapEnd() else: iprot.skip(ftype) @@ -11836,9 +11984,9 @@ def write(self, oprot): if self.success is not None: 
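For context: the process_get_primary_keys/process_get_foreign_keys handlers above map MetaException and NoSuchObjectException onto the o1/o2 result fields, and the client-side recv_* methods re-raise whichever one the server set. A minimal usage sketch of the new calls through the generated Python client follows; it is illustrative only and not part of this patch. The host, port, and table names are assumptions (9083 is merely the conventional metastore port), and it presumes the standard Thrift Python runtime plus the generated hive_metastore package on sys.path.

# Illustrative only -- not generated code.
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import MetaException, NoSuchObjectException

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
protocol = TBinaryProtocol.TBinaryProtocol(transport)
client = ThriftHiveMetastore.Client(protocol)
transport.open()
try:
    # Each call mirrors the send_*/recv_* pair shown above: write the args
    # struct, flush, then read the result struct and re-raise o1/o2 if set.
    pks = client.get_primary_keys('default', 'orders')        # hypothetical table
    fks = client.get_foreign_keys('default', 'customers',     # parent side
                                  'default', 'orders')        # foreign side
except (MetaException, NoSuchObjectException) as e:
    print(e)
finally:
    transport.close()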
oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter543,viter544 in self.success.items(): - oprot.writeString(kiter543) - viter544.write(oprot) + for kiter571,viter572 in self.success.items(): + oprot.writeString(kiter571) + viter572.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -11981,11 +12129,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype548, _size545) = iprot.readListBegin() - for _i549 in xrange(_size545): - _elem550 = FieldSchema() - _elem550.read(iprot) - self.success.append(_elem550) + (_etype576, _size573) = iprot.readListBegin() + for _i577 in xrange(_size573): + _elem578 = FieldSchema() + _elem578.read(iprot) + self.success.append(_elem578) iprot.readListEnd() else: iprot.skip(ftype) @@ -12020,8 +12168,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter551 in self.success: - iter551.write(oprot) + for iter579 in self.success: + iter579.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12188,11 +12336,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype555, _size552) = iprot.readListBegin() - for _i556 in xrange(_size552): - _elem557 = FieldSchema() - _elem557.read(iprot) - self.success.append(_elem557) + (_etype583, _size580) = iprot.readListBegin() + for _i584 in xrange(_size580): + _elem585 = FieldSchema() + _elem585.read(iprot) + self.success.append(_elem585) iprot.readListEnd() else: iprot.skip(ftype) @@ -12227,8 +12375,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter558 in self.success: - iter558.write(oprot) + for iter586 in self.success: + iter586.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12381,11 +12529,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype562, _size559) = iprot.readListBegin() - for _i563 in xrange(_size559): - _elem564 = FieldSchema() - _elem564.read(iprot) - self.success.append(_elem564) + (_etype590, _size587) = iprot.readListBegin() + for _i591 in xrange(_size587): + _elem592 = FieldSchema() + _elem592.read(iprot) + self.success.append(_elem592) iprot.readListEnd() else: iprot.skip(ftype) @@ -12420,8 +12568,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter565 in self.success: - iter565.write(oprot) + for iter593 in self.success: + iter593.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -12588,11 +12736,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype569, _size566) = iprot.readListBegin() - for _i570 in xrange(_size566): - _elem571 = FieldSchema() - _elem571.read(iprot) - self.success.append(_elem571) + (_etype597, _size594) = iprot.readListBegin() + for _i598 in xrange(_size594): + _elem599 = FieldSchema() + _elem599.read(iprot) + self.success.append(_elem599) iprot.readListEnd() else: iprot.skip(ftype) @@ -12627,8 +12775,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter572 in self.success: 
- iter572.write(oprot) + for iter600 in self.success: + iter600.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13493,10 +13641,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype576, _size573) = iprot.readListBegin() - for _i577 in xrange(_size573): - _elem578 = iprot.readString() - self.success.append(_elem578) + (_etype604, _size601) = iprot.readListBegin() + for _i605 in xrange(_size601): + _elem606 = iprot.readString() + self.success.append(_elem606) iprot.readListEnd() else: iprot.skip(ftype) @@ -13519,8 +13667,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter579 in self.success: - oprot.writeString(iter579) + for iter607 in self.success: + oprot.writeString(iter607) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13593,10 +13741,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.tbl_types = [] - (_etype583, _size580) = iprot.readListBegin() - for _i584 in xrange(_size580): - _elem585 = iprot.readString() - self.tbl_types.append(_elem585) + (_etype611, _size608) = iprot.readListBegin() + for _i612 in xrange(_size608): + _elem613 = iprot.readString() + self.tbl_types.append(_elem613) iprot.readListEnd() else: iprot.skip(ftype) @@ -13621,8 +13769,8 @@ def write(self, oprot): if self.tbl_types is not None: oprot.writeFieldBegin('tbl_types', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.tbl_types)) - for iter586 in self.tbl_types: - oprot.writeString(iter586) + for iter614 in self.tbl_types: + oprot.writeString(iter614) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13678,11 +13826,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype590, _size587) = iprot.readListBegin() - for _i591 in xrange(_size587): - _elem592 = TableMeta() - _elem592.read(iprot) - self.success.append(_elem592) + (_etype618, _size615) = iprot.readListBegin() + for _i619 in xrange(_size615): + _elem620 = TableMeta() + _elem620.read(iprot) + self.success.append(_elem620) iprot.readListEnd() else: iprot.skip(ftype) @@ -13705,8 +13853,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter593 in self.success: - iter593.write(oprot) + for iter621 in self.success: + iter621.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -13830,10 +13978,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype597, _size594) = iprot.readListBegin() - for _i598 in xrange(_size594): - _elem599 = iprot.readString() - self.success.append(_elem599) + (_etype625, _size622) = iprot.readListBegin() + for _i626 in xrange(_size622): + _elem627 = iprot.readString() + self.success.append(_elem627) iprot.readListEnd() else: iprot.skip(ftype) @@ -13856,8 +14004,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter600 in self.success: - oprot.writeString(iter600) + for iter628 in self.success: + oprot.writeString(iter628) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14093,10 +14241,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype604, _size601) = iprot.readListBegin() - 
for _i605 in xrange(_size601): - _elem606 = iprot.readString() - self.tbl_names.append(_elem606) + (_etype632, _size629) = iprot.readListBegin() + for _i633 in xrange(_size629): + _elem634 = iprot.readString() + self.tbl_names.append(_elem634) iprot.readListEnd() else: iprot.skip(ftype) @@ -14117,8 +14265,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter607 in self.tbl_names: - oprot.writeString(iter607) + for iter635 in self.tbl_names: + oprot.writeString(iter635) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14179,11 +14327,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype611, _size608) = iprot.readListBegin() - for _i612 in xrange(_size608): - _elem613 = Table() - _elem613.read(iprot) - self.success.append(_elem613) + (_etype639, _size636) = iprot.readListBegin() + for _i640 in xrange(_size636): + _elem641 = Table() + _elem641.read(iprot) + self.success.append(_elem641) iprot.readListEnd() else: iprot.skip(ftype) @@ -14218,8 +14366,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter614 in self.success: - iter614.write(oprot) + for iter642 in self.success: + iter642.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14385,10 +14533,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype618, _size615) = iprot.readListBegin() - for _i619 in xrange(_size615): - _elem620 = iprot.readString() - self.success.append(_elem620) + (_etype646, _size643) = iprot.readListBegin() + for _i647 in xrange(_size643): + _elem648 = iprot.readString() + self.success.append(_elem648) iprot.readListEnd() else: iprot.skip(ftype) @@ -14423,8 +14571,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter621 in self.success: - oprot.writeString(iter621) + for iter649 in self.success: + oprot.writeString(iter649) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15394,11 +15542,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype625, _size622) = iprot.readListBegin() - for _i626 in xrange(_size622): - _elem627 = Partition() - _elem627.read(iprot) - self.new_parts.append(_elem627) + (_etype653, _size650) = iprot.readListBegin() + for _i654 in xrange(_size650): + _elem655 = Partition() + _elem655.read(iprot) + self.new_parts.append(_elem655) iprot.readListEnd() else: iprot.skip(ftype) @@ -15415,8 +15563,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter628 in self.new_parts: - iter628.write(oprot) + for iter656 in self.new_parts: + iter656.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15574,11 +15722,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype632, _size629) = iprot.readListBegin() - for _i633 in xrange(_size629): - _elem634 = PartitionSpec() - _elem634.read(iprot) - self.new_parts.append(_elem634) + (_etype660, _size657) = iprot.readListBegin() + for _i661 in xrange(_size657): + _elem662 = PartitionSpec() + _elem662.read(iprot) + 
self.new_parts.append(_elem662) iprot.readListEnd() else: iprot.skip(ftype) @@ -15595,8 +15743,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter635 in self.new_parts: - iter635.write(oprot) + for iter663 in self.new_parts: + iter663.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15770,10 +15918,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype639, _size636) = iprot.readListBegin() - for _i640 in xrange(_size636): - _elem641 = iprot.readString() - self.part_vals.append(_elem641) + (_etype667, _size664) = iprot.readListBegin() + for _i668 in xrange(_size664): + _elem669 = iprot.readString() + self.part_vals.append(_elem669) iprot.readListEnd() else: iprot.skip(ftype) @@ -15798,8 +15946,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter642 in self.part_vals: - oprot.writeString(iter642) + for iter670 in self.part_vals: + oprot.writeString(iter670) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16152,10 +16300,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype646, _size643) = iprot.readListBegin() - for _i647 in xrange(_size643): - _elem648 = iprot.readString() - self.part_vals.append(_elem648) + (_etype674, _size671) = iprot.readListBegin() + for _i675 in xrange(_size671): + _elem676 = iprot.readString() + self.part_vals.append(_elem676) iprot.readListEnd() else: iprot.skip(ftype) @@ -16186,8 +16334,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter649 in self.part_vals: - oprot.writeString(iter649) + for iter677 in self.part_vals: + oprot.writeString(iter677) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -16782,10 +16930,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype653, _size650) = iprot.readListBegin() - for _i654 in xrange(_size650): - _elem655 = iprot.readString() - self.part_vals.append(_elem655) + (_etype681, _size678) = iprot.readListBegin() + for _i682 in xrange(_size678): + _elem683 = iprot.readString() + self.part_vals.append(_elem683) iprot.readListEnd() else: iprot.skip(ftype) @@ -16815,8 +16963,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter656 in self.part_vals: - oprot.writeString(iter656) + for iter684 in self.part_vals: + oprot.writeString(iter684) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -16989,10 +17137,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype660, _size657) = iprot.readListBegin() - for _i661 in xrange(_size657): - _elem662 = iprot.readString() - self.part_vals.append(_elem662) + (_etype688, _size685) = iprot.readListBegin() + for _i689 in xrange(_size685): + _elem690 = iprot.readString() + self.part_vals.append(_elem690) iprot.readListEnd() else: iprot.skip(ftype) @@ -17028,8 +17176,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, 
len(self.part_vals)) - for iter663 in self.part_vals: - oprot.writeString(iter663) + for iter691 in self.part_vals: + oprot.writeString(iter691) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -17766,10 +17914,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype667, _size664) = iprot.readListBegin() - for _i668 in xrange(_size664): - _elem669 = iprot.readString() - self.part_vals.append(_elem669) + (_etype695, _size692) = iprot.readListBegin() + for _i696 in xrange(_size692): + _elem697 = iprot.readString() + self.part_vals.append(_elem697) iprot.readListEnd() else: iprot.skip(ftype) @@ -17794,8 +17942,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter670 in self.part_vals: - oprot.writeString(iter670) + for iter698 in self.part_vals: + oprot.writeString(iter698) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -17954,11 +18102,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype672, _vtype673, _size671 ) = iprot.readMapBegin() - for _i675 in xrange(_size671): - _key676 = iprot.readString() - _val677 = iprot.readString() - self.partitionSpecs[_key676] = _val677 + (_ktype700, _vtype701, _size699 ) = iprot.readMapBegin() + for _i703 in xrange(_size699): + _key704 = iprot.readString() + _val705 = iprot.readString() + self.partitionSpecs[_key704] = _val705 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17995,9 +18143,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter678,viter679 in self.partitionSpecs.items(): - oprot.writeString(kiter678) - oprot.writeString(viter679) + for kiter706,viter707 in self.partitionSpecs.items(): + oprot.writeString(kiter706) + oprot.writeString(viter707) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -18202,11 +18350,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype681, _vtype682, _size680 ) = iprot.readMapBegin() - for _i684 in xrange(_size680): - _key685 = iprot.readString() - _val686 = iprot.readString() - self.partitionSpecs[_key685] = _val686 + (_ktype709, _vtype710, _size708 ) = iprot.readMapBegin() + for _i712 in xrange(_size708): + _key713 = iprot.readString() + _val714 = iprot.readString() + self.partitionSpecs[_key713] = _val714 iprot.readMapEnd() else: iprot.skip(ftype) @@ -18243,9 +18391,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter687,viter688 in self.partitionSpecs.items(): - oprot.writeString(kiter687) - oprot.writeString(viter688) + for kiter715,viter716 in self.partitionSpecs.items(): + oprot.writeString(kiter715) + oprot.writeString(viter716) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -18328,11 +18476,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype692, _size689) = iprot.readListBegin() - for _i693 in xrange(_size689): - _elem694 = Partition() - _elem694.read(iprot) - self.success.append(_elem694) + (_etype720, _size717) = iprot.readListBegin() + for _i721 in xrange(_size717): + _elem722 = Partition() + _elem722.read(iprot) 
+ self.success.append(_elem722) iprot.readListEnd() else: iprot.skip(ftype) @@ -18373,8 +18521,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter695 in self.success: - iter695.write(oprot) + for iter723 in self.success: + iter723.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18468,10 +18616,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype699, _size696) = iprot.readListBegin() - for _i700 in xrange(_size696): - _elem701 = iprot.readString() - self.part_vals.append(_elem701) + (_etype727, _size724) = iprot.readListBegin() + for _i728 in xrange(_size724): + _elem729 = iprot.readString() + self.part_vals.append(_elem729) iprot.readListEnd() else: iprot.skip(ftype) @@ -18483,10 +18631,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype705, _size702) = iprot.readListBegin() - for _i706 in xrange(_size702): - _elem707 = iprot.readString() - self.group_names.append(_elem707) + (_etype733, _size730) = iprot.readListBegin() + for _i734 in xrange(_size730): + _elem735 = iprot.readString() + self.group_names.append(_elem735) iprot.readListEnd() else: iprot.skip(ftype) @@ -18511,8 +18659,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter708 in self.part_vals: - oprot.writeString(iter708) + for iter736 in self.part_vals: + oprot.writeString(iter736) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -18522,8 +18670,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter709 in self.group_names: - oprot.writeString(iter709) + for iter737 in self.group_names: + oprot.writeString(iter737) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -18952,11 +19100,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype713, _size710) = iprot.readListBegin() - for _i714 in xrange(_size710): - _elem715 = Partition() - _elem715.read(iprot) - self.success.append(_elem715) + (_etype741, _size738) = iprot.readListBegin() + for _i742 in xrange(_size738): + _elem743 = Partition() + _elem743.read(iprot) + self.success.append(_elem743) iprot.readListEnd() else: iprot.skip(ftype) @@ -18985,8 +19133,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter716 in self.success: - iter716.write(oprot) + for iter744 in self.success: + iter744.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19080,10 +19228,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype720, _size717) = iprot.readListBegin() - for _i721 in xrange(_size717): - _elem722 = iprot.readString() - self.group_names.append(_elem722) + (_etype748, _size745) = iprot.readListBegin() + for _i749 in xrange(_size745): + _elem750 = iprot.readString() + self.group_names.append(_elem750) iprot.readListEnd() else: iprot.skip(ftype) @@ -19116,8 +19264,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, 
len(self.group_names)) - for iter723 in self.group_names: - oprot.writeString(iter723) + for iter751 in self.group_names: + oprot.writeString(iter751) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -19178,11 +19326,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype727, _size724) = iprot.readListBegin() - for _i728 in xrange(_size724): - _elem729 = Partition() - _elem729.read(iprot) - self.success.append(_elem729) + (_etype755, _size752) = iprot.readListBegin() + for _i756 in xrange(_size752): + _elem757 = Partition() + _elem757.read(iprot) + self.success.append(_elem757) iprot.readListEnd() else: iprot.skip(ftype) @@ -19211,8 +19359,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter730 in self.success: - iter730.write(oprot) + for iter758 in self.success: + iter758.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19370,11 +19518,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype734, _size731) = iprot.readListBegin() - for _i735 in xrange(_size731): - _elem736 = PartitionSpec() - _elem736.read(iprot) - self.success.append(_elem736) + (_etype762, _size759) = iprot.readListBegin() + for _i763 in xrange(_size759): + _elem764 = PartitionSpec() + _elem764.read(iprot) + self.success.append(_elem764) iprot.readListEnd() else: iprot.skip(ftype) @@ -19403,8 +19551,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter737 in self.success: - iter737.write(oprot) + for iter765 in self.success: + iter765.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19559,10 +19707,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype741, _size738) = iprot.readListBegin() - for _i742 in xrange(_size738): - _elem743 = iprot.readString() - self.success.append(_elem743) + (_etype769, _size766) = iprot.readListBegin() + for _i770 in xrange(_size766): + _elem771 = iprot.readString() + self.success.append(_elem771) iprot.readListEnd() else: iprot.skip(ftype) @@ -19585,8 +19733,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter744 in self.success: - oprot.writeString(iter744) + for iter772 in self.success: + oprot.writeString(iter772) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -19662,10 +19810,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype748, _size745) = iprot.readListBegin() - for _i749 in xrange(_size745): - _elem750 = iprot.readString() - self.part_vals.append(_elem750) + (_etype776, _size773) = iprot.readListBegin() + for _i777 in xrange(_size773): + _elem778 = iprot.readString() + self.part_vals.append(_elem778) iprot.readListEnd() else: iprot.skip(ftype) @@ -19695,8 +19843,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter751 in self.part_vals: - oprot.writeString(iter751) + for iter779 in self.part_vals: + oprot.writeString(iter779) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -19760,11 +19908,11 @@ def read(self, iprot): 
if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype755, _size752) = iprot.readListBegin() - for _i756 in xrange(_size752): - _elem757 = Partition() - _elem757.read(iprot) - self.success.append(_elem757) + (_etype783, _size780) = iprot.readListBegin() + for _i784 in xrange(_size780): + _elem785 = Partition() + _elem785.read(iprot) + self.success.append(_elem785) iprot.readListEnd() else: iprot.skip(ftype) @@ -19793,8 +19941,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter758 in self.success: - iter758.write(oprot) + for iter786 in self.success: + iter786.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -19881,10 +20029,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype762, _size759) = iprot.readListBegin() - for _i763 in xrange(_size759): - _elem764 = iprot.readString() - self.part_vals.append(_elem764) + (_etype790, _size787) = iprot.readListBegin() + for _i791 in xrange(_size787): + _elem792 = iprot.readString() + self.part_vals.append(_elem792) iprot.readListEnd() else: iprot.skip(ftype) @@ -19901,10 +20049,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype768, _size765) = iprot.readListBegin() - for _i769 in xrange(_size765): - _elem770 = iprot.readString() - self.group_names.append(_elem770) + (_etype796, _size793) = iprot.readListBegin() + for _i797 in xrange(_size793): + _elem798 = iprot.readString() + self.group_names.append(_elem798) iprot.readListEnd() else: iprot.skip(ftype) @@ -19929,8 +20077,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter771 in self.part_vals: - oprot.writeString(iter771) + for iter799 in self.part_vals: + oprot.writeString(iter799) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -19944,8 +20092,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter772 in self.group_names: - oprot.writeString(iter772) + for iter800 in self.group_names: + oprot.writeString(iter800) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -20007,11 +20155,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype776, _size773) = iprot.readListBegin() - for _i777 in xrange(_size773): - _elem778 = Partition() - _elem778.read(iprot) - self.success.append(_elem778) + (_etype804, _size801) = iprot.readListBegin() + for _i805 in xrange(_size801): + _elem806 = Partition() + _elem806.read(iprot) + self.success.append(_elem806) iprot.readListEnd() else: iprot.skip(ftype) @@ -20040,9 +20188,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter779 in self.success: - iter779.write(oprot) - oprot.writeListEnd() + for iter807 in self.success: + iter807.write(oprot) + oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: oprot.writeFieldBegin('o1', TType.STRUCT, 1) @@ -20122,10 +20270,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype783, _size780) = iprot.readListBegin() - for _i784 in xrange(_size780): - _elem785 = 
iprot.readString() - self.part_vals.append(_elem785) + (_etype811, _size808) = iprot.readListBegin() + for _i812 in xrange(_size808): + _elem813 = iprot.readString() + self.part_vals.append(_elem813) iprot.readListEnd() else: iprot.skip(ftype) @@ -20155,8 +20303,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter786 in self.part_vals: - oprot.writeString(iter786) + for iter814 in self.part_vals: + oprot.writeString(iter814) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -20220,10 +20368,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype790, _size787) = iprot.readListBegin() - for _i791 in xrange(_size787): - _elem792 = iprot.readString() - self.success.append(_elem792) + (_etype818, _size815) = iprot.readListBegin() + for _i819 in xrange(_size815): + _elem820 = iprot.readString() + self.success.append(_elem820) iprot.readListEnd() else: iprot.skip(ftype) @@ -20252,8 +20400,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter793 in self.success: - oprot.writeString(iter793) + for iter821 in self.success: + oprot.writeString(iter821) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20424,11 +20572,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype797, _size794) = iprot.readListBegin() - for _i798 in xrange(_size794): - _elem799 = Partition() - _elem799.read(iprot) - self.success.append(_elem799) + (_etype825, _size822) = iprot.readListBegin() + for _i826 in xrange(_size822): + _elem827 = Partition() + _elem827.read(iprot) + self.success.append(_elem827) iprot.readListEnd() else: iprot.skip(ftype) @@ -20457,8 +20605,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter800 in self.success: - iter800.write(oprot) + for iter828 in self.success: + iter828.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -20629,11 +20777,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype804, _size801) = iprot.readListBegin() - for _i805 in xrange(_size801): - _elem806 = PartitionSpec() - _elem806.read(iprot) - self.success.append(_elem806) + (_etype832, _size829) = iprot.readListBegin() + for _i833 in xrange(_size829): + _elem834 = PartitionSpec() + _elem834.read(iprot) + self.success.append(_elem834) iprot.readListEnd() else: iprot.skip(ftype) @@ -20662,8 +20810,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter807 in self.success: - iter807.write(oprot) + for iter835 in self.success: + iter835.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21083,10 +21231,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype811, _size808) = iprot.readListBegin() - for _i812 in xrange(_size808): - _elem813 = iprot.readString() - self.names.append(_elem813) + (_etype839, _size836) = iprot.readListBegin() + for _i840 in xrange(_size836): + _elem841 = iprot.readString() + self.names.append(_elem841) iprot.readListEnd() else: iprot.skip(ftype) @@ -21111,8 +21259,8 @@ def 
write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter814 in self.names: - oprot.writeString(iter814) + for iter842 in self.names: + oprot.writeString(iter842) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21171,11 +21319,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype818, _size815) = iprot.readListBegin() - for _i819 in xrange(_size815): - _elem820 = Partition() - _elem820.read(iprot) - self.success.append(_elem820) + (_etype846, _size843) = iprot.readListBegin() + for _i847 in xrange(_size843): + _elem848 = Partition() + _elem848.read(iprot) + self.success.append(_elem848) iprot.readListEnd() else: iprot.skip(ftype) @@ -21204,8 +21352,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter821 in self.success: - iter821.write(oprot) + for iter849 in self.success: + iter849.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21455,11 +21603,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype825, _size822) = iprot.readListBegin() - for _i826 in xrange(_size822): - _elem827 = Partition() - _elem827.read(iprot) - self.new_parts.append(_elem827) + (_etype853, _size850) = iprot.readListBegin() + for _i854 in xrange(_size850): + _elem855 = Partition() + _elem855.read(iprot) + self.new_parts.append(_elem855) iprot.readListEnd() else: iprot.skip(ftype) @@ -21484,8 +21632,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter828 in self.new_parts: - iter828.write(oprot) + for iter856 in self.new_parts: + iter856.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -21638,11 +21786,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype832, _size829) = iprot.readListBegin() - for _i833 in xrange(_size829): - _elem834 = Partition() - _elem834.read(iprot) - self.new_parts.append(_elem834) + (_etype860, _size857) = iprot.readListBegin() + for _i861 in xrange(_size857): + _elem862 = Partition() + _elem862.read(iprot) + self.new_parts.append(_elem862) iprot.readListEnd() else: iprot.skip(ftype) @@ -21673,8 +21821,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter835 in self.new_parts: - iter835.write(oprot) + for iter863 in self.new_parts: + iter863.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -22018,10 +22166,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype839, _size836) = iprot.readListBegin() - for _i840 in xrange(_size836): - _elem841 = iprot.readString() - self.part_vals.append(_elem841) + (_etype867, _size864) = iprot.readListBegin() + for _i868 in xrange(_size864): + _elem869 = iprot.readString() + self.part_vals.append(_elem869) iprot.readListEnd() else: iprot.skip(ftype) @@ -22052,8 +22200,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter842 in self.part_vals: - oprot.writeString(iter842) + for 
iter870 in self.part_vals: + oprot.writeString(iter870) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -22195,10 +22343,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype846, _size843) = iprot.readListBegin() - for _i847 in xrange(_size843): - _elem848 = iprot.readString() - self.part_vals.append(_elem848) + (_etype874, _size871) = iprot.readListBegin() + for _i875 in xrange(_size871): + _elem876 = iprot.readString() + self.part_vals.append(_elem876) iprot.readListEnd() else: iprot.skip(ftype) @@ -22220,8 +22368,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter849 in self.part_vals: - oprot.writeString(iter849) + for iter877 in self.part_vals: + oprot.writeString(iter877) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -22579,10 +22727,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype853, _size850) = iprot.readListBegin() - for _i854 in xrange(_size850): - _elem855 = iprot.readString() - self.success.append(_elem855) + (_etype881, _size878) = iprot.readListBegin() + for _i882 in xrange(_size878): + _elem883 = iprot.readString() + self.success.append(_elem883) iprot.readListEnd() else: iprot.skip(ftype) @@ -22605,8 +22753,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter856 in self.success: - oprot.writeString(iter856) + for iter884 in self.success: + oprot.writeString(iter884) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22730,11 +22878,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype858, _vtype859, _size857 ) = iprot.readMapBegin() - for _i861 in xrange(_size857): - _key862 = iprot.readString() - _val863 = iprot.readString() - self.success[_key862] = _val863 + (_ktype886, _vtype887, _size885 ) = iprot.readMapBegin() + for _i889 in xrange(_size885): + _key890 = iprot.readString() + _val891 = iprot.readString() + self.success[_key890] = _val891 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22757,9 +22905,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter864,viter865 in self.success.items(): - oprot.writeString(kiter864) - oprot.writeString(viter865) + for kiter892,viter893 in self.success.items(): + oprot.writeString(kiter892) + oprot.writeString(viter893) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22835,11 +22983,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype867, _vtype868, _size866 ) = iprot.readMapBegin() - for _i870 in xrange(_size866): - _key871 = iprot.readString() - _val872 = iprot.readString() - self.part_vals[_key871] = _val872 + (_ktype895, _vtype896, _size894 ) = iprot.readMapBegin() + for _i898 in xrange(_size894): + _key899 = iprot.readString() + _val900 = iprot.readString() + self.part_vals[_key899] = _val900 iprot.readMapEnd() else: iprot.skip(ftype) @@ -22869,9 +23017,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter873,viter874 in self.part_vals.items(): - 
oprot.writeString(kiter873) - oprot.writeString(viter874) + for kiter901,viter902 in self.part_vals.items(): + oprot.writeString(kiter901) + oprot.writeString(viter902) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -23085,11 +23233,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype876, _vtype877, _size875 ) = iprot.readMapBegin() - for _i879 in xrange(_size875): - _key880 = iprot.readString() - _val881 = iprot.readString() - self.part_vals[_key880] = _val881 + (_ktype904, _vtype905, _size903 ) = iprot.readMapBegin() + for _i907 in xrange(_size903): + _key908 = iprot.readString() + _val909 = iprot.readString() + self.part_vals[_key908] = _val909 iprot.readMapEnd() else: iprot.skip(ftype) @@ -23119,9 +23267,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter882,viter883 in self.part_vals.items(): - oprot.writeString(kiter882) - oprot.writeString(viter883) + for kiter910,viter911 in self.part_vals.items(): + oprot.writeString(kiter910) + oprot.writeString(viter911) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -24176,11 +24324,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype887, _size884) = iprot.readListBegin() - for _i888 in xrange(_size884): - _elem889 = Index() - _elem889.read(iprot) - self.success.append(_elem889) + (_etype915, _size912) = iprot.readListBegin() + for _i916 in xrange(_size912): + _elem917 = Index() + _elem917.read(iprot) + self.success.append(_elem917) iprot.readListEnd() else: iprot.skip(ftype) @@ -24209,8 +24357,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter890 in self.success: - iter890.write(oprot) + for iter918 in self.success: + iter918.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -24365,10 +24513,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype894, _size891) = iprot.readListBegin() - for _i895 in xrange(_size891): - _elem896 = iprot.readString() - self.success.append(_elem896) + (_etype922, _size919) = iprot.readListBegin() + for _i923 in xrange(_size919): + _elem924 = iprot.readString() + self.success.append(_elem924) iprot.readListEnd() else: iprot.skip(ftype) @@ -24391,8 +24539,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter897 in self.success: - oprot.writeString(iter897) + for iter925 in self.success: + oprot.writeString(iter925) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -24423,6 +24571,390 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class get_primary_keys_args: + """ + Attributes: + - db_name + - tbl_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'db_name', None, None, ), # 1 + (2, TType.STRING, 'tbl_name', None, None, ), # 2 + ) + + def __init__(self, db_name=None, tbl_name=None,): + self.db_name = db_name + self.tbl_name = tbl_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + 
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.tbl_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_primary_keys_args') + if self.db_name is not None: + oprot.writeFieldBegin('db_name', TType.STRING, 1) + oprot.writeString(self.db_name) + oprot.writeFieldEnd() + if self.tbl_name is not None: + oprot.writeFieldBegin('tbl_name', TType.STRING, 2) + oprot.writeString(self.tbl_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.db_name) + value = (value * 31) ^ hash(self.tbl_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_primary_keys_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(SQLPrimaryKey, SQLPrimaryKey.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype929, _size926) = iprot.readListBegin() + for _i930 in xrange(_size926): + _elem931 = SQLPrimaryKey() + _elem931.read(iprot) + self.success.append(_elem931) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_primary_keys_result') + if self.success is not None: + oprot.writeFieldBegin('success', 
TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter932 in self.success: + iter932.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_foreign_keys_args: + """ + Attributes: + - parent_db_name + - parent_tbl_name + - foreign_db_name + - foreign_tbl_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'parent_db_name', None, None, ), # 1 + (2, TType.STRING, 'parent_tbl_name', None, None, ), # 2 + (3, TType.STRING, 'foreign_db_name', None, None, ), # 3 + (4, TType.STRING, 'foreign_tbl_name', None, None, ), # 4 + ) + + def __init__(self, parent_db_name=None, parent_tbl_name=None, foreign_db_name=None, foreign_tbl_name=None,): + self.parent_db_name = parent_db_name + self.parent_tbl_name = parent_tbl_name + self.foreign_db_name = foreign_db_name + self.foreign_tbl_name = foreign_tbl_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.parent_db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.parent_tbl_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.foreign_db_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.foreign_tbl_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_foreign_keys_args') + if self.parent_db_name is not None: + oprot.writeFieldBegin('parent_db_name', TType.STRING, 1) + oprot.writeString(self.parent_db_name) + oprot.writeFieldEnd() + if self.parent_tbl_name is not None: + oprot.writeFieldBegin('parent_tbl_name', TType.STRING, 2) + oprot.writeString(self.parent_tbl_name) + oprot.writeFieldEnd() + if self.foreign_db_name is not None: + oprot.writeFieldBegin('foreign_db_name', TType.STRING, 3) + oprot.writeString(self.foreign_db_name) + oprot.writeFieldEnd() + if self.foreign_tbl_name is not None: + oprot.writeFieldBegin('foreign_tbl_name', TType.STRING, 4) + 
oprot.writeString(self.foreign_tbl_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.parent_db_name) + value = (value * 31) ^ hash(self.parent_tbl_name) + value = (value * 31) ^ hash(self.foreign_db_name) + value = (value * 31) ^ hash(self.foreign_tbl_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class get_foreign_keys_result: + """ + Attributes: + - success + - o1 + - o2 + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRUCT,(SQLForeignKey, SQLForeignKey.thrift_spec)), None, ), # 0 + (1, TType.STRUCT, 'o1', (MetaException, MetaException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 2 + ) + + def __init__(self, success=None, o1=None, o2=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype936, _size933) = iprot.readListBegin() + for _i937 in xrange(_size933): + _elem938 = SQLForeignKey() + _elem938.read(iprot) + self.success.append(_elem938) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = MetaException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = NoSuchObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('get_foreign_keys_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRUCT, len(self.success)) + for iter939 in self.success: + iter939.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.o1) + value = (value * 31) ^ hash(self.o2) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and 
self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class update_table_column_statistics_args: """ Attributes: @@ -26940,10 +27472,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype901, _size898) = iprot.readListBegin() - for _i902 in xrange(_size898): - _elem903 = iprot.readString() - self.success.append(_elem903) + (_etype943, _size940) = iprot.readListBegin() + for _i944 in xrange(_size940): + _elem945 = iprot.readString() + self.success.append(_elem945) iprot.readListEnd() else: iprot.skip(ftype) @@ -26966,8 +27498,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter904 in self.success: - oprot.writeString(iter904) + for iter946 in self.success: + oprot.writeString(iter946) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -27655,10 +28187,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype908, _size905) = iprot.readListBegin() - for _i909 in xrange(_size905): - _elem910 = iprot.readString() - self.success.append(_elem910) + (_etype950, _size947) = iprot.readListBegin() + for _i951 in xrange(_size947): + _elem952 = iprot.readString() + self.success.append(_elem952) iprot.readListEnd() else: iprot.skip(ftype) @@ -27681,8 +28213,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter911 in self.success: - oprot.writeString(iter911) + for iter953 in self.success: + oprot.writeString(iter953) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28196,11 +28728,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype915, _size912) = iprot.readListBegin() - for _i916 in xrange(_size912): - _elem917 = Role() - _elem917.read(iprot) - self.success.append(_elem917) + (_etype957, _size954) = iprot.readListBegin() + for _i958 in xrange(_size954): + _elem959 = Role() + _elem959.read(iprot) + self.success.append(_elem959) iprot.readListEnd() else: iprot.skip(ftype) @@ -28223,8 +28755,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter918 in self.success: - iter918.write(oprot) + for iter960 in self.success: + iter960.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -28733,10 +29265,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype922, _size919) = iprot.readListBegin() - for _i923 in xrange(_size919): - _elem924 = iprot.readString() - self.group_names.append(_elem924) + (_etype964, _size961) = iprot.readListBegin() + for _i965 in xrange(_size961): + _elem966 = iprot.readString() + self.group_names.append(_elem966) iprot.readListEnd() else: iprot.skip(ftype) @@ -28761,8 +29293,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter925 in self.group_names: - oprot.writeString(iter925) + for iter967 in self.group_names: + oprot.writeString(iter967) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -28989,11 +29521,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype929, _size926) = 
iprot.readListBegin() - for _i930 in xrange(_size926): - _elem931 = HiveObjectPrivilege() - _elem931.read(iprot) - self.success.append(_elem931) + (_etype971, _size968) = iprot.readListBegin() + for _i972 in xrange(_size968): + _elem973 = HiveObjectPrivilege() + _elem973.read(iprot) + self.success.append(_elem973) iprot.readListEnd() else: iprot.skip(ftype) @@ -29016,8 +29548,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter932 in self.success: - iter932.write(oprot) + for iter974 in self.success: + iter974.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -29515,10 +30047,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype936, _size933) = iprot.readListBegin() - for _i937 in xrange(_size933): - _elem938 = iprot.readString() - self.group_names.append(_elem938) + (_etype978, _size975) = iprot.readListBegin() + for _i979 in xrange(_size975): + _elem980 = iprot.readString() + self.group_names.append(_elem980) iprot.readListEnd() else: iprot.skip(ftype) @@ -29539,8 +30071,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter939 in self.group_names: - oprot.writeString(iter939) + for iter981 in self.group_names: + oprot.writeString(iter981) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -29595,10 +30127,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype943, _size940) = iprot.readListBegin() - for _i944 in xrange(_size940): - _elem945 = iprot.readString() - self.success.append(_elem945) + (_etype985, _size982) = iprot.readListBegin() + for _i986 in xrange(_size982): + _elem987 = iprot.readString() + self.success.append(_elem987) iprot.readListEnd() else: iprot.skip(ftype) @@ -29621,8 +30153,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter946 in self.success: - oprot.writeString(iter946) + for iter988 in self.success: + oprot.writeString(iter988) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -30554,10 +31086,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype950, _size947) = iprot.readListBegin() - for _i951 in xrange(_size947): - _elem952 = iprot.readString() - self.success.append(_elem952) + (_etype992, _size989) = iprot.readListBegin() + for _i993 in xrange(_size989): + _elem994 = iprot.readString() + self.success.append(_elem994) iprot.readListEnd() else: iprot.skip(ftype) @@ -30574,8 +31106,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter953 in self.success: - oprot.writeString(iter953) + for iter995 in self.success: + oprot.writeString(iter995) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -31102,10 +31634,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype957, _size954) = iprot.readListBegin() - for _i958 in xrange(_size954): - _elem959 = iprot.readString() - self.success.append(_elem959) + (_etype999, _size996) = iprot.readListBegin() + for _i1000 in xrange(_size996): + _elem1001 = iprot.readString() + self.success.append(_elem1001) 
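# --- Editor's usage sketch (not part of the generated patch) ---
# A minimal Python 2 client call against the new get_primary_keys /
# get_foreign_keys service methods whose args/result structs are generated
# above. The generated Client method signatures are inferred from those args
# structs; the host/port and the 'default.orders' / 'default.order_items'
# table names are made-up assumptions for illustration only.
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol
from hive_metastore import ThriftHiveMetastore

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()
try:
    # Each result element is a SQLPrimaryKey / SQLForeignKey struct from ttypes.py.
    for pk in client.get_primary_keys('default', 'orders'):
        print '%s (seq %d, constraint %s)' % (pk.column_name, pk.key_seq, pk.pk_name)
    for fk in client.get_foreign_keys('default', 'orders', 'default', 'order_items'):
        print '%s -> %s.%s' % (fk.fkcolumn_name, fk.pktable_name, fk.pkcolumn_name)
finally:
    transport.close()
# --- end sketch ---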
iprot.readListEnd() else: iprot.skip(ftype) @@ -31122,8 +31654,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter960 in self.success: - oprot.writeString(iter960) + for iter1002 in self.success: + oprot.writeString(iter1002) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index 10eaf4a..75af073 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -394,6 +394,453 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class ForeignKeyRel: + """ + Attributes: + - foreignCols + - parentCols + - foreignKeyName + - parentTableName + - parentTableSchema + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'foreignCols', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 1 + (2, TType.LIST, 'parentCols', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 2 + (3, TType.STRING, 'foreignKeyName', None, None, ), # 3 + (4, TType.STRING, 'parentTableName', None, None, ), # 4 + (5, TType.STRING, 'parentTableSchema', None, None, ), # 5 + ) + + def __init__(self, foreignCols=None, parentCols=None, foreignKeyName=None, parentTableName=None, parentTableSchema=None,): + self.foreignCols = foreignCols + self.parentCols = parentCols + self.foreignKeyName = foreignKeyName + self.parentTableName = parentTableName + self.parentTableSchema = parentTableSchema + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.foreignCols = [] + (_etype3, _size0) = iprot.readListBegin() + for _i4 in xrange(_size0): + _elem5 = FieldSchema() + _elem5.read(iprot) + self.foreignCols.append(_elem5) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.LIST: + self.parentCols = [] + (_etype9, _size6) = iprot.readListBegin() + for _i10 in xrange(_size6): + _elem11 = FieldSchema() + _elem11.read(iprot) + self.parentCols.append(_elem11) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.foreignKeyName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.parentTableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.parentTableSchema = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('ForeignKeyRel') + if self.foreignCols is not None: + oprot.writeFieldBegin('foreignCols', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.foreignCols)) + for iter12 in self.foreignCols: + 
iter12.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.parentCols is not None: + oprot.writeFieldBegin('parentCols', TType.LIST, 2) + oprot.writeListBegin(TType.STRUCT, len(self.parentCols)) + for iter13 in self.parentCols: + iter13.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.foreignKeyName is not None: + oprot.writeFieldBegin('foreignKeyName', TType.STRING, 3) + oprot.writeString(self.foreignKeyName) + oprot.writeFieldEnd() + if self.parentTableName is not None: + oprot.writeFieldBegin('parentTableName', TType.STRING, 4) + oprot.writeString(self.parentTableName) + oprot.writeFieldEnd() + if self.parentTableSchema is not None: + oprot.writeFieldBegin('parentTableSchema', TType.STRING, 5) + oprot.writeString(self.parentTableSchema) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.foreignCols) + value = (value * 31) ^ hash(self.parentCols) + value = (value * 31) ^ hash(self.foreignKeyName) + value = (value * 31) ^ hash(self.parentTableName) + value = (value * 31) ^ hash(self.parentTableSchema) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class SQLPrimaryKey: + """ + Attributes: + - table_schem + - table_name + - column_name + - key_seq + - pk_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'table_schem', None, None, ), # 1 + (2, TType.STRING, 'table_name', None, None, ), # 2 + (3, TType.STRING, 'column_name', None, None, ), # 3 + (4, TType.I32, 'key_seq', None, None, ), # 4 + (5, TType.STRING, 'pk_name', None, None, ), # 5 + ) + + def __init__(self, table_schem=None, table_name=None, column_name=None, key_seq=None, pk_name=None,): + self.table_schem = table_schem + self.table_name = table_name + self.column_name = column_name + self.key_seq = key_seq + self.pk_name = pk_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.table_schem = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.table_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.column_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.key_seq = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.pk_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SQLPrimaryKey') + if 
self.table_schem is not None: + oprot.writeFieldBegin('table_schem', TType.STRING, 1) + oprot.writeString(self.table_schem) + oprot.writeFieldEnd() + if self.table_name is not None: + oprot.writeFieldBegin('table_name', TType.STRING, 2) + oprot.writeString(self.table_name) + oprot.writeFieldEnd() + if self.column_name is not None: + oprot.writeFieldBegin('column_name', TType.STRING, 3) + oprot.writeString(self.column_name) + oprot.writeFieldEnd() + if self.key_seq is not None: + oprot.writeFieldBegin('key_seq', TType.I32, 4) + oprot.writeI32(self.key_seq) + oprot.writeFieldEnd() + if self.pk_name is not None: + oprot.writeFieldBegin('pk_name', TType.STRING, 5) + oprot.writeString(self.pk_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.table_schem) + value = (value * 31) ^ hash(self.table_name) + value = (value * 31) ^ hash(self.column_name) + value = (value * 31) ^ hash(self.key_seq) + value = (value * 31) ^ hash(self.pk_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class SQLForeignKey: + """ + Attributes: + - pktable_schem + - pktable_name + - pkcolumn_name + - fktable_schem + - fktable_name + - fkcolumn_name + - key_seq + - update_rule + - delete_rule + - fk_name + - pk_name + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'pktable_schem', None, None, ), # 1 + (2, TType.STRING, 'pktable_name', None, None, ), # 2 + (3, TType.STRING, 'pkcolumn_name', None, None, ), # 3 + (4, TType.STRING, 'fktable_schem', None, None, ), # 4 + (5, TType.STRING, 'fktable_name', None, None, ), # 5 + (6, TType.STRING, 'fkcolumn_name', None, None, ), # 6 + (7, TType.I32, 'key_seq', None, None, ), # 7 + (8, TType.I32, 'update_rule', None, None, ), # 8 + (9, TType.I32, 'delete_rule', None, None, ), # 9 + (10, TType.STRING, 'fk_name', None, None, ), # 10 + (11, TType.STRING, 'pk_name', None, None, ), # 11 + ) + + def __init__(self, pktable_schem=None, pktable_name=None, pkcolumn_name=None, fktable_schem=None, fktable_name=None, fkcolumn_name=None, key_seq=None, update_rule=None, delete_rule=None, fk_name=None, pk_name=None,): + self.pktable_schem = pktable_schem + self.pktable_name = pktable_name + self.pkcolumn_name = pkcolumn_name + self.fktable_schem = fktable_schem + self.fktable_name = fktable_name + self.fkcolumn_name = fkcolumn_name + self.key_seq = key_seq + self.update_rule = update_rule + self.delete_rule = delete_rule + self.fk_name = fk_name + self.pk_name = pk_name + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.pktable_schem = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.pktable_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + 
self.pkcolumn_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.fktable_schem = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.fktable_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.fkcolumn_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.I32: + self.key_seq = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 8: + if ftype == TType.I32: + self.update_rule = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 9: + if ftype == TType.I32: + self.delete_rule = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 10: + if ftype == TType.STRING: + self.fk_name = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 11: + if ftype == TType.STRING: + self.pk_name = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SQLForeignKey') + if self.pktable_schem is not None: + oprot.writeFieldBegin('pktable_schem', TType.STRING, 1) + oprot.writeString(self.pktable_schem) + oprot.writeFieldEnd() + if self.pktable_name is not None: + oprot.writeFieldBegin('pktable_name', TType.STRING, 2) + oprot.writeString(self.pktable_name) + oprot.writeFieldEnd() + if self.pkcolumn_name is not None: + oprot.writeFieldBegin('pkcolumn_name', TType.STRING, 3) + oprot.writeString(self.pkcolumn_name) + oprot.writeFieldEnd() + if self.fktable_schem is not None: + oprot.writeFieldBegin('fktable_schem', TType.STRING, 4) + oprot.writeString(self.fktable_schem) + oprot.writeFieldEnd() + if self.fktable_name is not None: + oprot.writeFieldBegin('fktable_name', TType.STRING, 5) + oprot.writeString(self.fktable_name) + oprot.writeFieldEnd() + if self.fkcolumn_name is not None: + oprot.writeFieldBegin('fkcolumn_name', TType.STRING, 6) + oprot.writeString(self.fkcolumn_name) + oprot.writeFieldEnd() + if self.key_seq is not None: + oprot.writeFieldBegin('key_seq', TType.I32, 7) + oprot.writeI32(self.key_seq) + oprot.writeFieldEnd() + if self.update_rule is not None: + oprot.writeFieldBegin('update_rule', TType.I32, 8) + oprot.writeI32(self.update_rule) + oprot.writeFieldEnd() + if self.delete_rule is not None: + oprot.writeFieldBegin('delete_rule', TType.I32, 9) + oprot.writeI32(self.delete_rule) + oprot.writeFieldEnd() + if self.fk_name is not None: + oprot.writeFieldBegin('fk_name', TType.STRING, 10) + oprot.writeString(self.fk_name) + oprot.writeFieldEnd() + if self.pk_name is not None: + oprot.writeFieldBegin('pk_name', TType.STRING, 11) + oprot.writeString(self.pk_name) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.pktable_schem) + value = (value * 31) ^ hash(self.pktable_name) + value = (value * 31) ^ hash(self.pkcolumn_name) + value = (value * 31) ^ hash(self.fktable_schem) + value = (value * 31) ^ hash(self.fktable_name) + value = (value * 31) ^ hash(self.fkcolumn_name) + value = (value * 31) ^ hash(self.key_seq) + value = (value * 31) ^ hash(self.update_rule) + value = (value * 31) ^ hash(self.delete_rule) + value = 
(value * 31) ^ hash(self.fk_name) + value = (value * 31) ^ hash(self.pk_name) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Type: """ Attributes: @@ -444,11 +891,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.fields = [] - (_etype3, _size0) = iprot.readListBegin() - for _i4 in xrange(_size0): - _elem5 = FieldSchema() - _elem5.read(iprot) - self.fields.append(_elem5) + (_etype17, _size14) = iprot.readListBegin() + for _i18 in xrange(_size14): + _elem19 = FieldSchema() + _elem19.read(iprot) + self.fields.append(_elem19) iprot.readListEnd() else: iprot.skip(ftype) @@ -477,8 +924,8 @@ def write(self, oprot): if self.fields is not None: oprot.writeFieldBegin('fields', TType.LIST, 4) oprot.writeListBegin(TType.STRUCT, len(self.fields)) - for iter6 in self.fields: - iter6.write(oprot) + for iter20 in self.fields: + iter20.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -560,10 +1007,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partValues = [] - (_etype10, _size7) = iprot.readListBegin() - for _i11 in xrange(_size7): - _elem12 = iprot.readString() - self.partValues.append(_elem12) + (_etype24, _size21) = iprot.readListBegin() + for _i25 in xrange(_size21): + _elem26 = iprot.readString() + self.partValues.append(_elem26) iprot.readListEnd() else: iprot.skip(ftype) @@ -597,8 +1044,8 @@ def write(self, oprot): if self.partValues is not None: oprot.writeFieldBegin('partValues', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partValues)) - for iter13 in self.partValues: - oprot.writeString(iter13) + for iter27 in self.partValues: + oprot.writeString(iter27) oprot.writeListEnd() oprot.writeFieldEnd() if self.columnName is not None: @@ -881,11 +1328,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.privileges = [] - (_etype17, _size14) = iprot.readListBegin() - for _i18 in xrange(_size14): - _elem19 = HiveObjectPrivilege() - _elem19.read(iprot) - self.privileges.append(_elem19) + (_etype31, _size28) = iprot.readListBegin() + for _i32 in xrange(_size28): + _elem33 = HiveObjectPrivilege() + _elem33.read(iprot) + self.privileges.append(_elem33) iprot.readListEnd() else: iprot.skip(ftype) @@ -902,8 +1349,8 @@ def write(self, oprot): if self.privileges is not None: oprot.writeFieldBegin('privileges', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.privileges)) - for iter20 in self.privileges: - iter20.write(oprot) + for iter34 in self.privileges: + iter34.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -961,51 +1408,51 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.userPrivileges = {} - (_ktype22, _vtype23, _size21 ) = iprot.readMapBegin() - for _i25 in xrange(_size21): - _key26 = iprot.readString() - _val27 = [] - (_etype31, _size28) = iprot.readListBegin() - for _i32 in xrange(_size28): - _elem33 = PrivilegeGrantInfo() - _elem33.read(iprot) - _val27.append(_elem33) + (_ktype36, _vtype37, _size35 ) = iprot.readMapBegin() + for _i39 in xrange(_size35): + _key40 = iprot.readString() + _val41 = [] + (_etype45, _size42) = iprot.readListBegin() + for _i46 in xrange(_size42): + _elem47 = PrivilegeGrantInfo() + _elem47.read(iprot) + 
_val41.append(_elem47) iprot.readListEnd() - self.userPrivileges[_key26] = _val27 + self.userPrivileges[_key40] = _val41 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.groupPrivileges = {} - (_ktype35, _vtype36, _size34 ) = iprot.readMapBegin() - for _i38 in xrange(_size34): - _key39 = iprot.readString() - _val40 = [] - (_etype44, _size41) = iprot.readListBegin() - for _i45 in xrange(_size41): - _elem46 = PrivilegeGrantInfo() - _elem46.read(iprot) - _val40.append(_elem46) + (_ktype49, _vtype50, _size48 ) = iprot.readMapBegin() + for _i52 in xrange(_size48): + _key53 = iprot.readString() + _val54 = [] + (_etype58, _size55) = iprot.readListBegin() + for _i59 in xrange(_size55): + _elem60 = PrivilegeGrantInfo() + _elem60.read(iprot) + _val54.append(_elem60) iprot.readListEnd() - self.groupPrivileges[_key39] = _val40 + self.groupPrivileges[_key53] = _val54 iprot.readMapEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.rolePrivileges = {} - (_ktype48, _vtype49, _size47 ) = iprot.readMapBegin() - for _i51 in xrange(_size47): - _key52 = iprot.readString() - _val53 = [] - (_etype57, _size54) = iprot.readListBegin() - for _i58 in xrange(_size54): - _elem59 = PrivilegeGrantInfo() - _elem59.read(iprot) - _val53.append(_elem59) + (_ktype62, _vtype63, _size61 ) = iprot.readMapBegin() + for _i65 in xrange(_size61): + _key66 = iprot.readString() + _val67 = [] + (_etype71, _size68) = iprot.readListBegin() + for _i72 in xrange(_size68): + _elem73 = PrivilegeGrantInfo() + _elem73.read(iprot) + _val67.append(_elem73) iprot.readListEnd() - self.rolePrivileges[_key52] = _val53 + self.rolePrivileges[_key66] = _val67 iprot.readMapEnd() else: iprot.skip(ftype) @@ -1022,33 +1469,33 @@ def write(self, oprot): if self.userPrivileges is not None: oprot.writeFieldBegin('userPrivileges', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.userPrivileges)) - for kiter60,viter61 in self.userPrivileges.items(): - oprot.writeString(kiter60) - oprot.writeListBegin(TType.STRUCT, len(viter61)) - for iter62 in viter61: - iter62.write(oprot) + for kiter74,viter75 in self.userPrivileges.items(): + oprot.writeString(kiter74) + oprot.writeListBegin(TType.STRUCT, len(viter75)) + for iter76 in viter75: + iter76.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.groupPrivileges is not None: oprot.writeFieldBegin('groupPrivileges', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.groupPrivileges)) - for kiter63,viter64 in self.groupPrivileges.items(): - oprot.writeString(kiter63) - oprot.writeListBegin(TType.STRUCT, len(viter64)) - for iter65 in viter64: - iter65.write(oprot) + for kiter77,viter78 in self.groupPrivileges.items(): + oprot.writeString(kiter77) + oprot.writeListBegin(TType.STRUCT, len(viter78)) + for iter79 in viter78: + iter79.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() if self.rolePrivileges is not None: oprot.writeFieldBegin('rolePrivileges', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.rolePrivileges)) - for kiter66,viter67 in self.rolePrivileges.items(): - oprot.writeString(kiter66) - oprot.writeListBegin(TType.STRUCT, len(viter67)) - for iter68 in viter67: - iter68.write(oprot) + for kiter80,viter81 in self.rolePrivileges.items(): + oprot.writeString(kiter80) + oprot.writeListBegin(TType.STRUCT, len(viter81)) + for iter82 in viter81: + iter82.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() @@ -1576,11 
+2023,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.principalGrants = [] - (_etype72, _size69) = iprot.readListBegin() - for _i73 in xrange(_size69): - _elem74 = RolePrincipalGrant() - _elem74.read(iprot) - self.principalGrants.append(_elem74) + (_etype86, _size83) = iprot.readListBegin() + for _i87 in xrange(_size83): + _elem88 = RolePrincipalGrant() + _elem88.read(iprot) + self.principalGrants.append(_elem88) iprot.readListEnd() else: iprot.skip(ftype) @@ -1597,8 +2044,8 @@ def write(self, oprot): if self.principalGrants is not None: oprot.writeFieldBegin('principalGrants', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.principalGrants)) - for iter75 in self.principalGrants: - iter75.write(oprot) + for iter89 in self.principalGrants: + iter89.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -1719,11 +2166,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.principalGrants = [] - (_etype79, _size76) = iprot.readListBegin() - for _i80 in xrange(_size76): - _elem81 = RolePrincipalGrant() - _elem81.read(iprot) - self.principalGrants.append(_elem81) + (_etype93, _size90) = iprot.readListBegin() + for _i94 in xrange(_size90): + _elem95 = RolePrincipalGrant() + _elem95.read(iprot) + self.principalGrants.append(_elem95) iprot.readListEnd() else: iprot.skip(ftype) @@ -1740,8 +2187,8 @@ def write(self, oprot): if self.principalGrants is not None: oprot.writeFieldBegin('principalGrants', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.principalGrants)) - for iter82 in self.principalGrants: - iter82.write(oprot) + for iter96 in self.principalGrants: + iter96.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -2036,11 +2483,11 @@ def read(self, iprot): elif fid == 4: if ftype == TType.MAP: self.parameters = {} - (_ktype84, _vtype85, _size83 ) = iprot.readMapBegin() - for _i87 in xrange(_size83): - _key88 = iprot.readString() - _val89 = iprot.readString() - self.parameters[_key88] = _val89 + (_ktype98, _vtype99, _size97 ) = iprot.readMapBegin() + for _i101 in xrange(_size97): + _key102 = iprot.readString() + _val103 = iprot.readString() + self.parameters[_key102] = _val103 iprot.readMapEnd() else: iprot.skip(ftype) @@ -2085,9 +2532,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 4) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter90,viter91 in self.parameters.items(): - oprot.writeString(kiter90) - oprot.writeString(viter91) + for kiter104,viter105 in self.parameters.items(): + oprot.writeString(kiter104) + oprot.writeString(viter105) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -2173,11 +2620,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.parameters = {} - (_ktype93, _vtype94, _size92 ) = iprot.readMapBegin() - for _i96 in xrange(_size92): - _key97 = iprot.readString() - _val98 = iprot.readString() - self.parameters[_key97] = _val98 + (_ktype107, _vtype108, _size106 ) = iprot.readMapBegin() + for _i110 in xrange(_size106): + _key111 = iprot.readString() + _val112 = iprot.readString() + self.parameters[_key111] = _val112 iprot.readMapEnd() else: iprot.skip(ftype) @@ -2202,9 +2649,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter99,viter100 in self.parameters.items(): - 
oprot.writeString(kiter99) - oprot.writeString(viter100) + for kiter113,viter114 in self.parameters.items(): + oprot.writeString(kiter113) + oprot.writeString(viter114) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -2342,41 +2789,41 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.skewedColNames = [] - (_etype104, _size101) = iprot.readListBegin() - for _i105 in xrange(_size101): - _elem106 = iprot.readString() - self.skewedColNames.append(_elem106) + (_etype118, _size115) = iprot.readListBegin() + for _i119 in xrange(_size115): + _elem120 = iprot.readString() + self.skewedColNames.append(_elem120) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.skewedColValues = [] - (_etype110, _size107) = iprot.readListBegin() - for _i111 in xrange(_size107): - _elem112 = [] - (_etype116, _size113) = iprot.readListBegin() - for _i117 in xrange(_size113): - _elem118 = iprot.readString() - _elem112.append(_elem118) + (_etype124, _size121) = iprot.readListBegin() + for _i125 in xrange(_size121): + _elem126 = [] + (_etype130, _size127) = iprot.readListBegin() + for _i131 in xrange(_size127): + _elem132 = iprot.readString() + _elem126.append(_elem132) iprot.readListEnd() - self.skewedColValues.append(_elem112) + self.skewedColValues.append(_elem126) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 3: if ftype == TType.MAP: self.skewedColValueLocationMaps = {} - (_ktype120, _vtype121, _size119 ) = iprot.readMapBegin() - for _i123 in xrange(_size119): - _key124 = [] - (_etype129, _size126) = iprot.readListBegin() - for _i130 in xrange(_size126): - _elem131 = iprot.readString() - _key124.append(_elem131) + (_ktype134, _vtype135, _size133 ) = iprot.readMapBegin() + for _i137 in xrange(_size133): + _key138 = [] + (_etype143, _size140) = iprot.readListBegin() + for _i144 in xrange(_size140): + _elem145 = iprot.readString() + _key138.append(_elem145) iprot.readListEnd() - _val125 = iprot.readString() - self.skewedColValueLocationMaps[_key124] = _val125 + _val139 = iprot.readString() + self.skewedColValueLocationMaps[_key138] = _val139 iprot.readMapEnd() else: iprot.skip(ftype) @@ -2393,29 +2840,29 @@ def write(self, oprot): if self.skewedColNames is not None: oprot.writeFieldBegin('skewedColNames', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.skewedColNames)) - for iter132 in self.skewedColNames: - oprot.writeString(iter132) + for iter146 in self.skewedColNames: + oprot.writeString(iter146) oprot.writeListEnd() oprot.writeFieldEnd() if self.skewedColValues is not None: oprot.writeFieldBegin('skewedColValues', TType.LIST, 2) oprot.writeListBegin(TType.LIST, len(self.skewedColValues)) - for iter133 in self.skewedColValues: - oprot.writeListBegin(TType.STRING, len(iter133)) - for iter134 in iter133: - oprot.writeString(iter134) + for iter147 in self.skewedColValues: + oprot.writeListBegin(TType.STRING, len(iter147)) + for iter148 in iter147: + oprot.writeString(iter148) oprot.writeListEnd() oprot.writeListEnd() oprot.writeFieldEnd() if self.skewedColValueLocationMaps is not None: oprot.writeFieldBegin('skewedColValueLocationMaps', TType.MAP, 3) oprot.writeMapBegin(TType.LIST, TType.STRING, len(self.skewedColValueLocationMaps)) - for kiter135,viter136 in self.skewedColValueLocationMaps.items(): - oprot.writeListBegin(TType.STRING, len(kiter135)) - for iter137 in kiter135: - oprot.writeString(iter137) + for kiter149,viter150 in self.skewedColValueLocationMaps.items(): + oprot.writeListBegin(TType.STRING, len(kiter149)) 
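# --- Editor's construction sketch (not part of the generated patch) ---
# Building the new ttypes.py constraint structs directly, e.g. in a unit test.
# Keyword names follow the __init__ signatures and thrift_spec entries shown
# above; all values are made-up sample data, and the update_rule/delete_rule
# integers are placeholders whose semantics this patch does not define.
from hive_metastore.ttypes import SQLPrimaryKey, SQLForeignKey

pk = SQLPrimaryKey(table_schem='default', table_name='orders',
                   column_name='order_id', key_seq=1, pk_name='orders_pk')
fk = SQLForeignKey(pktable_schem='default', pktable_name='orders',
                   pkcolumn_name='order_id', fktable_schem='default',
                   fktable_name='order_items', fkcolumn_name='order_id',
                   key_seq=1, update_rule=0, delete_rule=0,
                   fk_name='order_items_fk', pk_name='orders_pk')
# The foreign key's referenced column should match the primary key's column.
assert fk.pkcolumn_name == pk.column_name
# --- end sketch ---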
+ for iter151 in kiter149: + oprot.writeString(iter151) oprot.writeListEnd() - oprot.writeString(viter136) + oprot.writeString(viter150) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -2502,11 +2949,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.cols = [] - (_etype141, _size138) = iprot.readListBegin() - for _i142 in xrange(_size138): - _elem143 = FieldSchema() - _elem143.read(iprot) - self.cols.append(_elem143) + (_etype155, _size152) = iprot.readListBegin() + for _i156 in xrange(_size152): + _elem157 = FieldSchema() + _elem157.read(iprot) + self.cols.append(_elem157) iprot.readListEnd() else: iprot.skip(ftype) @@ -2544,32 +2991,32 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.bucketCols = [] - (_etype147, _size144) = iprot.readListBegin() - for _i148 in xrange(_size144): - _elem149 = iprot.readString() - self.bucketCols.append(_elem149) + (_etype161, _size158) = iprot.readListBegin() + for _i162 in xrange(_size158): + _elem163 = iprot.readString() + self.bucketCols.append(_elem163) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.LIST: self.sortCols = [] - (_etype153, _size150) = iprot.readListBegin() - for _i154 in xrange(_size150): - _elem155 = Order() - _elem155.read(iprot) - self.sortCols.append(_elem155) + (_etype167, _size164) = iprot.readListBegin() + for _i168 in xrange(_size164): + _elem169 = Order() + _elem169.read(iprot) + self.sortCols.append(_elem169) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 10: if ftype == TType.MAP: self.parameters = {} - (_ktype157, _vtype158, _size156 ) = iprot.readMapBegin() - for _i160 in xrange(_size156): - _key161 = iprot.readString() - _val162 = iprot.readString() - self.parameters[_key161] = _val162 + (_ktype171, _vtype172, _size170 ) = iprot.readMapBegin() + for _i174 in xrange(_size170): + _key175 = iprot.readString() + _val176 = iprot.readString() + self.parameters[_key175] = _val176 iprot.readMapEnd() else: iprot.skip(ftype) @@ -2597,8 +3044,8 @@ def write(self, oprot): if self.cols is not None: oprot.writeFieldBegin('cols', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.cols)) - for iter163 in self.cols: - iter163.write(oprot) + for iter177 in self.cols: + iter177.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.location is not None: @@ -2628,23 +3075,23 @@ def write(self, oprot): if self.bucketCols is not None: oprot.writeFieldBegin('bucketCols', TType.LIST, 8) oprot.writeListBegin(TType.STRING, len(self.bucketCols)) - for iter164 in self.bucketCols: - oprot.writeString(iter164) + for iter178 in self.bucketCols: + oprot.writeString(iter178) oprot.writeListEnd() oprot.writeFieldEnd() if self.sortCols is not None: oprot.writeFieldBegin('sortCols', TType.LIST, 9) oprot.writeListBegin(TType.STRUCT, len(self.sortCols)) - for iter165 in self.sortCols: - iter165.write(oprot) + for iter179 in self.sortCols: + iter179.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 10) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter166,viter167 in self.parameters.items(): - oprot.writeString(kiter166) - oprot.writeString(viter167) + for kiter180,viter181 in self.parameters.items(): + oprot.writeString(kiter180) + oprot.writeString(viter181) oprot.writeMapEnd() oprot.writeFieldEnd() if self.skewedInfo is not None: @@ -2706,6 +3153,8 @@ class Table: - tableType - privileges - temporary + - primaryKeys + - 
foreignKeys """ thrift_spec = ( @@ -2724,9 +3173,11 @@ class Table: (12, TType.STRING, 'tableType', None, None, ), # 12 (13, TType.STRUCT, 'privileges', (PrincipalPrivilegeSet, PrincipalPrivilegeSet.thrift_spec), None, ), # 13 (14, TType.BOOL, 'temporary', None, False, ), # 14 + (15, TType.LIST, 'primaryKeys', (TType.STRUCT,(FieldSchema, FieldSchema.thrift_spec)), None, ), # 15 + (16, TType.LIST, 'foreignKeys', (TType.STRUCT,(ForeignKeyRel, ForeignKeyRel.thrift_spec)), None, ), # 16 ) - def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4],): + def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, lastAccessTime=None, retention=None, sd=None, partitionKeys=None, parameters=None, viewOriginalText=None, viewExpandedText=None, tableType=None, privileges=None, temporary=thrift_spec[14][4], primaryKeys=None, foreignKeys=None,): self.tableName = tableName self.dbName = dbName self.owner = owner @@ -2741,6 +3192,8 @@ def __init__(self, tableName=None, dbName=None, owner=None, createTime=None, las self.tableType = tableType self.privileges = privileges self.temporary = temporary + self.primaryKeys = primaryKeys + self.foreignKeys = foreignKeys def read(self, iprot): if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: @@ -2790,22 +3243,22 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.partitionKeys = [] - (_etype171, _size168) = iprot.readListBegin() - for _i172 in xrange(_size168): - _elem173 = FieldSchema() - _elem173.read(iprot) - self.partitionKeys.append(_elem173) + (_etype185, _size182) = iprot.readListBegin() + for _i186 in xrange(_size182): + _elem187 = FieldSchema() + _elem187.read(iprot) + self.partitionKeys.append(_elem187) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 9: if ftype == TType.MAP: self.parameters = {} - (_ktype175, _vtype176, _size174 ) = iprot.readMapBegin() - for _i178 in xrange(_size174): - _key179 = iprot.readString() - _val180 = iprot.readString() - self.parameters[_key179] = _val180 + (_ktype189, _vtype190, _size188 ) = iprot.readMapBegin() + for _i192 in xrange(_size188): + _key193 = iprot.readString() + _val194 = iprot.readString() + self.parameters[_key193] = _val194 iprot.readMapEnd() else: iprot.skip(ftype) @@ -2835,6 +3288,28 @@ def read(self, iprot): self.temporary = iprot.readBool() else: iprot.skip(ftype) + elif fid == 15: + if ftype == TType.LIST: + self.primaryKeys = [] + (_etype198, _size195) = iprot.readListBegin() + for _i199 in xrange(_size195): + _elem200 = FieldSchema() + _elem200.read(iprot) + self.primaryKeys.append(_elem200) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 16: + if ftype == TType.LIST: + self.foreignKeys = [] + (_etype204, _size201) = iprot.readListBegin() + for _i205 in xrange(_size201): + _elem206 = ForeignKeyRel() + _elem206.read(iprot) + self.foreignKeys.append(_elem206) + iprot.readListEnd() + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -2876,16 +3351,16 @@ def write(self, oprot): if self.partitionKeys is not None: oprot.writeFieldBegin('partitionKeys', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.partitionKeys)) - for iter181 in self.partitionKeys: - iter181.write(oprot) + 
for iter207 in self.partitionKeys: + iter207.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter182,viter183 in self.parameters.items(): - oprot.writeString(kiter182) - oprot.writeString(viter183) + for kiter208,viter209 in self.parameters.items(): + oprot.writeString(kiter208) + oprot.writeString(viter209) oprot.writeMapEnd() oprot.writeFieldEnd() if self.viewOriginalText is not None: @@ -2908,6 +3383,20 @@ def write(self, oprot): oprot.writeFieldBegin('temporary', TType.BOOL, 14) oprot.writeBool(self.temporary) oprot.writeFieldEnd() + if self.primaryKeys is not None: + oprot.writeFieldBegin('primaryKeys', TType.LIST, 15) + oprot.writeListBegin(TType.STRUCT, len(self.primaryKeys)) + for iter210 in self.primaryKeys: + iter210.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.foreignKeys is not None: + oprot.writeFieldBegin('foreignKeys', TType.LIST, 16) + oprot.writeListBegin(TType.STRUCT, len(self.foreignKeys)) + for iter211 in self.foreignKeys: + iter211.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -2931,6 +3420,8 @@ def __hash__(self): value = (value * 31) ^ hash(self.tableType) value = (value * 31) ^ hash(self.privileges) value = (value * 31) ^ hash(self.temporary) + value = (value * 31) ^ hash(self.primaryKeys) + value = (value * 31) ^ hash(self.foreignKeys) return value def __repr__(self): @@ -2991,10 +3482,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype187, _size184) = iprot.readListBegin() - for _i188 in xrange(_size184): - _elem189 = iprot.readString() - self.values.append(_elem189) + (_etype215, _size212) = iprot.readListBegin() + for _i216 in xrange(_size212): + _elem217 = iprot.readString() + self.values.append(_elem217) iprot.readListEnd() else: iprot.skip(ftype) @@ -3027,11 +3518,11 @@ def read(self, iprot): elif fid == 7: if ftype == TType.MAP: self.parameters = {} - (_ktype191, _vtype192, _size190 ) = iprot.readMapBegin() - for _i194 in xrange(_size190): - _key195 = iprot.readString() - _val196 = iprot.readString() - self.parameters[_key195] = _val196 + (_ktype219, _vtype220, _size218 ) = iprot.readMapBegin() + for _i222 in xrange(_size218): + _key223 = iprot.readString() + _val224 = iprot.readString() + self.parameters[_key223] = _val224 iprot.readMapEnd() else: iprot.skip(ftype) @@ -3054,8 +3545,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.values)) - for iter197 in self.values: - oprot.writeString(iter197) + for iter225 in self.values: + oprot.writeString(iter225) oprot.writeListEnd() oprot.writeFieldEnd() if self.dbName is not None: @@ -3081,9 +3572,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 7) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter198,viter199 in self.parameters.items(): - oprot.writeString(kiter198) - oprot.writeString(viter199) + for kiter226,viter227 in self.parameters.items(): + oprot.writeString(kiter226) + oprot.writeString(viter227) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -3161,10 +3652,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.values = [] - (_etype203, _size200) = iprot.readListBegin() - for _i204 
in xrange(_size200): - _elem205 = iprot.readString() - self.values.append(_elem205) + (_etype231, _size228) = iprot.readListBegin() + for _i232 in xrange(_size228): + _elem233 = iprot.readString() + self.values.append(_elem233) iprot.readListEnd() else: iprot.skip(ftype) @@ -3186,11 +3677,11 @@ def read(self, iprot): elif fid == 5: if ftype == TType.MAP: self.parameters = {} - (_ktype207, _vtype208, _size206 ) = iprot.readMapBegin() - for _i210 in xrange(_size206): - _key211 = iprot.readString() - _val212 = iprot.readString() - self.parameters[_key211] = _val212 + (_ktype235, _vtype236, _size234 ) = iprot.readMapBegin() + for _i238 in xrange(_size234): + _key239 = iprot.readString() + _val240 = iprot.readString() + self.parameters[_key239] = _val240 iprot.readMapEnd() else: iprot.skip(ftype) @@ -3213,8 +3704,8 @@ def write(self, oprot): if self.values is not None: oprot.writeFieldBegin('values', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.values)) - for iter213 in self.values: - oprot.writeString(iter213) + for iter241 in self.values: + oprot.writeString(iter241) oprot.writeListEnd() oprot.writeFieldEnd() if self.createTime is not None: @@ -3232,9 +3723,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 5) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter214,viter215 in self.parameters.items(): - oprot.writeString(kiter214) - oprot.writeString(viter215) + for kiter242,viter243 in self.parameters.items(): + oprot.writeString(kiter242) + oprot.writeString(viter243) oprot.writeMapEnd() oprot.writeFieldEnd() if self.privileges is not None: @@ -3298,11 +3789,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype219, _size216) = iprot.readListBegin() - for _i220 in xrange(_size216): - _elem221 = PartitionWithoutSD() - _elem221.read(iprot) - self.partitions.append(_elem221) + (_etype247, _size244) = iprot.readListBegin() + for _i248 in xrange(_size244): + _elem249 = PartitionWithoutSD() + _elem249.read(iprot) + self.partitions.append(_elem249) iprot.readListEnd() else: iprot.skip(ftype) @@ -3325,8 +3816,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter222 in self.partitions: - iter222.write(oprot) + for iter250 in self.partitions: + iter250.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.sd is not None: @@ -3383,11 +3874,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype226, _size223) = iprot.readListBegin() - for _i227 in xrange(_size223): - _elem228 = Partition() - _elem228.read(iprot) - self.partitions.append(_elem228) + (_etype254, _size251) = iprot.readListBegin() + for _i255 in xrange(_size251): + _elem256 = Partition() + _elem256.read(iprot) + self.partitions.append(_elem256) iprot.readListEnd() else: iprot.skip(ftype) @@ -3404,8 +3895,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter229 in self.partitions: - iter229.write(oprot) + for iter257 in self.partitions: + iter257.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -3644,11 +4135,11 @@ def read(self, iprot): elif fid == 9: if ftype == TType.MAP: self.parameters = {} - (_ktype231, _vtype232, _size230 ) = iprot.readMapBegin() - for _i234 in 
xrange(_size230): - _key235 = iprot.readString() - _val236 = iprot.readString() - self.parameters[_key235] = _val236 + (_ktype259, _vtype260, _size258 ) = iprot.readMapBegin() + for _i262 in xrange(_size258): + _key263 = iprot.readString() + _val264 = iprot.readString() + self.parameters[_key263] = _val264 iprot.readMapEnd() else: iprot.skip(ftype) @@ -3702,9 +4193,9 @@ def write(self, oprot): if self.parameters is not None: oprot.writeFieldBegin('parameters', TType.MAP, 9) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.parameters)) - for kiter237,viter238 in self.parameters.items(): - oprot.writeString(kiter237) - oprot.writeString(viter238) + for kiter265,viter266 in self.parameters.items(): + oprot.writeString(kiter265) + oprot.writeString(viter266) oprot.writeMapEnd() oprot.writeFieldEnd() if self.deferredRebuild is not None: @@ -5132,11 +5623,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.statsObj = [] - (_etype242, _size239) = iprot.readListBegin() - for _i243 in xrange(_size239): - _elem244 = ColumnStatisticsObj() - _elem244.read(iprot) - self.statsObj.append(_elem244) + (_etype270, _size267) = iprot.readListBegin() + for _i271 in xrange(_size267): + _elem272 = ColumnStatisticsObj() + _elem272.read(iprot) + self.statsObj.append(_elem272) iprot.readListEnd() else: iprot.skip(ftype) @@ -5157,8 +5648,8 @@ def write(self, oprot): if self.statsObj is not None: oprot.writeFieldBegin('statsObj', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.statsObj)) - for iter245 in self.statsObj: - iter245.write(oprot) + for iter273 in self.statsObj: + iter273.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5218,11 +5709,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.colStats = [] - (_etype249, _size246) = iprot.readListBegin() - for _i250 in xrange(_size246): - _elem251 = ColumnStatisticsObj() - _elem251.read(iprot) - self.colStats.append(_elem251) + (_etype277, _size274) = iprot.readListBegin() + for _i278 in xrange(_size274): + _elem279 = ColumnStatisticsObj() + _elem279.read(iprot) + self.colStats.append(_elem279) iprot.readListEnd() else: iprot.skip(ftype) @@ -5244,8 +5735,8 @@ def write(self, oprot): if self.colStats is not None: oprot.writeFieldBegin('colStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.colStats)) - for iter252 in self.colStats: - iter252.write(oprot) + for iter280 in self.colStats: + iter280.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.partsFound is not None: @@ -5306,11 +5797,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.colStats = [] - (_etype256, _size253) = iprot.readListBegin() - for _i257 in xrange(_size253): - _elem258 = ColumnStatistics() - _elem258.read(iprot) - self.colStats.append(_elem258) + (_etype284, _size281) = iprot.readListBegin() + for _i285 in xrange(_size281): + _elem286 = ColumnStatistics() + _elem286.read(iprot) + self.colStats.append(_elem286) iprot.readListEnd() else: iprot.skip(ftype) @@ -5327,8 +5818,8 @@ def write(self, oprot): if self.colStats is not None: oprot.writeFieldBegin('colStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.colStats)) - for iter259 in self.colStats: - iter259.write(oprot) + for iter287 in self.colStats: + iter287.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5385,22 +5876,22 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldSchemas = [] - (_etype263, _size260) = iprot.readListBegin() - 
for _i264 in xrange(_size260): - _elem265 = FieldSchema() - _elem265.read(iprot) - self.fieldSchemas.append(_elem265) + (_etype291, _size288) = iprot.readListBegin() + for _i292 in xrange(_size288): + _elem293 = FieldSchema() + _elem293.read(iprot) + self.fieldSchemas.append(_elem293) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.properties = {} - (_ktype267, _vtype268, _size266 ) = iprot.readMapBegin() - for _i270 in xrange(_size266): - _key271 = iprot.readString() - _val272 = iprot.readString() - self.properties[_key271] = _val272 + (_ktype295, _vtype296, _size294 ) = iprot.readMapBegin() + for _i298 in xrange(_size294): + _key299 = iprot.readString() + _val300 = iprot.readString() + self.properties[_key299] = _val300 iprot.readMapEnd() else: iprot.skip(ftype) @@ -5417,16 +5908,16 @@ def write(self, oprot): if self.fieldSchemas is not None: oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) - for iter273 in self.fieldSchemas: - iter273.write(oprot) + for iter301 in self.fieldSchemas: + iter301.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter274,viter275 in self.properties.items(): - oprot.writeString(kiter274) - oprot.writeString(viter275) + for kiter302,viter303 in self.properties.items(): + oprot.writeString(kiter302) + oprot.writeString(viter303) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5479,11 +5970,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.properties = {} - (_ktype277, _vtype278, _size276 ) = iprot.readMapBegin() - for _i280 in xrange(_size276): - _key281 = iprot.readString() - _val282 = iprot.readString() - self.properties[_key281] = _val282 + (_ktype305, _vtype306, _size304 ) = iprot.readMapBegin() + for _i308 in xrange(_size304): + _key309 = iprot.readString() + _val310 = iprot.readString() + self.properties[_key309] = _val310 iprot.readMapEnd() else: iprot.skip(ftype) @@ -5500,9 +5991,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter283,viter284 in self.properties.items(): - oprot.writeString(kiter283) - oprot.writeString(viter284) + for kiter311,viter312 in self.properties.items(): + oprot.writeString(kiter311) + oprot.writeString(viter312) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5557,11 +6048,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype288, _size285) = iprot.readListBegin() - for _i289 in xrange(_size285): - _elem290 = Partition() - _elem290.read(iprot) - self.partitions.append(_elem290) + (_etype316, _size313) = iprot.readListBegin() + for _i317 in xrange(_size313): + _elem318 = Partition() + _elem318.read(iprot) + self.partitions.append(_elem318) iprot.readListEnd() else: iprot.skip(ftype) @@ -5583,8 +6074,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter291 in self.partitions: - iter291.write(oprot) + for iter319 in self.partitions: + iter319.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.hasUnknownPartitions is not None: @@ -5768,11 +6259,11 @@ def read(self, iprot): if fid == 1: if 
ftype == TType.LIST: self.tableStats = [] - (_etype295, _size292) = iprot.readListBegin() - for _i296 in xrange(_size292): - _elem297 = ColumnStatisticsObj() - _elem297.read(iprot) - self.tableStats.append(_elem297) + (_etype323, _size320) = iprot.readListBegin() + for _i324 in xrange(_size320): + _elem325 = ColumnStatisticsObj() + _elem325.read(iprot) + self.tableStats.append(_elem325) iprot.readListEnd() else: iprot.skip(ftype) @@ -5789,8 +6280,8 @@ def write(self, oprot): if self.tableStats is not None: oprot.writeFieldBegin('tableStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) - for iter298 in self.tableStats: - iter298.write(oprot) + for iter326 in self.tableStats: + iter326.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5844,17 +6335,17 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partStats = {} - (_ktype300, _vtype301, _size299 ) = iprot.readMapBegin() - for _i303 in xrange(_size299): - _key304 = iprot.readString() - _val305 = [] - (_etype309, _size306) = iprot.readListBegin() - for _i310 in xrange(_size306): - _elem311 = ColumnStatisticsObj() - _elem311.read(iprot) - _val305.append(_elem311) + (_ktype328, _vtype329, _size327 ) = iprot.readMapBegin() + for _i331 in xrange(_size327): + _key332 = iprot.readString() + _val333 = [] + (_etype337, _size334) = iprot.readListBegin() + for _i338 in xrange(_size334): + _elem339 = ColumnStatisticsObj() + _elem339.read(iprot) + _val333.append(_elem339) iprot.readListEnd() - self.partStats[_key304] = _val305 + self.partStats[_key332] = _val333 iprot.readMapEnd() else: iprot.skip(ftype) @@ -5871,11 +6362,11 @@ def write(self, oprot): if self.partStats is not None: oprot.writeFieldBegin('partStats', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) - for kiter312,viter313 in self.partStats.items(): - oprot.writeString(kiter312) - oprot.writeListBegin(TType.STRUCT, len(viter313)) - for iter314 in viter313: - iter314.write(oprot) + for kiter340,viter341 in self.partStats.items(): + oprot.writeString(kiter340) + oprot.writeListBegin(TType.STRUCT, len(viter341)) + for iter342 in viter341: + iter342.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() oprot.writeFieldEnd() @@ -5946,10 +6437,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype318, _size315) = iprot.readListBegin() - for _i319 in xrange(_size315): - _elem320 = iprot.readString() - self.colNames.append(_elem320) + (_etype346, _size343) = iprot.readListBegin() + for _i347 in xrange(_size343): + _elem348 = iprot.readString() + self.colNames.append(_elem348) iprot.readListEnd() else: iprot.skip(ftype) @@ -5974,8 +6465,8 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter321 in self.colNames: - oprot.writeString(iter321) + for iter349 in self.colNames: + oprot.writeString(iter349) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6054,20 +6545,20 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype325, _size322) = iprot.readListBegin() - for _i326 in xrange(_size322): - _elem327 = iprot.readString() - self.colNames.append(_elem327) + (_etype353, _size350) = iprot.readListBegin() + for _i354 in xrange(_size350): + _elem355 = iprot.readString() + self.colNames.append(_elem355) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == 
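The partStats hunk above is the one spot in this file where containers nest (a map<string, list<ColumnStatisticsObj>>); the generated reader simply nests the same skeleton once more. Roughly, with the numbered temporaries renamed for readability:

    self.partStats = {}
    (_ktype, _vtype, _size) = iprot.readMapBegin()
    for _i in xrange(_size):
        _key = iprot.readString()
        _val = []
        (_etype, _lsize) = iprot.readListBegin()
        for _j in xrange(_lsize):
            _elem = ColumnStatisticsObj()  # struct elements deserialize themselves
            _elem.read(iprot)
            _val.append(_elem)
        iprot.readListEnd()
        self.partStats[_key] = _val
    iprot.readMapEnd()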
TType.LIST: self.partNames = [] - (_etype331, _size328) = iprot.readListBegin() - for _i332 in xrange(_size328): - _elem333 = iprot.readString() - self.partNames.append(_elem333) + (_etype359, _size356) = iprot.readListBegin() + for _i360 in xrange(_size356): + _elem361 = iprot.readString() + self.partNames.append(_elem361) iprot.readListEnd() else: iprot.skip(ftype) @@ -6092,15 +6583,15 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter334 in self.colNames: - oprot.writeString(iter334) + for iter362 in self.colNames: + oprot.writeString(iter362) oprot.writeListEnd() oprot.writeFieldEnd() if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter335 in self.partNames: - oprot.writeString(iter335) + for iter363 in self.partNames: + oprot.writeString(iter363) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6163,11 +6654,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype339, _size336) = iprot.readListBegin() - for _i340 in xrange(_size336): - _elem341 = Partition() - _elem341.read(iprot) - self.partitions.append(_elem341) + (_etype367, _size364) = iprot.readListBegin() + for _i368 in xrange(_size364): + _elem369 = Partition() + _elem369.read(iprot) + self.partitions.append(_elem369) iprot.readListEnd() else: iprot.skip(ftype) @@ -6184,8 +6675,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter342 in self.partitions: - iter342.write(oprot) + for iter370 in self.partitions: + iter370.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6259,11 +6750,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.parts = [] - (_etype346, _size343) = iprot.readListBegin() - for _i347 in xrange(_size343): - _elem348 = Partition() - _elem348.read(iprot) - self.parts.append(_elem348) + (_etype374, _size371) = iprot.readListBegin() + for _i375 in xrange(_size371): + _elem376 = Partition() + _elem376.read(iprot) + self.parts.append(_elem376) iprot.readListEnd() else: iprot.skip(ftype) @@ -6298,8 +6789,8 @@ def write(self, oprot): if self.parts is not None: oprot.writeFieldBegin('parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.parts)) - for iter349 in self.parts: - iter349.write(oprot) + for iter377 in self.parts: + iter377.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ifNotExists is not None: @@ -6371,11 +6862,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype353, _size350) = iprot.readListBegin() - for _i354 in xrange(_size350): - _elem355 = Partition() - _elem355.read(iprot) - self.partitions.append(_elem355) + (_etype381, _size378) = iprot.readListBegin() + for _i382 in xrange(_size378): + _elem383 = Partition() + _elem383.read(iprot) + self.partitions.append(_elem383) iprot.readListEnd() else: iprot.skip(ftype) @@ -6392,8 +6883,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter356 in self.partitions: - iter356.write(oprot) + for iter384 in self.partitions: + iter384.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ 
-6528,21 +7019,21 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.names = [] - (_etype360, _size357) = iprot.readListBegin() - for _i361 in xrange(_size357): - _elem362 = iprot.readString() - self.names.append(_elem362) + (_etype388, _size385) = iprot.readListBegin() + for _i389 in xrange(_size385): + _elem390 = iprot.readString() + self.names.append(_elem390) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.exprs = [] - (_etype366, _size363) = iprot.readListBegin() - for _i367 in xrange(_size363): - _elem368 = DropPartitionsExpr() - _elem368.read(iprot) - self.exprs.append(_elem368) + (_etype394, _size391) = iprot.readListBegin() + for _i395 in xrange(_size391): + _elem396 = DropPartitionsExpr() + _elem396.read(iprot) + self.exprs.append(_elem396) iprot.readListEnd() else: iprot.skip(ftype) @@ -6559,15 +7050,15 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter369 in self.names: - oprot.writeString(iter369) + for iter397 in self.names: + oprot.writeString(iter397) oprot.writeListEnd() oprot.writeFieldEnd() if self.exprs is not None: oprot.writeFieldBegin('exprs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.exprs)) - for iter370 in self.exprs: - iter370.write(oprot) + for iter398 in self.exprs: + iter398.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6918,11 +7409,11 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.resourceUris = [] - (_etype374, _size371) = iprot.readListBegin() - for _i375 in xrange(_size371): - _elem376 = ResourceUri() - _elem376.read(iprot) - self.resourceUris.append(_elem376) + (_etype402, _size399) = iprot.readListBegin() + for _i403 in xrange(_size399): + _elem404 = ResourceUri() + _elem404.read(iprot) + self.resourceUris.append(_elem404) iprot.readListEnd() else: iprot.skip(ftype) @@ -6967,8 +7458,8 @@ def write(self, oprot): if self.resourceUris is not None: oprot.writeFieldBegin('resourceUris', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) - for iter377 in self.resourceUris: - iter377.write(oprot) + for iter405 in self.resourceUris: + iter405.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7186,11 +7677,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype381, _size378) = iprot.readListBegin() - for _i382 in xrange(_size378): - _elem383 = TxnInfo() - _elem383.read(iprot) - self.open_txns.append(_elem383) + (_etype409, _size406) = iprot.readListBegin() + for _i410 in xrange(_size406): + _elem411 = TxnInfo() + _elem411.read(iprot) + self.open_txns.append(_elem411) iprot.readListEnd() else: iprot.skip(ftype) @@ -7211,8 +7702,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) - for iter384 in self.open_txns: - iter384.write(oprot) + for iter412 in self.open_txns: + iter412.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7277,10 +7768,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.SET: self.open_txns = set() - (_etype388, _size385) = iprot.readSetBegin() - for _i389 in xrange(_size385): - _elem390 = iprot.readI64() - self.open_txns.add(_elem390) + (_etype416, _size413) = iprot.readSetBegin() + for _i417 in xrange(_size413): + _elem418 = iprot.readI64() + 
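The transaction hunks around this point switch container type: open_txns, aborted and nosuch are Thrift sets of i64. On the wire the only difference from the list skeleton is the begin/end call pair and the Python set target, roughly:

    self.open_txns = set()
    (_etype, _size) = iprot.readSetBegin()
    for _i in xrange(_size):
        self.open_txns.add(iprot.readI64())
    iprot.readSetEnd()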
self.open_txns.add(_elem418) iprot.readSetEnd() else: iprot.skip(ftype) @@ -7301,8 +7792,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.open_txns)) - for iter391 in self.open_txns: - oprot.writeI64(iter391) + for iter419 in self.open_txns: + oprot.writeI64(iter419) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7469,10 +7960,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype395, _size392) = iprot.readListBegin() - for _i396 in xrange(_size392): - _elem397 = iprot.readI64() - self.txn_ids.append(_elem397) + (_etype423, _size420) = iprot.readListBegin() + for _i424 in xrange(_size420): + _elem425 = iprot.readI64() + self.txn_ids.append(_elem425) iprot.readListEnd() else: iprot.skip(ftype) @@ -7489,8 +7980,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter398 in self.txn_ids: - oprot.writeI64(iter398) + for iter426 in self.txn_ids: + oprot.writeI64(iter426) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7813,11 +8304,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype402, _size399) = iprot.readListBegin() - for _i403 in xrange(_size399): - _elem404 = LockComponent() - _elem404.read(iprot) - self.component.append(_elem404) + (_etype430, _size427) = iprot.readListBegin() + for _i431 in xrange(_size427): + _elem432 = LockComponent() + _elem432.read(iprot) + self.component.append(_elem432) iprot.readListEnd() else: iprot.skip(ftype) @@ -7854,8 +8345,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter405 in self.component: - iter405.write(oprot) + for iter433 in self.component: + iter433.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -8553,11 +9044,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype409, _size406) = iprot.readListBegin() - for _i410 in xrange(_size406): - _elem411 = ShowLocksResponseElement() - _elem411.read(iprot) - self.locks.append(_elem411) + (_etype437, _size434) = iprot.readListBegin() + for _i438 in xrange(_size434): + _elem439 = ShowLocksResponseElement() + _elem439.read(iprot) + self.locks.append(_elem439) iprot.readListEnd() else: iprot.skip(ftype) @@ -8574,8 +9065,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter412 in self.locks: - iter412.write(oprot) + for iter440 in self.locks: + iter440.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -8790,20 +9281,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype416, _size413) = iprot.readSetBegin() - for _i417 in xrange(_size413): - _elem418 = iprot.readI64() - self.aborted.add(_elem418) + (_etype444, _size441) = iprot.readSetBegin() + for _i445 in xrange(_size441): + _elem446 = iprot.readI64() + self.aborted.add(_elem446) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype422, _size419) = iprot.readSetBegin() - for _i423 in xrange(_size419): - _elem424 = iprot.readI64() - self.nosuch.add(_elem424) + (_etype450, _size447) = 
iprot.readSetBegin() + for _i451 in xrange(_size447): + _elem452 = iprot.readI64() + self.nosuch.add(_elem452) iprot.readSetEnd() else: iprot.skip(ftype) @@ -8820,15 +9311,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter425 in self.aborted: - oprot.writeI64(iter425) + for iter453 in self.aborted: + oprot.writeI64(iter453) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter426 in self.nosuch: - oprot.writeI64(iter426) + for iter454 in self.nosuch: + oprot.writeI64(iter454) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9270,11 +9761,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype430, _size427) = iprot.readListBegin() - for _i431 in xrange(_size427): - _elem432 = ShowCompactResponseElement() - _elem432.read(iprot) - self.compacts.append(_elem432) + (_etype458, _size455) = iprot.readListBegin() + for _i459 in xrange(_size455): + _elem460 = ShowCompactResponseElement() + _elem460.read(iprot) + self.compacts.append(_elem460) iprot.readListEnd() else: iprot.skip(ftype) @@ -9291,8 +9782,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter433 in self.compacts: - iter433.write(oprot) + for iter461 in self.compacts: + iter461.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9370,10 +9861,10 @@ def read(self, iprot): elif fid == 4: if ftype == TType.LIST: self.partitionnames = [] - (_etype437, _size434) = iprot.readListBegin() - for _i438 in xrange(_size434): - _elem439 = iprot.readString() - self.partitionnames.append(_elem439) + (_etype465, _size462) = iprot.readListBegin() + for _i466 in xrange(_size462): + _elem467 = iprot.readString() + self.partitionnames.append(_elem467) iprot.readListEnd() else: iprot.skip(ftype) @@ -9402,8 +9893,8 @@ def write(self, oprot): if self.partitionnames is not None: oprot.writeFieldBegin('partitionnames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partitionnames)) - for iter440 in self.partitionnames: - oprot.writeString(iter440) + for iter468 in self.partitionnames: + oprot.writeString(iter468) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9684,11 +10175,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.events = [] - (_etype444, _size441) = iprot.readListBegin() - for _i445 in xrange(_size441): - _elem446 = NotificationEvent() - _elem446.read(iprot) - self.events.append(_elem446) + (_etype472, _size469) = iprot.readListBegin() + for _i473 in xrange(_size469): + _elem474 = NotificationEvent() + _elem474.read(iprot) + self.events.append(_elem474) iprot.readListEnd() else: iprot.skip(ftype) @@ -9705,8 +10196,8 @@ def write(self, oprot): if self.events is not None: oprot.writeFieldBegin('events', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.events)) - for iter447 in self.events: - iter447.write(oprot) + for iter475 in self.events: + iter475.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -9827,10 +10318,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.filesAdded = [] - (_etype451, _size448) = iprot.readListBegin() - for _i452 in xrange(_size448): - _elem453 = iprot.readString() - 
self.filesAdded.append(_elem453) + (_etype479, _size476) = iprot.readListBegin() + for _i480 in xrange(_size476): + _elem481 = iprot.readString() + self.filesAdded.append(_elem481) iprot.readListEnd() else: iprot.skip(ftype) @@ -9847,8 +10338,8 @@ def write(self, oprot): if self.filesAdded is not None: oprot.writeFieldBegin('filesAdded', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.filesAdded)) - for iter454 in self.filesAdded: - oprot.writeString(iter454) + for iter482 in self.filesAdded: + oprot.writeString(iter482) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10001,10 +10492,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.partitionVals = [] - (_etype458, _size455) = iprot.readListBegin() - for _i459 in xrange(_size455): - _elem460 = iprot.readString() - self.partitionVals.append(_elem460) + (_etype486, _size483) = iprot.readListBegin() + for _i487 in xrange(_size483): + _elem488 = iprot.readString() + self.partitionVals.append(_elem488) iprot.readListEnd() else: iprot.skip(ftype) @@ -10037,8 +10528,8 @@ def write(self, oprot): if self.partitionVals is not None: oprot.writeFieldBegin('partitionVals', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.partitionVals)) - for iter461 in self.partitionVals: - oprot.writeString(iter461) + for iter489 in self.partitionVals: + oprot.writeString(iter489) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10359,12 +10850,12 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype463, _vtype464, _size462 ) = iprot.readMapBegin() - for _i466 in xrange(_size462): - _key467 = iprot.readI64() - _val468 = MetadataPpdResult() - _val468.read(iprot) - self.metadata[_key467] = _val468 + (_ktype491, _vtype492, _size490 ) = iprot.readMapBegin() + for _i494 in xrange(_size490): + _key495 = iprot.readI64() + _val496 = MetadataPpdResult() + _val496.read(iprot) + self.metadata[_key495] = _val496 iprot.readMapEnd() else: iprot.skip(ftype) @@ -10386,9 +10877,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.metadata)) - for kiter469,viter470 in self.metadata.items(): - oprot.writeI64(kiter469) - viter470.write(oprot) + for kiter497,viter498 in self.metadata.items(): + oprot.writeI64(kiter497) + viter498.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -10458,10 +10949,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype474, _size471) = iprot.readListBegin() - for _i475 in xrange(_size471): - _elem476 = iprot.readI64() - self.fileIds.append(_elem476) + (_etype502, _size499) = iprot.readListBegin() + for _i503 in xrange(_size499): + _elem504 = iprot.readI64() + self.fileIds.append(_elem504) iprot.readListEnd() else: iprot.skip(ftype) @@ -10493,8 +10984,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter477 in self.fileIds: - oprot.writeI64(iter477) + for iter505 in self.fileIds: + oprot.writeI64(iter505) oprot.writeListEnd() oprot.writeFieldEnd() if self.expr is not None: @@ -10568,11 +11059,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.metadata = {} - (_ktype479, _vtype480, _size478 ) = iprot.readMapBegin() - for _i482 in xrange(_size478): - _key483 = iprot.readI64() - _val484 = iprot.readString() - 
self.metadata[_key483] = _val484 + (_ktype507, _vtype508, _size506 ) = iprot.readMapBegin() + for _i510 in xrange(_size506): + _key511 = iprot.readI64() + _val512 = iprot.readString() + self.metadata[_key511] = _val512 iprot.readMapEnd() else: iprot.skip(ftype) @@ -10594,9 +11085,9 @@ def write(self, oprot): if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.MAP, 1) oprot.writeMapBegin(TType.I64, TType.STRING, len(self.metadata)) - for kiter485,viter486 in self.metadata.items(): - oprot.writeI64(kiter485) - oprot.writeString(viter486) + for kiter513,viter514 in self.metadata.items(): + oprot.writeI64(kiter513) + oprot.writeString(viter514) oprot.writeMapEnd() oprot.writeFieldEnd() if self.isSupported is not None: @@ -10657,10 +11148,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype490, _size487) = iprot.readListBegin() - for _i491 in xrange(_size487): - _elem492 = iprot.readI64() - self.fileIds.append(_elem492) + (_etype518, _size515) = iprot.readListBegin() + for _i519 in xrange(_size515): + _elem520 = iprot.readI64() + self.fileIds.append(_elem520) iprot.readListEnd() else: iprot.skip(ftype) @@ -10677,8 +11168,8 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter493 in self.fileIds: - oprot.writeI64(iter493) + for iter521 in self.fileIds: + oprot.writeI64(iter521) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10784,20 +11275,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype497, _size494) = iprot.readListBegin() - for _i498 in xrange(_size494): - _elem499 = iprot.readI64() - self.fileIds.append(_elem499) + (_etype525, _size522) = iprot.readListBegin() + for _i526 in xrange(_size522): + _elem527 = iprot.readI64() + self.fileIds.append(_elem527) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.metadata = [] - (_etype503, _size500) = iprot.readListBegin() - for _i504 in xrange(_size500): - _elem505 = iprot.readString() - self.metadata.append(_elem505) + (_etype531, _size528) = iprot.readListBegin() + for _i532 in xrange(_size528): + _elem533 = iprot.readString() + self.metadata.append(_elem533) iprot.readListEnd() else: iprot.skip(ftype) @@ -10819,15 +11310,15 @@ def write(self, oprot): if self.fileIds is not None: oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter506 in self.fileIds: - oprot.writeI64(iter506) + for iter534 in self.fileIds: + oprot.writeI64(iter534) oprot.writeListEnd() oprot.writeFieldEnd() if self.metadata is not None: oprot.writeFieldBegin('metadata', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.metadata)) - for iter507 in self.metadata: - oprot.writeString(iter507) + for iter535 in self.metadata: + oprot.writeString(iter535) oprot.writeListEnd() oprot.writeFieldEnd() if self.type is not None: @@ -10935,10 +11426,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fileIds = [] - (_etype511, _size508) = iprot.readListBegin() - for _i512 in xrange(_size508): - _elem513 = iprot.readI64() - self.fileIds.append(_elem513) + (_etype539, _size536) = iprot.readListBegin() + for _i540 in xrange(_size536): + _elem541 = iprot.readI64() + self.fileIds.append(_elem541) iprot.readListEnd() else: iprot.skip(ftype) @@ -10955,8 +11446,8 @@ def write(self, oprot): if self.fileIds is not None: 
oprot.writeFieldBegin('fileIds', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.fileIds)) - for iter514 in self.fileIds: - oprot.writeI64(iter514) + for iter542 in self.fileIds: + oprot.writeI64(iter542) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11185,11 +11676,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.functions = [] - (_etype518, _size515) = iprot.readListBegin() - for _i519 in xrange(_size515): - _elem520 = Function() - _elem520.read(iprot) - self.functions.append(_elem520) + (_etype546, _size543) = iprot.readListBegin() + for _i547 in xrange(_size543): + _elem548 = Function() + _elem548.read(iprot) + self.functions.append(_elem548) iprot.readListEnd() else: iprot.skip(ftype) @@ -11206,8 +11697,8 @@ def write(self, oprot): if self.functions is not None: oprot.writeFieldBegin('functions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.functions)) - for iter521 in self.functions: - iter521.write(oprot) + for iter549 in self.functions: + iter549.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index 1cf40ae..7ce0546 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -145,6 +145,90 @@ class FieldSchema ::Thrift::Struct.generate_accessors self end +class ForeignKeyRel + include ::Thrift::Struct, ::Thrift::Struct_Union + FOREIGNCOLS = 1 + PARENTCOLS = 2 + FOREIGNKEYNAME = 3 + PARENTTABLENAME = 4 + PARENTTABLESCHEMA = 5 + + FIELDS = { + FOREIGNCOLS => {:type => ::Thrift::Types::LIST, :name => 'foreignCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}}, + PARENTCOLS => {:type => ::Thrift::Types::LIST, :name => 'parentCols', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}}, + FOREIGNKEYNAME => {:type => ::Thrift::Types::STRING, :name => 'foreignKeyName'}, + PARENTTABLENAME => {:type => ::Thrift::Types::STRING, :name => 'parentTableName'}, + PARENTTABLESCHEMA => {:type => ::Thrift::Types::STRING, :name => 'parentTableSchema'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class SQLPrimaryKey + include ::Thrift::Struct, ::Thrift::Struct_Union + TABLE_SCHEM = 1 + TABLE_NAME = 2 + COLUMN_NAME = 3 + KEY_SEQ = 4 + PK_NAME = 5 + + FIELDS = { + TABLE_SCHEM => {:type => ::Thrift::Types::STRING, :name => 'table_schem'}, + TABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'table_name'}, + COLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'column_name'}, + KEY_SEQ => {:type => ::Thrift::Types::I32, :name => 'key_seq'}, + PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + +class SQLForeignKey + include ::Thrift::Struct, ::Thrift::Struct_Union + PKTABLE_SCHEM = 1 + PKTABLE_NAME = 2 + PKCOLUMN_NAME = 3 + FKTABLE_SCHEM = 4 + FKTABLE_NAME = 5 + FKCOLUMN_NAME = 6 + KEY_SEQ = 7 + UPDATE_RULE = 8 + DELETE_RULE = 9 + FK_NAME = 10 + PK_NAME = 11 + + FIELDS = { + PKTABLE_SCHEM => {:type => ::Thrift::Types::STRING, :name => 'pktable_schem'}, + PKTABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'pktable_name'}, + PKCOLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'pkcolumn_name'}, + FKTABLE_SCHEM => {:type => 
::Thrift::Types::STRING, :name => 'fktable_schem'}, + FKTABLE_NAME => {:type => ::Thrift::Types::STRING, :name => 'fktable_name'}, + FKCOLUMN_NAME => {:type => ::Thrift::Types::STRING, :name => 'fkcolumn_name'}, + KEY_SEQ => {:type => ::Thrift::Types::I32, :name => 'key_seq'}, + UPDATE_RULE => {:type => ::Thrift::Types::I32, :name => 'update_rule'}, + DELETE_RULE => {:type => ::Thrift::Types::I32, :name => 'delete_rule'}, + FK_NAME => {:type => ::Thrift::Types::STRING, :name => 'fk_name'}, + PK_NAME => {:type => ::Thrift::Types::STRING, :name => 'pk_name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + class Type include ::Thrift::Struct, ::Thrift::Struct_Union NAME = 1 @@ -645,6 +729,8 @@ class Table TABLETYPE = 12 PRIVILEGES = 13 TEMPORARY = 14 + PRIMARYKEYS = 15 + FOREIGNKEYS = 16 FIELDS = { TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName'}, @@ -660,7 +746,9 @@ class Table VIEWEXPANDEDTEXT => {:type => ::Thrift::Types::STRING, :name => 'viewExpandedText'}, TABLETYPE => {:type => ::Thrift::Types::STRING, :name => 'tableType'}, PRIVILEGES => {:type => ::Thrift::Types::STRUCT, :name => 'privileges', :class => ::PrincipalPrivilegeSet, :optional => true}, - TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true} + TEMPORARY => {:type => ::Thrift::Types::BOOL, :name => 'temporary', :default => false, :optional => true}, + PRIMARYKEYS => {:type => ::Thrift::Types::LIST, :name => 'primaryKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::FieldSchema}}, + FOREIGNKEYS => {:type => ::Thrift::Types::LIST, :name => 'foreignKeys', :element => {:type => ::Thrift::Types::STRUCT, :class => ::ForeignKeyRel}} } def struct_fields; FIELDS; end diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index e782bb5..e5bf5c4 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1324,6 +1324,40 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_index_names failed: unknown result') end + def get_primary_keys(db_name, tbl_name) + send_get_primary_keys(db_name, tbl_name) + return recv_get_primary_keys() + end + + def send_get_primary_keys(db_name, tbl_name) + send_message('get_primary_keys', Get_primary_keys_args, :db_name => db_name, :tbl_name => tbl_name) + end + + def recv_get_primary_keys() + result = receive_message(Get_primary_keys_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? 
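The send_/recv_ plumbing being generated here pairs with the SQLPrimaryKey and SQLForeignKey structs defined above, whose field names mirror the JDBC metadata convention. A hypothetical end-to-end call against the matching regenerated Python bindings (the server address, database and table names are illustrative, not part of this patch):

    from thrift.transport import TSocket, TTransport
    from thrift.protocol import TBinaryProtocol
    from hive_metastore import ThriftHiveMetastore

    transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
    client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
    transport.open()
    try:
        # Returns a list of SQLPrimaryKey; key_seq orders the columns of a
        # composite key, as in JDBC getPrimaryKeys().
        for pk in client.get_primary_keys('default', 'orders'):
            print pk.column_name, pk.key_seq, pk.pk_name
        # Returns a list of SQLForeignKey; the parent (referenced) table comes
        # first in the argument list, the foreign (referencing) table second.
        for fk in client.get_foreign_keys('default', 'orders',
                                          'default', 'lineitem'):
            print fk.pktable_name, fk.fkcolumn_name, fk.fk_name
    finally:
        transport.close()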
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_primary_keys failed: unknown result') + end + + def get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name) + send_get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name) + return recv_get_foreign_keys() + end + + def send_get_foreign_keys(parent_db_name, parent_tbl_name, foreign_db_name, foreign_tbl_name) + send_message('get_foreign_keys', Get_foreign_keys_args, :parent_db_name => parent_db_name, :parent_tbl_name => parent_tbl_name, :foreign_db_name => foreign_db_name, :foreign_tbl_name => foreign_tbl_name) + end + + def recv_get_foreign_keys() + result = receive_message(Get_foreign_keys_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_foreign_keys failed: unknown result') + end + def update_table_column_statistics(stats_obj) send_update_table_column_statistics(stats_obj) return recv_update_table_column_statistics() @@ -3432,6 +3466,32 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_index_names', seqid) end + def process_get_primary_keys(seqid, iprot, oprot) + args = read_args(iprot, Get_primary_keys_args) + result = Get_primary_keys_result.new() + begin + result.success = @handler.get_primary_keys(args.db_name, args.tbl_name) + rescue ::MetaException => o1 + result.o1 = o1 + rescue ::NoSuchObjectException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_primary_keys', seqid) + end + + def process_get_foreign_keys(seqid, iprot, oprot) + args = read_args(iprot, Get_foreign_keys_args) + result = Get_foreign_keys_result.new() + begin + result.success = @handler.get_foreign_keys(args.parent_db_name, args.parent_tbl_name, args.foreign_db_name, args.foreign_tbl_name) + rescue ::MetaException => o1 + result.o1 = o1 + rescue ::NoSuchObjectException => o2 + result.o2 = o2 + end + write_result(result, oprot, 'get_foreign_keys', seqid) + end + def process_update_table_column_statistics(seqid, iprot, oprot) args = read_args(iprot, Update_table_column_statistics_args) result = Update_table_column_statistics_result.new() @@ -7205,6 +7265,86 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Get_primary_keys_args + include ::Thrift::Struct, ::Thrift::Struct_Union + DB_NAME = 1 + TBL_NAME = 2 + + FIELDS = { + DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'db_name'}, + TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'tbl_name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_primary_keys_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLPrimaryKey}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_foreign_keys_args + include ::Thrift::Struct, ::Thrift::Struct_Union + PARENT_DB_NAME = 1 + PARENT_TBL_NAME = 2 + FOREIGN_DB_NAME = 3 + FOREIGN_TBL_NAME = 4 + + FIELDS = { + PARENT_DB_NAME 
=> {:type => ::Thrift::Types::STRING, :name => 'parent_db_name'}, + PARENT_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'parent_tbl_name'}, + FOREIGN_DB_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_db_name'}, + FOREIGN_TBL_NAME => {:type => ::Thrift::Types::STRING, :name => 'foreign_tbl_name'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Get_foreign_keys_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRUCT, :class => ::SQLForeignKey}}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::MetaException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::NoSuchObjectException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Update_table_column_statistics_args include ::Thrift::Struct, ::Thrift::Struct_Union STATS_OBJ = 1 diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index c9fadad..804a8e0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -26,6 +26,7 @@ import com.google.common.collect.ImmutableListMultimap; import com.google.common.collect.Lists; import com.google.common.collect.Multimaps; + import org.apache.commons.cli.OptionBuilder; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; @@ -112,6 +113,7 @@ import org.slf4j.LoggerFactory; import javax.jdo.JDOException; + import java.io.IOException; import java.nio.ByteBuffer; import java.text.DateFormat; @@ -6131,6 +6133,61 @@ public GetChangeVersionResult get_change_version(GetChangeVersionRequest req) throws TException { return new GetChangeVersionResult(getMS().getChangeVersion(req.getTopic())); } + + @Override + public List<SQLPrimaryKey> get_primary_keys(String db_name, String tbl_name) + throws MetaException, NoSuchObjectException, TException { + startTableFunction("get_primary_keys", db_name, tbl_name); + fireReadTablePreEvent(db_name, tbl_name); + List<SQLPrimaryKey> ret = null; + Exception ex = null; + try { + ret = getMS().getPrimaryKeys(db_name, tbl_name); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_primary_keys", ret != null, ex, tbl_name); + } + return ret; + } + + @Override + public List<SQLForeignKey> get_foreign_keys(String parent_db_name, + String parent_tbl_name, String foreign_db_name, + String foreign_tbl_name) throws MetaException, + NoSuchObjectException, TException { + startFunction("get_foreign_keys", " : parentdb=" + parent_db_name + + " parenttbl=" + parent_tbl_name + " foreigndb=" + foreign_db_name + + " foreigntbl=" + foreign_tbl_name); + fireReadTablePreEvent(parent_db_name, parent_tbl_name); + fireReadTablePreEvent(foreign_db_name, foreign_tbl_name); + + List<SQLForeignKey> ret = null; + Exception ex = null; + try { + ret = getMS().getForeignKeys(parent_db_name, parent_tbl_name, + foreign_db_name, foreign_tbl_name); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e
instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_foreign_keys", ret != null, ex, foreign_tbl_name); + } + return ret; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java.orig b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java.orig new file mode 100644 index 0000000..c9fadad --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java.orig @@ -0,0 +1,6659 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import com.facebook.fb303.FacebookBase; +import com.facebook.fb303.fb_status; +import com.google.common.annotations.VisibleForTesting; +import com.google.common.base.Splitter; +import com.google.common.collect.ImmutableList; +import com.google.common.collect.ImmutableListMultimap; +import com.google.common.collect.Lists; +import com.google.common.collect.Multimaps; +import org.apache.commons.cli.OptionBuilder; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.common.FileUtils; +import org.apache.hadoop.hive.common.JvmPauseMonitor; +import org.apache.hadoop.hive.common.LogUtils; +import org.apache.hadoop.hive.common.StatsSetupConst; +import org.apache.hadoop.hive.common.LogUtils.LogInitializationException; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceStability; +import org.apache.hadoop.hive.common.cli.CommonCliOptions; +import org.apache.hadoop.hive.common.metrics.common.Metrics; +import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; +import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; +import org.apache.hadoop.hive.common.metrics.common.MetricsVariable; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.*; +import org.apache.hadoop.hive.metastore.events.AddIndexEvent; +import org.apache.hadoop.hive.metastore.events.AddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.AlterIndexEvent; +import org.apache.hadoop.hive.metastore.events.AlterPartitionEvent; +import org.apache.hadoop.hive.metastore.events.AlterTableEvent; +import org.apache.hadoop.hive.metastore.events.ConfigChangeEvent; +import org.apache.hadoop.hive.metastore.events.CreateDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.CreateTableEvent; +import org.apache.hadoop.hive.metastore.events.DropDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.DropIndexEvent; +import 
org.apache.hadoop.hive.metastore.events.DropPartitionEvent; +import org.apache.hadoop.hive.metastore.events.DropTableEvent; +import org.apache.hadoop.hive.metastore.events.EventCleanerTask; +import org.apache.hadoop.hive.metastore.events.InsertEvent; +import org.apache.hadoop.hive.metastore.events.LoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.events.PreAddIndexEvent; +import org.apache.hadoop.hive.metastore.events.PreAddPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreAlterIndexEvent; +import org.apache.hadoop.hive.metastore.events.PreAlterPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreAlterTableEvent; +import org.apache.hadoop.hive.metastore.events.PreAuthorizationCallEvent; +import org.apache.hadoop.hive.metastore.events.PreCreateDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreCreateTableEvent; +import org.apache.hadoop.hive.metastore.events.PreDropDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreDropIndexEvent; +import org.apache.hadoop.hive.metastore.events.PreDropPartitionEvent; +import org.apache.hadoop.hive.metastore.events.PreDropTableEvent; +import org.apache.hadoop.hive.metastore.events.PreEventContext; +import org.apache.hadoop.hive.metastore.events.PreLoadPartitionDoneEvent; +import org.apache.hadoop.hive.metastore.events.PreReadDatabaseEvent; +import org.apache.hadoop.hive.metastore.events.PreReadTableEvent; +import org.apache.hadoop.hive.metastore.filemeta.OrcFileMetadataHandler; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.txn.TxnStore; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.shims.HadoopShims; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; +import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge.Server.ServerMode; +import org.apache.hadoop.hive.thrift.TUGIContainingTransport; +import org.apache.hadoop.hive.thrift.HiveDelegationTokenManager; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.hadoop.util.StringUtils; +import org.apache.hive.common.util.HiveStringUtils; +import org.apache.thrift.TException; +import org.apache.thrift.TProcessor; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TCompactProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.protocol.TProtocolFactory; +import org.apache.thrift.server.ServerContext; +import org.apache.thrift.server.TServer; +import org.apache.thrift.server.TServerEventHandler; +import org.apache.thrift.server.TThreadPoolServer; +import org.apache.thrift.transport.TFramedTransport; +import org.apache.thrift.transport.TServerSocket; +import org.apache.thrift.transport.TServerTransport; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportFactory; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.jdo.JDOException; +import java.io.IOException; +import java.nio.ByteBuffer; +import java.text.DateFormat; +import java.text.SimpleDateFormat; +import java.util.AbstractMap; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.Formatter; +import java.util.HashMap; +import 
java.util.HashSet; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.LinkedList; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; +import java.util.Timer; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.concurrent.locks.Condition; +import java.util.concurrent.locks.Lock; +import java.util.concurrent.locks.ReentrantLock; +import java.util.regex.Pattern; + +import static org.apache.commons.lang.StringUtils.join; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_COMMENT; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.validateName; + +/** + * TODO:pc remove application logic to a separate interface. + */ +public class HiveMetaStore extends ThriftHiveMetastore { + public static final Logger LOG = LoggerFactory.getLogger(HiveMetaStore.class); + + // boolean that tells if the HiveMetaStore (remote) server is being used. + // Can be used to determine if the calls to metastore api (HMSHandler) are being made with + // embedded metastore or a remote one + private static boolean isMetaStoreRemote = false; + + // Used for testing to simulate method timeout. + @VisibleForTesting + static boolean TEST_TIMEOUT_ENABLED = false; + @VisibleForTesting + static long TEST_TIMEOUT_VALUE = -1; + + /** A fixed date format to be used for hive partition column values. */ + public static final ThreadLocal<DateFormat> PARTITION_DATE_FORMAT = + new ThreadLocal<DateFormat>() { + @Override + protected DateFormat initialValue() { + DateFormat val = new SimpleDateFormat("yyyy-MM-dd"); + val.setLenient(false); // Without this, 2020-20-20 becomes 2021-08-20. + return val; + }; + }; + + /** + * default port on which to start the Hive server + */ + public static final String ADMIN = "admin"; + public static final String PUBLIC = "public"; + + private static HadoopThriftAuthBridge.Server saslServer; + private static HiveDelegationTokenManager delegationTokenManager; + private static boolean useSasl; + + private static final class ChainedTTransportFactory extends TTransportFactory { + private final TTransportFactory parentTransFactory; + private final TTransportFactory childTransFactory; + + private ChainedTTransportFactory( + TTransportFactory parentTransFactory, + TTransportFactory childTransFactory) { + this.parentTransFactory = parentTransFactory; + this.childTransFactory = childTransFactory; + } + + @Override + public TTransport getTransport(TTransport trans) { + return childTransFactory.getTransport(parentTransFactory.getTransport(trans)); + } + } + + /** + * An ugly interface because everything about this file is ugly. RawStore is threadlocal so this + * thread-local disease propagates everywhere, and FileMetadataManager cannot just get a RawStore + * or handlers to use; it will need to have this method to make thread-local handlers and a + * thread-local RawStore.
+ */ + public interface ThreadLocalRawStore { + RawStore getMS() throws MetaException; + } + + public static class HMSHandler extends FacebookBase implements IHMSHandler, ThreadLocalRawStore { + public static final Logger LOG = HiveMetaStore.LOG; + private String rawStoreClassName; + private final HiveConf hiveConf; // stores datastore (jpox) properties, + // right now they come from jpox.properties + + private static String currentUrl; + private FileMetadataManager fileMetadataManager; + private PartitionExpressionProxy expressionProxy; + + //For Metrics + private int initDatabaseCount, initTableCount, initPartCount; + + private Warehouse wh; // hdfs warehouse + private static final ThreadLocal<RawStore> threadLocalMS = + new ThreadLocal<RawStore>() { + @Override + protected RawStore initialValue() { + return null; + } + }; + + private static final ThreadLocal<TxnStore> threadLocalTxn = new ThreadLocal<TxnStore>() { + @Override + protected TxnStore initialValue() { + return null; + } + }; + + public static RawStore getRawStore() { + return threadLocalMS.get(); + } + + public static void removeRawStore() { + threadLocalMS.remove(); + } + + // Thread local configuration is needed as many threads could make changes + // to the conf using the connection hook + private static final ThreadLocal<Configuration> threadLocalConf = + new ThreadLocal<Configuration>() { + @Override + protected Configuration initialValue() { + return null; + } + }; + + public static final String AUDIT_FORMAT = + "ugi=%s\t" + // ugi + "ip=%s\t" + // remote IP + "cmd=%s\t"; // command + public static final Logger auditLog = LoggerFactory.getLogger( + HiveMetaStore.class.getName() + ".audit"); + private static final ThreadLocal<Formatter> auditFormatter = + new ThreadLocal<Formatter>() { + @Override + protected Formatter initialValue() { + return new Formatter(new StringBuilder(AUDIT_FORMAT.length() * 4)); + } + }; + + private final void logAuditEvent(String cmd) { + if (cmd == null) { + return; + } + + UserGroupInformation ugi; + try { + ugi = Utils.getUGI(); + } catch (Exception ex) { + throw new RuntimeException(ex); + } + final Formatter fmt = auditFormatter.get(); + ((StringBuilder) fmt.out()).setLength(0); + + String address = getIPAddress(); + if (address == null) { + address = "unknown-ip-addr"; + } + + auditLog.info(fmt.format(AUDIT_FORMAT, ugi.getUserName(), + address, cmd).toString()); + } + + String getIPAddress() { + if (useSasl) { + if (saslServer != null && saslServer.getRemoteAddress() != null) { + return saslServer.getRemoteAddress().getHostAddress(); + } + } else { + // if kerberos is not enabled + return getThreadLocalIpAddress(); + } + return null; + } + + private static int nextSerialNum = 0; + private static ThreadLocal<Integer> threadLocalId = new ThreadLocal<Integer>() { + @Override + protected Integer initialValue() { + return new Integer(nextSerialNum++); + } + }; + + // This will only be set if the metastore is being accessed from a metastore Thrift server, + // not if it is from the CLI. Also, only if the TTransport being used to connect is an + // instance of TSocket. This is also not set when kerberos is used.
+ private static ThreadLocal threadLocalIpAddress = new ThreadLocal() { + @Override + protected String initialValue() { + return null; + } + }; + + public static void setThreadLocalIpAddress(String ipAddress) { + threadLocalIpAddress.set(ipAddress); + } + + // This will return null if the metastore is not being accessed from a metastore Thrift server, + // or if the TTransport being used to connect is not an instance of TSocket, or if kerberos + // is used + public static String getThreadLocalIpAddress() { + return threadLocalIpAddress.get(); + } + + public static Integer get() { + return threadLocalId.get(); + } + + public HMSHandler(String name) throws MetaException { + this(name, new HiveConf(HMSHandler.class), true); + } + + public HMSHandler(String name, HiveConf conf) throws MetaException { + this(name, conf, true); + } + + public HMSHandler(String name, HiveConf conf, boolean init) throws MetaException { + super(name); + hiveConf = conf; + if (init) { + init(); + } + } + + public HiveConf getHiveConf() { + return hiveConf; + } + + private ClassLoader classLoader; + private AlterHandler alterHandler; + private List preListeners; + private List listeners; + private List endFunctionListeners; + private List initListeners; + private Pattern partitionValidationPattern; + + { + classLoader = Thread.currentThread().getContextClassLoader(); + if (classLoader == null) { + classLoader = Configuration.class.getClassLoader(); + } + } + + @Override + public void init() throws MetaException { + rawStoreClassName = hiveConf.getVar(HiveConf.ConfVars.METASTORE_RAW_STORE_IMPL); + initListeners = MetaStoreUtils.getMetaStoreListeners( + MetaStoreInitListener.class, hiveConf, + hiveConf.getVar(HiveConf.ConfVars.METASTORE_INIT_HOOKS)); + for (MetaStoreInitListener singleInitListener: initListeners) { + MetaStoreInitContext context = new MetaStoreInitContext(); + singleInitListener.onInit(context); + } + + String alterHandlerName = hiveConf.get("hive.metastore.alter.impl", + HiveAlterHandler.class.getName()); + alterHandler = (AlterHandler) ReflectionUtils.newInstance(MetaStoreUtils.getClass( + alterHandlerName), hiveConf); + wh = new Warehouse(hiveConf); + + synchronized (HMSHandler.class) { + if (currentUrl == null || !currentUrl.equals(MetaStoreInit.getConnectionURL(hiveConf))) { + createDefaultDB(); + createDefaultRoles(); + addAdminUsers(); + currentUrl = MetaStoreInit.getConnectionURL(hiveConf); + } + } + + //Start Metrics for Embedded mode + if (hiveConf.getBoolVar(ConfVars.METASTORE_METRICS)) { + try { + MetricsFactory.init(hiveConf); + } catch (Exception e) { + // log exception, but ignore inability to start + LOG.error("error in Metrics init: " + e.getClass().getName() + " " + + e.getMessage(), e); + } + } + + Metrics metrics = MetricsFactory.getInstance(); + if (metrics != null && hiveConf.getBoolVar(ConfVars.METASTORE_INIT_METADATA_COUNT_ENABLED)) { + LOG.info("Begin calculating metadata count metrics."); + updateMetrics(); + LOG.info("Finished metadata count metrics: " + initDatabaseCount + " databases, " + initTableCount + + " tables, " + initPartCount + " partitions."); + metrics.addGauge(MetricsConstant.INIT_TOTAL_DATABASES, new MetricsVariable() { + @Override + public Object getValue() { + return initDatabaseCount; + } + }); + metrics.addGauge(MetricsConstant.INIT_TOTAL_TABLES, new MetricsVariable() { + @Override + public Object getValue() { + return initTableCount; + } + }); + metrics.addGauge(MetricsConstant.INIT_TOTAL_PARTITIONS, new MetricsVariable() { + @Override + public Object
getValue() { + return initPartCount; + } + }); + } + + preListeners = MetaStoreUtils.getMetaStoreListeners(MetaStorePreEventListener.class, + hiveConf, + hiveConf.getVar(HiveConf.ConfVars.METASTORE_PRE_EVENT_LISTENERS)); + preListeners.add(0, new TransactionalValidationListener(hiveConf)); + listeners = MetaStoreUtils.getMetaStoreListeners(MetaStoreEventListener.class, hiveConf, + hiveConf.getVar(HiveConf.ConfVars.METASTORE_EVENT_LISTENERS)); + listeners.add(new SessionPropertiesListener(hiveConf)); + listeners.add(new AcidEventListener(hiveConf)); + + if (metrics != null) { + listeners.add(new HMSMetricsListener(hiveConf, metrics)); + } + + endFunctionListeners = MetaStoreUtils.getMetaStoreListeners( + MetaStoreEndFunctionListener.class, hiveConf, + hiveConf.getVar(HiveConf.ConfVars.METASTORE_END_FUNCTION_LISTENERS)); + + String partitionValidationRegex = + hiveConf.getVar(HiveConf.ConfVars.METASTORE_PARTITION_NAME_WHITELIST_PATTERN); + if (partitionValidationRegex != null && !partitionValidationRegex.isEmpty()) { + partitionValidationPattern = Pattern.compile(partitionValidationRegex); + } else { + partitionValidationPattern = null; + } + + long cleanFreq = hiveConf.getTimeVar(ConfVars.METASTORE_EVENT_CLEAN_FREQ, TimeUnit.MILLISECONDS); + if (cleanFreq > 0) { + // In default config, there is no timer. + Timer cleaner = new Timer("Metastore Events Cleaner Thread", true); + cleaner.schedule(new EventCleanerTask(this), cleanFreq, cleanFreq); + } + + expressionProxy = PartFilterExprUtil.createExpressionProxy(hiveConf); + fileMetadataManager = new FileMetadataManager((ThreadLocalRawStore)this, hiveConf); + } + + private String addPrefix(String s) { + return threadLocalId.get() + ": " + s; + } + + @Override + public void setConf(Configuration conf) { + threadLocalConf.set(conf); + RawStore ms = threadLocalMS.get(); + if (ms != null) { + ms.setConf(conf); // reload if DS related configuration is changed + } + } + + @Override + public Configuration getConf() { + Configuration conf = threadLocalConf.get(); + if (conf == null) { + conf = new Configuration(hiveConf); + threadLocalConf.set(conf); + } + return conf; + } + + public Warehouse getWh() { + return wh; + } + + @Override + public void setMetaConf(String key, String value) throws MetaException { + ConfVars confVar = HiveConf.getMetaConf(key); + if (confVar == null) { + throw new MetaException("Invalid configuration key " + key); + } + String validate = confVar.validate(value); + if (validate != null) { + throw new MetaException("Invalid configuration value " + value + " for key " + key + + " by " + validate); + } + Configuration configuration = getConf(); + String oldValue = configuration.get(key); + configuration.set(key, value); + + for (MetaStoreEventListener listener : listeners) { + listener.onConfigChange(new ConfigChangeEvent(this, key, oldValue, value)); + } + } + + @Override + public String getMetaConf(String key) throws MetaException { + ConfVars confVar = HiveConf.getMetaConf(key); + if (confVar == null) { + throw new MetaException("Invalid configuration key " + key); + } + return getConf().get(key); + } + + /** + * Get a cached RawStore. 
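+ * + * The store is created lazily, once per calling thread, and then cached; in effect: + * + *   RawStore ms = threadLocalMS.get(); + *   if (ms == null) {        // first call on this thread + *     ms = newRawStore();    // driven by METASTORE_RAW_STORE_IMPL + *     ms.verifySchema(); + *     threadLocalMS.set(ms); + *   }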
+ * + * @return the cached RawStore + * @throws MetaException + */ + @InterfaceAudience.LimitedPrivate({"HCATALOG"}) + @InterfaceStability.Evolving + @Override + public RawStore getMS() throws MetaException { + RawStore ms = threadLocalMS.get(); + if (ms == null) { + ms = newRawStore(); + ms.verifySchema(); + threadLocalMS.set(ms); + ms = threadLocalMS.get(); + } + return ms; + } + + private TxnStore getTxnHandler() { + TxnStore txn = threadLocalTxn.get(); + if (txn == null) { + txn = TxnUtils.getTxnStore(hiveConf); + threadLocalTxn.set(txn); + } + return txn; + } + + private RawStore newRawStore() throws MetaException { + LOG.info(addPrefix("Opening raw store with implementation class:" + + rawStoreClassName)); + Configuration conf = getConf(); + + if (hiveConf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + LOG.info("Fastpath, skipping raw store proxy"); + try { + RawStore rs = ((Class) MetaStoreUtils.getClass( + rawStoreClassName)).newInstance(); + rs.setConf(conf); + return rs; + } catch (Exception e) { + LOG.error("Unable to instantiate raw store directly in fastpath mode", e); + throw new RuntimeException(e); + } + } + + return RawStoreProxy.getProxy(hiveConf, conf, rawStoreClassName, threadLocalId.get()); + } + + private void createDefaultDB_core(RawStore ms) throws MetaException, InvalidObjectException { + try { + ms.getDatabase(DEFAULT_DATABASE_NAME); + } catch (NoSuchObjectException e) { + Database db = new Database(DEFAULT_DATABASE_NAME, DEFAULT_DATABASE_COMMENT, + wh.getDefaultDatabasePath(DEFAULT_DATABASE_NAME).toString(), null); + db.setOwnerName(PUBLIC); + db.setOwnerType(PrincipalType.ROLE); + ms.createDatabase(db); + } + } + + /** + * create default database if it doesn't exist. + * + * This can race when HiveServer2 with an embedded metastore and a standalone Metastore + * Server invoke createDefaultDB concurrently. If the first attempt fails with a JDOException + * it is retried once; if the retry fails too, the failure is only logged as a warning, since + * it means the competing caller succeeded. + * + * @throws MetaException + */ + private void createDefaultDB() throws MetaException { + try { + createDefaultDB_core(getMS()); + } catch (JDOException e) { + LOG.warn("Retrying creating default database after error: " + e.getMessage(), e); + try { + createDefaultDB_core(getMS()); + } catch (InvalidObjectException e1) { + throw new MetaException(e1.getMessage()); + } + } catch (InvalidObjectException e) { + throw new MetaException(e.getMessage()); + } + } + + /** + * create default roles if they don't exist. + * + * This can race when HiveServer2 with an embedded metastore and a standalone Metastore + * Server invoke createDefaultRoles concurrently. If the first attempt fails with a + * JDOException it is retried once; if the retry fails too, the failure is only logged as a + * warning, since it means the competing caller succeeded. + * + * @throws MetaException + */ + private void createDefaultRoles() throws MetaException { + try { + createDefaultRoles_core(); + } catch (JDOException e) { + LOG.warn("Retrying creating default roles after error: " + e.getMessage(), e); + createDefaultRoles_core(); + } + } + + private void createDefaultRoles_core() throws MetaException { + + RawStore ms = getMS(); + try { + ms.addRole(ADMIN, ADMIN); + } catch (InvalidObjectException e) { + LOG.debug(ADMIN +" role already exists",e); + } catch (NoSuchObjectException e) { + // This should never be thrown.
LOG.warn("Unexpected exception while adding " +ADMIN+" roles" , e); + } + LOG.info("Added "+ ADMIN+ " role in metastore"); + try { + ms.addRole(PUBLIC, PUBLIC); + } catch (InvalidObjectException e) { + LOG.debug(PUBLIC + " role already exists",e); + } catch (NoSuchObjectException e) { + // This should never be thrown. + LOG.warn("Unexpected exception while adding "+PUBLIC +" roles" , e); + } + LOG.info("Added "+PUBLIC+ " role in metastore"); + // now grant all privs to admin + PrivilegeBag privs = new PrivilegeBag(); + privs.addToPrivileges(new HiveObjectPrivilege( new HiveObjectRef(HiveObjectType.GLOBAL, null, + null, null, null), ADMIN, PrincipalType.ROLE, new PrivilegeGrantInfo("All", 0, ADMIN, + PrincipalType.ROLE, true))); + try { + ms.grantPrivileges(privs); + } catch (InvalidObjectException e) { + // Surprisingly these privs are already granted. + LOG.debug("Failed while granting global privs to admin", e); + } catch (NoSuchObjectException e) { + // Unlikely to be thrown. + LOG.warn("Failed while granting global privs to admin", e); + } + } + + /** + * add admin users if they don't exist. + * + * This can race when HiveServer2 with an embedded metastore and a standalone Metastore + * Server invoke addAdminUsers concurrently. If the first attempt fails with a JDOException + * it is retried once; if the retry fails too, the failure is only logged as a warning, since + * it means the competing caller succeeded. + * + * @throws MetaException + */ + private void addAdminUsers() throws MetaException { + try { + addAdminUsers_core(); + } catch (JDOException e) { + LOG.warn("Retrying adding admin users after error: " + e.getMessage(), e); + addAdminUsers_core(); + } + } + + private void addAdminUsers_core() throws MetaException { + + // now add pre-configured users to admin role + String userStr = HiveConf.getVar(hiveConf,ConfVars.USERS_IN_ADMIN_ROLE,"").trim(); + if (userStr.isEmpty()) { + LOG.info("No user is added in admin role, since config is empty"); + return; + } + // Since user names need to be valid unix user names, per IEEE Std 1003.1-2001 they cannot + // contain comma, so we can safely split above string on comma. + + Iterator users = Splitter.on(",").trimResults().omitEmptyStrings().split(userStr).iterator(); + if (!users.hasNext()) { + LOG.info("No user is added in admin role, since config value "+ userStr + + " is in incorrect format. We accept a comma separated list of users."); + return; + } + Role adminRole; + RawStore ms = getMS(); + try { + adminRole = ms.getRole(ADMIN); + } catch (NoSuchObjectException e) { + LOG.error("Failed to retrieve just added admin role",e); + return; + } + while (users.hasNext()) { + String userName = users.next(); + try { + ms.grantRole(adminRole, userName, PrincipalType.USER, ADMIN, PrincipalType.ROLE, true); + LOG.info("Added " + userName + " to admin role"); + } catch (NoSuchObjectException e) { + LOG.error("Failed to add "+ userName + " in admin role",e); + } catch (InvalidObjectException e) { + LOG.debug(userName + " already in admin role", e); + } + } + } + + private void logInfo(String m) { + LOG.info(threadLocalId.get().toString() + ": " + m); + logAuditEvent(m); + } + + private String startFunction(String function, String extraLogInfo) { + incrementCounter(function); + logInfo((getThreadLocalIpAddress() == null ?
"" : "source:" + getThreadLocalIpAddress() + " ") + + function + extraLogInfo); + if (MetricsFactory.getInstance() != null) { + try { + MetricsFactory.getInstance().startStoredScope(function); + } catch (IOException e) { + LOG.debug("Exception when starting metrics scope" + + e.getClass().getName() + " " + e.getMessage(), e); + } + } + return function; + } + + private String startFunction(String function) { + return startFunction(function, ""); + } + + private String startTableFunction(String function, String db, String tbl) { + return startFunction(function, " : db=" + db + " tbl=" + tbl); + } + + private String startMultiTableFunction(String function, String db, List tbls) { + String tableNames = join(tbls, ","); + return startFunction(function, " : db=" + db + " tbls=" + tableNames); + } + + private String startPartitionFunction(String function, String db, String tbl, + List partVals) { + return startFunction(function, " : db=" + db + " tbl=" + tbl + + "[" + join(partVals, ",") + "]"); + } + + private String startPartitionFunction(String function, String db, String tbl, + Map partName) { + return startFunction(function, " : db=" + db + " tbl=" + tbl + "partition=" + partName); + } + + private void endFunction(String function, boolean successful, Exception e) { + endFunction(function, successful, e, null); + } + private void endFunction(String function, boolean successful, Exception e, + String inputTableName) { + endFunction(function, new MetaStoreEndFunctionContext(successful, e, inputTableName)); + } + + private void endFunction(String function, MetaStoreEndFunctionContext context) { + if (MetricsFactory.getInstance() != null) { + try { + MetricsFactory.getInstance().endStoredScope(function); + } catch (IOException e) { + LOG.debug("Exception when closing metrics scope" + e); + } + } + + for (MetaStoreEndFunctionListener listener : endFunctionListeners) { + listener.onEndFunction(function, context); + } + } + + @Override + public fb_status getStatus() { + return fb_status.ALIVE; + } + + @Override + public void shutdown() { + logInfo("Metastore shutdown started..."); + RawStore ms = threadLocalMS.get(); + if (ms != null) { + try { + ms.shutdown(); + } finally { + threadLocalMS.remove(); + } + } + logInfo("Metastore shutdown complete."); + } + + @Override + public AbstractMap getCounters() { + AbstractMap counters = super.getCounters(); + + // Allow endFunctionListeners to add any counters they have collected + if (endFunctionListeners != null) { + for (MetaStoreEndFunctionListener listener : endFunctionListeners) { + listener.exportCounters(counters); + } + } + + return counters; + } + + private void create_database_core(RawStore ms, final Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException { + if (!validateName(db.getName(), null)) { + throw new InvalidObjectException(db.getName() + " is not a valid database name"); + } + if (null == db.getLocationUri()) { + db.setLocationUri(wh.getDefaultDatabasePath(db.getName()).toString()); + } else { + db.setLocationUri(wh.getDnsPath(new Path(db.getLocationUri())).toString()); + } + Path dbPath = new Path(db.getLocationUri()); + boolean success = false; + boolean madeDir = false; + + try { + + firePreEvent(new PreCreateDatabaseEvent(db, this)); + + if (!wh.isDir(dbPath)) { + if (!wh.mkdirs(dbPath, true)) { + throw new MetaException("Unable to create database path " + dbPath + + ", failed to create database " + db.getName()); + } + madeDir = true; + } + + ms.openTransaction(); + ms.createDatabase(db); + success = 
ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + if (madeDir) { + wh.deleteDir(dbPath, true); + } + } + for (MetaStoreEventListener listener : listeners) { + listener.onCreateDatabase(new CreateDatabaseEvent(db, success, this)); + } + } + } + + @Override + public void create_database(final Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException { + startFunction("create_database", ": " + db.toString()); + boolean success = false; + Exception ex = null; + try { + try { + if (null != get_database_core(db.getName())) { + throw new AlreadyExistsException("Database " + db.getName() + " already exists"); + } + } catch (NoSuchObjectException e) { + // expected + } + + if (TEST_TIMEOUT_ENABLED) { + try { + Thread.sleep(TEST_TIMEOUT_VALUE); + } catch (InterruptedException e) { + // do nothing + } + Deadline.checkTimeout(); + } + + create_database_core(getMS(), db); + success = true; + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("create_database", success, ex); + } + } + + @Override + public Database get_database(final String name) throws NoSuchObjectException, MetaException { + startFunction("get_database", ": " + name); + Database db = null; + Exception ex = null; + try { + db = get_database_core(name); + firePreEvent(new PreReadDatabaseEvent(db, this)); + } catch (MetaException e) { + ex = e; + throw e; + } catch (NoSuchObjectException e) { + ex = e; + throw e; + } finally { + endFunction("get_database", db != null, ex); + } + return db; + } + + /** + * Equivalent to get_database, but does not write to audit logs, or fire pre-event listeners. + * Meant to be used for internal hive classes that don't use the thrift interface.
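+ * + * For example (the handler variable is hypothetical), the two entry points differ + * only in their side effects: + * + *   Database db = handler.get_database("sales");       // audit log + PreReadDatabaseEvent + *   Database db2 = handler.get_database_core("sales"); // plain metastore lookup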
+ * @param name + * @return + * @throws NoSuchObjectException + * @throws MetaException + */ + public Database get_database_core(final String name) throws NoSuchObjectException, + MetaException { + Database db = null; + try { + db = getMS().getDatabase(name); + } catch (MetaException e) { + throw e; + } catch (NoSuchObjectException e) { + throw e; + } catch (Exception e) { + assert (e instanceof RuntimeException); + throw (RuntimeException) e; + } + return db; + } + + @Override + public void alter_database(final String dbName, final Database db) + throws NoSuchObjectException, TException, MetaException { + startFunction("alter_database" + dbName); + boolean success = false; + Exception ex = null; + try { + getMS().alterDatabase(dbName, db); + success = true; + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("alter_database", success, ex); + } + } + + private void drop_database_core(RawStore ms, + final String name, final boolean deleteData, final boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, + IOException, InvalidObjectException, InvalidInputException { + boolean success = false; + Database db = null; + List tablePaths = new ArrayList(); + List partitionPaths = new ArrayList(); + try { + ms.openTransaction(); + db = ms.getDatabase(name); + + firePreEvent(new PreDropDatabaseEvent(db, this)); + + List allTables = get_all_tables(db.getName()); + List allFunctions = get_functions(db.getName(), "*"); + + if (!cascade) { + if (!allTables.isEmpty()) { + throw new InvalidOperationException( + "Database " + db.getName() + " is not empty. One or more tables exist."); + } + if (!allFunctions.isEmpty()) { + throw new InvalidOperationException( + "Database " + db.getName() + " is not empty. One or more functions exist."); + } + } + Path path = new Path(db.getLocationUri()).getParent(); + if (!wh.isWritable(path)) { + throw new MetaException("Database not dropped since " + + path + " is not writable by " + + hiveConf.getUser()); + } + + Path databasePath = wh.getDnsPath(wh.getDatabasePath(db)); + + // drop any functions before dropping db + for (String funcName : allFunctions) { + drop_function(name, funcName); + } + + // drop tables before dropping db + int tableBatchSize = HiveConf.getIntVar(hiveConf, + ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + + int startIndex = 0; + // retrieve the tables from the metastore in batches to alleviate memory constraints + while (startIndex < allTables.size()) { + int endIndex = Math.min(startIndex + tableBatchSize, allTables.size()); + + List
tables = null; + try { + tables = ms.getTableObjectsByName(name, allTables.subList(startIndex, endIndex)); + } catch (UnknownDBException e) { + throw new MetaException(e.getMessage()); + } + + if (tables != null && !tables.isEmpty()) { + for (Table table : tables) { + + // If the table is not external and may not be in a subdirectory of the database, + // add its location to the list of paths to delete + Path tablePath = null; + if (table.getSd().getLocation() != null && !isExternal(table)) { + tablePath = wh.getDnsPath(new Path(table.getSd().getLocation())); + if (!wh.isWritable(tablePath.getParent())) { + throw new MetaException("Database metadata not deleted since table: " + + table.getTableName() + " has a parent location " + tablePath.getParent() + + " which is not writable by " + hiveConf.getUser()); + } + + if (!isSubdirectory(databasePath, tablePath)) { + tablePaths.add(tablePath); + } + } + + // For each partition in each table, drop the partitions and get a list of + // partitions' locations which might need to be deleted + partitionPaths = dropPartitionsAndGetLocations(ms, name, table.getTableName(), + tablePath, table.getPartitionKeys(), deleteData && !isExternal(table)); + + // Drop the table but not its data + drop_table(name, table.getTableName(), false); + } + + startIndex = endIndex; + } + } + + if (ms.dropDatabase(name)) { + success = ms.commitTransaction(); + } + } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (deleteData) { + // Delete the data in the partitions which have other locations + deletePartitionData(partitionPaths); + // Delete the data in the tables which have other locations + for (Path tablePath : tablePaths) { + deleteTableData(tablePath); + } + // Delete the data in the database + try { + wh.deleteDir(new Path(db.getLocationUri()), true); + } catch (Exception e) { + LOG.error("Failed to delete database directory: " + db.getLocationUri() + + " " + e.getMessage()); + } + // it is not a terrible thing even if the data is not deleted + } + for (MetaStoreEventListener listener : listeners) { + listener.onDropDatabase(new DropDatabaseEvent(db, success, this)); + } + } + } + + /** + * Returns a BEST GUESS as to whether or not other is a subdirectory of parent. It does not + * take into account any intricacies of the underlying file system, which is assumed to be + * HDFS. This should not return any false positives, but may return false negatives. + * + * @param parent + * @param other + * @return + */ + private boolean isSubdirectory(Path parent, Path other) { + return other.toString().startsWith(parent.toString().endsWith(Path.SEPARATOR) ?
+ parent.toString() : parent.toString() + Path.SEPARATOR); + } + + @Override + public void drop_database(final String dbName, final boolean deleteData, final boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException { + + startFunction("drop_database", ": " + dbName); + if (DEFAULT_DATABASE_NAME.equalsIgnoreCase(dbName)) { + endFunction("drop_database", false, null); + throw new MetaException("Can not drop default database"); + } + + boolean success = false; + Exception ex = null; + try { + drop_database_core(getMS(), dbName, deleteData, cascade); + success = true; + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("drop_database", success, ex); + } + } + + @Override + public List get_databases(final String pattern) throws MetaException { + startFunction("get_databases", ": " + pattern); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getDatabases(pattern); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_databases", ret != null, ex); + } + return ret; + } + + @Override + public List get_all_databases() throws MetaException { + startFunction("get_all_databases"); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getAllDatabases(); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_all_databases", ret != null, ex); + } + return ret; + } + + private void create_type_core(final RawStore ms, final Type type) + throws AlreadyExistsException, MetaException, InvalidObjectException { + if (!MetaStoreUtils.validateName(type.getName(), null)) { + throw new InvalidObjectException("Invalid type name"); + } + + boolean success = false; + try { + ms.openTransaction(); + if (is_type_exists(ms, type.getName())) { + throw new AlreadyExistsException("Type " + type.getName() + " already exists"); + } + ms.createType(type); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + @Override + public boolean create_type(final Type type) throws AlreadyExistsException, + MetaException, InvalidObjectException { + startFunction("create_type", ": " + type.toString()); + boolean success = false; + Exception ex = null; + try { + create_type_core(getMS(), type); + success = true; + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("create_type", success, ex); + } + + return success; + } + + @Override + public Type get_type(final String name) throws MetaException, NoSuchObjectException { + startFunction("get_type", ": " + name); + + Type ret = null; + Exception ex = null; + try { + ret = getMS().getType(name); + if (null == ret) { + throw new NoSuchObjectException("Type \"" + name + "\" 
not found."); + } + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_type", ret != null, ex); + } + return ret; + } + + private boolean is_type_exists(RawStore ms, String typeName) + throws MetaException { + return (ms.getType(typeName) != null); + } + + private void drop_type_core(final RawStore ms, String typeName) + throws NoSuchObjectException, MetaException { + boolean success = false; + try { + ms.openTransaction(); + // drop any partitions + if (!is_type_exists(ms, typeName)) { + throw new NoSuchObjectException(typeName + " doesn't exist"); + } + if (!ms.dropType(typeName)) { + throw new MetaException("Unable to drop type " + typeName); + } + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + @Override + public boolean drop_type(final String name) throws MetaException, NoSuchObjectException { + startFunction("drop_type", ": " + name); + + boolean success = false; + Exception ex = null; + try { + // TODO:pc validate that there are no types that refer to this + success = getMS().dropType(name); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("drop_type", success, ex); + } + return success; + } + + @Override + public Map get_type_all(String name) throws MetaException { + // TODO Auto-generated method stub + startFunction("get_type_all", ": " + name); + endFunction("get_type_all", false, null); + throw new MetaException("Not yet implemented"); + } + + private void create_table_core(final RawStore ms, final Table tbl, + final EnvironmentContext envContext) + throws AlreadyExistsException, MetaException, + InvalidObjectException, NoSuchObjectException { + + if (!MetaStoreUtils.validateName(tbl.getTableName(), hiveConf)) { + throw new InvalidObjectException(tbl.getTableName() + + " is not a valid object name"); + } + String validate = MetaStoreUtils.validateTblColumns(tbl.getSd().getCols()); + if (validate != null) { + throw new InvalidObjectException("Invalid column " + validate); + } + if (tbl.getPartitionKeys() != null) { + validate = MetaStoreUtils.validateTblColumns(tbl.getPartitionKeys()); + if (validate != null) { + throw new InvalidObjectException("Invalid partition column " + validate); + } + } + SkewedInfo skew = tbl.getSd().getSkewedInfo(); + if (skew != null) { + validate = MetaStoreUtils.validateSkewedColNames(skew.getSkewedColNames()); + if (validate != null) { + throw new InvalidObjectException("Invalid skew column " + validate); + } + validate = MetaStoreUtils.validateSkewedColNamesSubsetCol( + skew.getSkewedColNames(), tbl.getSd().getCols()); + if (validate != null) { + throw new InvalidObjectException("Invalid skew column " + validate); + } + } + + Path tblPath = null; + boolean success = false, madeDir = false; + try { + firePreEvent(new PreCreateTableEvent(tbl, this)); + + ms.openTransaction(); + + Database db = ms.getDatabase(tbl.getDbName()); + if (db == null) { + throw new NoSuchObjectException("The database " + tbl.getDbName() + " does not exist"); + } + + // get_table checks whether database exists, it should be moved here + if (is_table_exists(ms, tbl.getDbName(), tbl.getTableName())) { + throw 
new AlreadyExistsException("Table " + tbl.getTableName() + + " already exists"); + } + + if (!TableType.VIRTUAL_VIEW.toString().equals(tbl.getTableType())) { + if (tbl.getSd().getLocation() == null + || tbl.getSd().getLocation().isEmpty()) { + tblPath = wh.getTablePath( + ms.getDatabase(tbl.getDbName()), tbl.getTableName()); + } else { + if (!isExternal(tbl) && !MetaStoreUtils.isNonNativeTable(tbl)) { + LOG.warn("Location: " + tbl.getSd().getLocation() + + " specified for non-external table:" + tbl.getTableName()); + } + tblPath = wh.getDnsPath(new Path(tbl.getSd().getLocation())); + } + tbl.getSd().setLocation(tblPath.toString()); + } + + if (tblPath != null) { + if (!wh.isDir(tblPath)) { + if (!wh.mkdirs(tblPath, true)) { + throw new MetaException(tblPath + + " is not a directory or unable to create one"); + } + madeDir = true; + } + } + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && + !MetaStoreUtils.isView(tbl)) { + MetaStoreUtils.updateTableStatsFast(db, tbl, wh, madeDir, envContext); + } + + // set create time + long time = System.currentTimeMillis() / 1000; + tbl.setCreateTime((int) time); + if (tbl.getParameters() == null || + tbl.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) { + tbl.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time)); + } + ms.createTable(tbl); + success = ms.commitTransaction(); + + } finally { + if (!success) { + ms.rollbackTransaction(); + if (madeDir) { + wh.deleteDir(tblPath, true); + } + } + for (MetaStoreEventListener listener : listeners) { + CreateTableEvent createTableEvent = + new CreateTableEvent(tbl, success, this); + createTableEvent.setEnvironmentContext(envContext); + listener.onCreateTable(createTableEvent); + } + } + } + + @Override + public void create_table(final Table tbl) throws AlreadyExistsException, + MetaException, InvalidObjectException { + create_table_with_environment_context(tbl, null); + } + + @Override + public void create_table_with_environment_context(final Table tbl, + final EnvironmentContext envContext) + throws AlreadyExistsException, MetaException, InvalidObjectException { + startFunction("create_table", ": " + tbl.toString()); + boolean success = false; + Exception ex = null; + try { + create_table_core(getMS(), tbl, envContext); + success = true; + } catch (NoSuchObjectException e) { + ex = e; + throw new InvalidObjectException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("create_table", success, ex, tbl.getTableName()); + } + } + + private boolean is_table_exists(RawStore ms, String dbname, String name) + throws MetaException { + return (ms.getTable(dbname, name) != null); + } + + private boolean drop_table_core(final RawStore ms, final String dbname, final String name, + final boolean deleteData, final EnvironmentContext envContext, + final String indexName) throws NoSuchObjectException, + MetaException, IOException, InvalidObjectException, InvalidInputException { + boolean success = false; + boolean isExternal = false; + Path tblPath = null; + List partPaths = null; + Table tbl = null; + boolean ifPurge = false; + try { + ms.openTransaction(); + // drop any partitions + tbl = get_table_core(dbname, name); + if (tbl == null) { + throw new 
NoSuchObjectException(name + " doesn't exist"); + } + if (tbl.getSd() == null) { + throw new MetaException("Table metadata is corrupted"); + } + ifPurge = isMustPurge(envContext, tbl); + + firePreEvent(new PreDropTableEvent(tbl, deleteData, this)); + + boolean isIndexTable = isIndexTable(tbl); + if (indexName == null && isIndexTable) { + throw new RuntimeException( + "The table " + name + " is an index table. Please do drop index instead."); + } + + if (!isIndexTable) { + try { + List indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE); + while (indexes != null && indexes.size() > 0) { + for (Index idx : indexes) { + this.drop_index_by_name(dbname, name, idx.getIndexName(), true); + } + indexes = ms.getIndexes(dbname, name, Short.MAX_VALUE); + } + } catch (TException e) { + throw new MetaException(e.getMessage()); + } + } + isExternal = isExternal(tbl); + if (tbl.getSd().getLocation() != null) { + tblPath = new Path(tbl.getSd().getLocation()); + if (!wh.isWritable(tblPath.getParent())) { + String target = indexName == null ? "Table" : "Index table"; + throw new MetaException(target + " metadata not deleted since " + + tblPath.getParent() + " is not writable by " + + hiveConf.getUser()); + } + } + + checkTrashPurgeCombination(tblPath, dbname + "." + name, ifPurge, deleteData && !isExternal); + // Drop the partitions and get a list of locations which need to be deleted + partPaths = dropPartitionsAndGetLocations(ms, dbname, name, tblPath, + tbl.getPartitionKeys(), deleteData && !isExternal); + + if (!ms.dropTable(dbname, name)) { + String tableName = dbname + "." + name; + throw new MetaException(indexName == null ? "Unable to drop table " + tableName: + "Unable to drop index table " + tableName + " for index " + indexName); + } + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (deleteData && !isExternal) { + // Data needs deletion. Check if trash may be skipped. 
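+ // ifPurge was resolved by isMustPurge(envContext, tbl) before the drop, and the same + // flag is threaded through both deletes below; roughly: + // + //   wh.deleteDir(path, true /* recursive */, true);   // ifPurge: bypass trash + //   wh.deleteDir(path, true /* recursive */, false);  // otherwise: move to trash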
+ // Delete the data in the partitions which have other locations + deletePartitionData(partPaths, ifPurge); + // Delete the data in the table + deleteTableData(tblPath, ifPurge); + // ok even if the data is not deleted + } + for (MetaStoreEventListener listener : listeners) { + DropTableEvent dropTableEvent = new DropTableEvent(tbl, success, deleteData, this); + dropTableEvent.setEnvironmentContext(envContext); + listener.onDropTable(dropTableEvent); + } + } + return success; + } + + /** + * Will throw MetaException if the combination of trash policy and purge can't be satisfied + * @param pathToData path to data which may potentially be moved to trash + * @param objectName db.table, or db.table.part + * @param ifPurge if the PURGE option is specified + */ + private void checkTrashPurgeCombination(Path pathToData, String objectName, boolean ifPurge, + boolean deleteData) throws MetaException { + // There is no need to check TrashPurgeCombination in following cases since Purge/Trash + // is not applicable: + // a) deleteData is false -- drop an external table + // b) pathToData is null -- a view + // c) ifPurge is true -- force delete without Trash + if (!deleteData || pathToData == null || ifPurge) { + return; + } + + boolean trashEnabled = false; + try { + trashEnabled = 0 < hiveConf.getFloat("fs.trash.interval", -1); + } catch(NumberFormatException ex) { + // nothing to do + } + + if (trashEnabled) { + try { + HadoopShims.HdfsEncryptionShim shim = + ShimLoader.getHadoopShims().createHdfsEncryptionShim(FileSystem.get(hiveConf), hiveConf); + if (shim.isPathEncrypted(pathToData)) { + throw new MetaException("Unable to drop " + objectName + " because it is in an encryption zone" + + " and trash is enabled. Use PURGE option to skip trash."); + } + } catch (IOException ex) { + MetaException e = new MetaException(ex.getMessage()); + e.initCause(ex); + throw e; + } + } + } + + /** + * Deletes the data in a table's location; if the delete fails, logs an error + * + * @param tablePath + */ + private void deleteTableData(Path tablePath) { + deleteTableData(tablePath, false); + } + + /** + * Deletes the data in a table's location; if the delete fails, logs an error + * + * @param tablePath + * @param ifPurge completely purge the table (skipping trash) while removing + * data from warehouse + */ + private void deleteTableData(Path tablePath, boolean ifPurge) { + + if (tablePath != null) { + try { + wh.deleteDir(tablePath, true, ifPurge); + } catch (Exception e) { + LOG.error("Failed to delete table directory: " + tablePath + + " " + e.getMessage()); + } + } + } + + /** + * Given a list of partitions' locations, tries to delete each one, + * and logs an error for each that fails. + * + * @param partPaths + */ + private void deletePartitionData(List partPaths) { + deletePartitionData(partPaths, false); + } + + /** + * Given a list of partitions' locations, tries to delete each one, + * and logs an error for each that fails. + * + * @param partPaths + * @param ifPurge completely purge the partition (skipping trash) while + * removing data from warehouse + */ + private void deletePartitionData(List partPaths, boolean ifPurge) { + if (partPaths != null && !partPaths.isEmpty()) { + for (Path partPath : partPaths) { + try { + wh.deleteDir(partPath, true, ifPurge); + } catch (Exception e) { + LOG.error("Failed to delete partition directory: " + partPath + + " " + e.getMessage()); + } + } + } + } + + /** + * Retrieves the partitions specified by partitionKeys.
If checkLocation is set, checks that the locations of + * partitions which may not be subdirectories of tablePath are + * writable. + * + * Drops the metadata for each partition. + * + * Provides a list of locations of partitions which may not be subdirectories of tablePath. + * + * @param ms + * @param dbName + * @param tableName + * @param tablePath + * @param partitionKeys + * @param checkLocation + * @return + * @throws MetaException + * @throws IOException + * @throws InvalidInputException + * @throws InvalidObjectException + * @throws NoSuchObjectException + */ + private List dropPartitionsAndGetLocations(RawStore ms, String dbName, + String tableName, Path tablePath, List partitionKeys, boolean checkLocation) + throws MetaException, IOException, NoSuchObjectException, InvalidObjectException, + InvalidInputException { + int partitionBatchSize = HiveConf.getIntVar(hiveConf, + ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + Path tableDnsPath = null; + if (tablePath != null) { + tableDnsPath = wh.getDnsPath(tablePath); + } + List partPaths = new ArrayList(); + Table tbl = ms.getTable(dbName, tableName); + + // call dropPartition on each of the table's partitions to follow the + // procedure for cleanly dropping partitions. + while (true) { + List partsToDelete = ms.getPartitions(dbName, tableName, partitionBatchSize); + if (partsToDelete == null || partsToDelete.isEmpty()) { + break; + } + List partNames = new ArrayList(); + for (Partition part : partsToDelete) { + if (checkLocation && part.getSd() != null && + part.getSd().getLocation() != null) { + + Path partPath = wh.getDnsPath(new Path(part.getSd().getLocation())); + if (tableDnsPath == null || + (partPath != null && !isSubdirectory(tableDnsPath, partPath))) { + if (!wh.isWritable(partPath.getParent())) { + throw new MetaException("Table metadata not deleted since the partition " + + Warehouse.makePartName(partitionKeys, part.getValues()) + + " has parent location " + partPath.getParent() + " which is not writable " + + "by " + hiveConf.getUser()); + } + partPaths.add(partPath); + } + } + partNames.add(Warehouse.makePartName(tbl.getPartitionKeys(), part.getValues())); + } + for (MetaStoreEventListener listener : listeners) { + //No drop part listener events fired for public listeners historically, for drop table case. + //Limiting to internal listeners for now, to avoid unexpected calls for public listeners.
+ if (listener instanceof HMSMetricsListener) { + for (Partition part : partsToDelete) { + listener.onDropPartition(null); + } + } + } + ms.dropPartitions(dbName, tableName, partNames); + } + + return partPaths; + } + + @Override + public void drop_table(final String dbname, final String name, final boolean deleteData) + throws NoSuchObjectException, MetaException { + drop_table_with_environment_context(dbname, name, deleteData, null); + } + + @Override + public void drop_table_with_environment_context(final String dbname, final String name, + final boolean deleteData, final EnvironmentContext envContext) + throws NoSuchObjectException, MetaException { + startTableFunction("drop_table", dbname, name); + + boolean success = false; + Exception ex = null; + try { + success = drop_table_core(getMS(), dbname, name, deleteData, envContext, null); + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("drop_table", success, ex, name); + } + + } + + /** + * Is this an external table? + * + * @param table + * Check if this table is external. + * @return True if the table is external, otherwise false. + */ + private boolean isExternal(Table table) { + return MetaStoreUtils.isExternalTable(table); + } + + private boolean isIndexTable(Table table) { + return MetaStoreUtils.isIndexTable(table); + } + + @Override + public Table get_table(final String dbname, final String name) throws MetaException, + NoSuchObjectException { + Table t = null; + startTableFunction("get_table", dbname, name); + Exception ex = null; + try { + t = get_table_core(dbname, name); + firePreEvent(new PreReadTableEvent(t, this)); + } catch (MetaException e) { + ex = e; + throw e; + } catch (NoSuchObjectException e) { + ex = e; + throw e; + } finally { + endFunction("get_table", t != null, ex, name); + } + return t; + } + + @Override + public List get_table_meta(String dbnames, String tblNames, List tblTypes) + throws MetaException, NoSuchObjectException { + List t = null; + startTableFunction("get_table_metas", dbnames, tblNames); + Exception ex = null; + try { + t = getMS().getTableMeta(dbnames, tblNames, tblTypes); + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("get_table_metas", t != null, ex); + } + return t; + } + + /** + * Equivalent of get_table, but does not log audits and fire pre-event listener. + * Meant to be used for calls made by other hive classes, that are not using the + * thrift interface. + * @param dbname + * @param name + * @return Table object + * @throws MetaException + * @throws NoSuchObjectException + */ + public Table get_table_core(final String dbname, final String name) throws MetaException, + NoSuchObjectException { + Table t; + try { + t = getMS().getTable(dbname, name); + if (t == null) { + throw new NoSuchObjectException(dbname + "." + name + + " table not found"); + } + } catch (Exception e) { + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } + return t; + } + + /** + * Gets multiple tables from the hive metastore. 
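+ * + * When the request spans more than one batch, the names are normalized and + * de-duplicated first, then fetched in batches of METASTORE_BATCH_RETRIEVE_MAX, + * since some databases (e.g. Oracle) reject IN-lists of over 1000 elements. + * A sketch, assuming a batch size of 2: + * + *   [t1, t2, t2, t3]  ->  distinct [t1, t2, t3] + *   batch 1: getTableObjectsByName(db, [t1, t2]) + *   batch 2: getTableObjectsByName(db, [t3])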
+ * + * @param dbName + * The name of the database in which the tables reside + * @param tableNames + * The names of the tables to get. + * + * @return A list of tables whose names are in the list "names" and + * are retrievable from the database specified by "dbnames." + * There is no guarantee of the order of the returned tables. + * If there are duplicate names, only one instance of the table will be returned. + * @throws MetaException + * @throws InvalidOperationException + * @throws UnknownDBException + */ + @Override + public List
get_table_objects_by_name(final String dbName, final List tableNames) + throws MetaException, InvalidOperationException, UnknownDBException { + List
tables = new ArrayList
(); + startMultiTableFunction("get_multi_table", dbName, tableNames); + Exception ex = null; + int tableBatchSize = HiveConf.getIntVar(hiveConf, + ConfVars.METASTORE_BATCH_RETRIEVE_MAX); + + try { + if (dbName == null || dbName.isEmpty()) { + throw new UnknownDBException("DB name is null or empty"); + } + if (tableNames == null) + { + throw new InvalidOperationException(dbName + " cannot find null tables"); + } + + // The list of table names could contain duplicates. RawStore.getTableObjectsByName() + // only guarantees returning no duplicate table objects in one batch. If we need + // to break into multiple batches, remove duplicates first. + List distinctTableNames = tableNames; + if (distinctTableNames.size() > tableBatchSize) { + List lowercaseTableNames = new ArrayList(); + for (String tableName : tableNames) { + lowercaseTableNames.add(HiveStringUtils.normalizeIdentifier(tableName)); + } + distinctTableNames = new ArrayList(new HashSet(lowercaseTableNames)); + } + + RawStore ms = getMS(); + int startIndex = 0; + // Retrieve the tables from the metastore in batches. Some databases like + // Oracle cannot have over 1000 expressions in a in-list + while (startIndex < distinctTableNames.size()) { + int endIndex = Math.min(startIndex + tableBatchSize, distinctTableNames.size()); + tables.addAll(ms.getTableObjectsByName(dbName, distinctTableNames.subList(startIndex, endIndex))); + startIndex = endIndex; + } + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else if (e instanceof UnknownDBException) { + throw (UnknownDBException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_multi_table", tables != null, ex, join(tableNames, ",")); + } + return tables; + } + + @Override + public List get_table_names_by_filter( + final String dbName, final String filter, final short maxTables) + throws MetaException, InvalidOperationException, UnknownDBException { + List tables = null; + startFunction("get_table_names_by_filter", ": db = " + dbName + ", filter = " + filter); + Exception ex = null; + try { + if (dbName == null || dbName.isEmpty()) { + throw new UnknownDBException("DB name is null or empty"); + } + if (filter == null) { + throw new InvalidOperationException(filter + " cannot apply null filter"); + } + tables = getMS().listTableNamesByFilter(dbName, filter, maxTables); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else if (e instanceof UnknownDBException) { + throw (UnknownDBException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_table_names_by_filter", tables != null, ex, join(tables, ",")); + } + return tables; + } + + private Partition append_partition_common(RawStore ms, String dbName, String tableName, + List part_vals, EnvironmentContext envContext) throws InvalidObjectException, + AlreadyExistsException, MetaException { + + Partition part = new Partition(); + boolean success = false, madeDir = false; + Path partLocation = null; + Table tbl = null; + try { + ms.openTransaction(); + part.setDbName(dbName); + part.setTableName(tableName); + part.setValues(part_vals); + + MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern); + + tbl = ms.getTable(part.getDbName(), part.getTableName()); + if (tbl == 
null) { + throw new InvalidObjectException( + "Unable to add partition because table or database do not exist"); + } + if (tbl.getSd().getLocation() == null) { + throw new MetaException( + "Cannot append a partition to a view"); + } + + firePreEvent(new PreAddPartitionEvent(tbl, part, this)); + + part.setSd(tbl.getSd().deepCopy()); + partLocation = new Path(tbl.getSd().getLocation(), Warehouse + .makePartName(tbl.getPartitionKeys(), part_vals)); + part.getSd().setLocation(partLocation.toString()); + + Partition old_part = null; + try { + old_part = ms.getPartition(part.getDbName(), part + .getTableName(), part.getValues()); + } catch (NoSuchObjectException e) { + // this means there is no existing partition + old_part = null; + } + if (old_part != null) { + throw new AlreadyExistsException("Partition already exists:" + part); + } + + if (!wh.isDir(partLocation)) { + if (!wh.mkdirs(partLocation, true)) { + throw new MetaException(partLocation + + " is not a directory or unable to create one"); + } + madeDir = true; + } + + // set create time + long time = System.currentTimeMillis() / 1000; + part.setCreateTime((int) time); + part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time)); + + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && + !MetaStoreUtils.isView(tbl)) { + MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, envContext); + } + + success = ms.addPartition(part); + if (success) { + success = ms.commitTransaction(); + } + } finally { + if (!success) { + ms.rollbackTransaction(); + if (madeDir) { + wh.deleteDir(partLocation, true); + } + } + + for (MetaStoreEventListener listener : listeners) { + AddPartitionEvent addPartitionEvent = + new AddPartitionEvent(tbl, part, success, this); + addPartitionEvent.setEnvironmentContext(envContext); + listener.onAddPartition(addPartitionEvent); + } + } + return part; + } + + private void firePreEvent(PreEventContext event) throws MetaException { + for (MetaStorePreEventListener listener : preListeners) { + try { + listener.onEvent(event); + } catch (NoSuchObjectException e) { + throw new MetaException(e.getMessage()); + } catch (InvalidOperationException e) { + throw new MetaException(e.getMessage()); + } + } + } + + @Override + public Partition append_partition(final String dbName, final String tableName, + final List part_vals) throws InvalidObjectException, + AlreadyExistsException, MetaException { + return append_partition_with_environment_context(dbName, tableName, part_vals, null); + } + + @Override + public Partition append_partition_with_environment_context(final String dbName, + final String tableName, final List part_vals, final EnvironmentContext envContext) + throws InvalidObjectException, AlreadyExistsException, MetaException { + startPartitionFunction("append_partition", dbName, tableName, part_vals); + if (LOG.isDebugEnabled()) { + for (String part : part_vals) { + LOG.debug(part); + } + } + + Partition ret = null; + Exception ex = null; + try { + ret = append_partition_common(getMS(), dbName, tableName, part_vals, envContext); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("append_partition", ret != null, ex, tableName); + } + return ret; + } + + private static class PartValEqWrapper 
{ + Partition partition; + + public PartValEqWrapper(Partition partition) { + this.partition = partition; + } + + @Override + public int hashCode() { + return partition.isSetValues() ? partition.getValues().hashCode() : 0; + } + + @Override + public boolean equals(Object obj) { + if (this == obj) return true; + if (obj == null || !(obj instanceof PartValEqWrapper)) return false; + Partition p1 = this.partition, p2 = ((PartValEqWrapper)obj).partition; + if (!p1.isSetValues() || !p2.isSetValues()) return p1.isSetValues() == p2.isSetValues(); + if (p1.getValues().size() != p2.getValues().size()) return false; + for (int i = 0; i < p1.getValues().size(); ++i) { + String v1 = p1.getValues().get(i); + String v2 = p2.getValues().get(i); + if (v1 == null && v2 == null) { + continue; + } + if (v1 == null || !v1.equals(v2)) { + return false; + } + } + return true; + } + } + + private static class PartValEqWrapperLite { + List values; + String location; + + public PartValEqWrapperLite(Partition partition) { + this.values = partition.isSetValues()? partition.getValues() : null; + this.location = partition.getSd().getLocation(); + } + + @Override + public int hashCode() { + return values == null ? 0 : values.hashCode(); + } + + @Override + public boolean equals(Object obj) { + if (this == obj) { + return true; + } + if (obj == null || !(obj instanceof PartValEqWrapperLite)) { + return false; + } + + List lhsValues = this.values; + List rhsValues = ((PartValEqWrapperLite)obj).values; + + if (lhsValues == null || rhsValues == null) + return lhsValues == rhsValues; + + if (lhsValues.size() != rhsValues.size()) + return false; + + for (int i=0; i add_partitions_core( + RawStore ms, String dbName, String tblName, List parts, boolean ifNotExists) + throws MetaException, InvalidObjectException, AlreadyExistsException, TException { + logInfo("add_partitions"); + boolean success = false; + // Ensures that the list doesn't have dups, and keeps track of directories we have created. + Map addedPartitions = new HashMap(); + List result = new ArrayList(); + List existingParts = null; + Table tbl = null; + try { + ms.openTransaction(); + tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new InvalidObjectException("Unable to add partitions because " + + "database or table " + dbName + "." + tblName + " does not exist"); + } + + if (!parts.isEmpty()) { + firePreEvent(new PreAddPartitionEvent(tbl, parts, this)); + } + + for (Partition part : parts) { + if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) { + throw new MetaException("Partition does not belong to target table " + + dbName + "." + tblName + ": " + part); + } + boolean shouldAdd = startAddPartition(ms, part, ifNotExists); + if (!shouldAdd) { + if (existingParts == null) { + existingParts = new ArrayList(); + } + existingParts.add(part); + LOG.info("Not adding partition " + part + " as it already exists"); + continue; + } + boolean madeDir = createLocationForAddedPartition(tbl, part); + if (addedPartitions.put(new PartValEqWrapper(part), madeDir) != null) { + // Technically, for ifNotExists case, we could insert one and discard the other + // because the first one now "exists", but it seems better to report the problem + // upstream as such a command doesn't make sense. 
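+ // PartValEqWrapper keys addedPartitions by value equality of partition.getValues(), + // so a second partition with e.g. the same values ["2016", "01"] makes put(...) + // return the earlier mapping, and we fail fast: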
+ throw new MetaException("Duplicate partitions in the list: " + part); + } + initializeAddedPartition(tbl, part, madeDir); + result.add(part); + } + if (!result.isEmpty()) { + success = ms.addPartitions(dbName, tblName, result); + } else { + success = true; + } + success = success && ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + for (Entry e : addedPartitions.entrySet()) { + if (e.getValue()) { + wh.deleteDir(new Path(e.getKey().partition.getSd().getLocation()), true); + // we just created this directory - it's not a case of pre-creation, so we nuke + } + } + fireMetaStoreAddPartitionEvent(tbl, parts, null, false); + } else { + fireMetaStoreAddPartitionEvent(tbl, result, null, true); + if (existingParts != null) { + // The request has succeeded but we failed to add these partitions. + fireMetaStoreAddPartitionEvent(tbl, existingParts, null, false); + } + } + } + return result; + } + + @Override + public AddPartitionsResult add_partitions_req(AddPartitionsRequest request) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + AddPartitionsResult result = new AddPartitionsResult(); + if (request.getParts().isEmpty()) { + return result; + } + try { + List parts = add_partitions_core(getMS(), request.getDbName(), + request.getTblName(), request.getParts(), request.isIfNotExists()); + if (request.isNeedResult()) { + result.setPartitions(parts); + } + } catch (TException te) { + throw te; + } catch (Exception e) { + throw newMetaException(e); + } + return result; + } + + @Override + public int add_partitions(final List parts) throws MetaException, + InvalidObjectException, AlreadyExistsException { + startFunction("add_partition"); + if (parts.size() == 0) { + return 0; + } + + Integer ret = null; + Exception ex = null; + try { + // Old API assumed all partitions belong to the same table; keep the same assumption + ret = add_partitions_core(getMS(), parts.get(0).getDbName(), + parts.get(0).getTableName(), parts, false).size(); + assert ret == parts.size(); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else { + throw newMetaException(e); + } + } finally { + String tableName = parts.get(0).getTableName(); + endFunction("add_partition", ret != null, ex, tableName); + } + return ret; + } + + @Override + public int add_partitions_pspec(final List partSpecs) + throws TException { + logInfo("add_partitions_pspec"); + + if (partSpecs.isEmpty()) { + return 0; + } + + String dbName = partSpecs.get(0).getDbName(); + String tableName = partSpecs.get(0).getTableName(); + + return add_partitions_pspec_core(getMS(), dbName, tableName, partSpecs, false); + } + + private int add_partitions_pspec_core( + RawStore ms, String dbName, String tblName, List partSpecs, boolean ifNotExists) + throws TException { + boolean success = false; + // Ensures that the list doesn't have dups, and keeps track of directories we have created. 
+      Map<PartValEqWrapperLite, Boolean> addedPartitions = new HashMap<PartValEqWrapperLite, Boolean>();
+      PartitionSpecProxy partitionSpecProxy = PartitionSpecProxy.Factory.get(partSpecs);
+      PartitionSpecProxy.PartitionIterator partitionIterator = partitionSpecProxy.getPartitionIterator();
+      Table tbl = null;
+      try {
+        ms.openTransaction();
+        tbl = ms.getTable(dbName, tblName);
+        if (tbl == null) {
+          throw new InvalidObjectException("Unable to add partitions because "
+              + "database or table " + dbName + "." + tblName + " does not exist");
+        }
+
+        firePreEvent(new PreAddPartitionEvent(tbl, partitionSpecProxy, this));
+
+        int nPartitions = 0;
+        while (partitionIterator.hasNext()) {
+
+          Partition part = partitionIterator.getCurrent();
+
+          if (!part.getTableName().equals(tblName) || !part.getDbName().equals(dbName)) {
+            throw new MetaException("Partition does not belong to target table "
+                + dbName + "." + tblName + ": " + part);
+          }
+          boolean shouldAdd = startAddPartition(ms, part, ifNotExists);
+          if (!shouldAdd) {
+            LOG.info("Not adding partition " + part + " as it already exists");
+            continue;
+          }
+          boolean madeDir = createLocationForAddedPartition(tbl, part);
+          if (addedPartitions.put(new PartValEqWrapperLite(part), madeDir) != null) {
+            // Technically, for ifNotExists case, we could insert one and discard the other
+            // because the first one now "exists", but it seems better to report the problem
+            // upstream as such a command doesn't make sense.
+            throw new MetaException("Duplicate partitions in the list: " + part);
+          }
+          initializeAddedPartition(tbl, partitionIterator, madeDir);
+
+          ++nPartitions;
+          partitionIterator.next();
+        }
+
+        success = ms.addPartitions(dbName, tblName, partitionSpecProxy, ifNotExists)
+            && ms.commitTransaction();
+
+        return nPartitions;
+      } finally {
+        if (!success) {
+          ms.rollbackTransaction();
+          for (Entry<PartValEqWrapperLite, Boolean> e : addedPartitions.entrySet()) {
+            if (e.getValue()) {
+              wh.deleteDir(new Path(e.getKey().location), true);
+              // we just created this directory - it's not a case of pre-creation, so we nuke
+            }
+          }
+        }
+        fireMetaStoreAddPartitionEvent(tbl, partitionSpecProxy, null, true);
+      }
+    }
+
+    private boolean startAddPartition(
+        RawStore ms, Partition part, boolean ifNotExists) throws MetaException, TException {
+      MetaStoreUtils.validatePartitionNameCharacters(part.getValues(),
+          partitionValidationPattern);
+      boolean doesExist = ms.doesPartitionExist(
+          part.getDbName(), part.getTableName(), part.getValues());
+      if (doesExist && !ifNotExists) {
+        throw new AlreadyExistsException("Partition already exists: " + part);
+      }
+      return !doesExist;
+    }
+
+    /**
+     * Handles the location for a partition being created.
+     * @param tbl Table.
+     * @param part Partition.
+     * @return Whether the partition SD location is set to a newly created directory.
+ */ + private boolean createLocationForAddedPartition( + final Table tbl, final Partition part) throws MetaException { + Path partLocation = null; + String partLocationStr = null; + if (part.getSd() != null) { + partLocationStr = part.getSd().getLocation(); + } + + if (partLocationStr == null || partLocationStr.isEmpty()) { + // set default location if not specified and this is + // a physical table partition (not a view) + if (tbl.getSd().getLocation() != null) { + partLocation = new Path(tbl.getSd().getLocation(), Warehouse + .makePartName(tbl.getPartitionKeys(), part.getValues())); + } + } else { + if (tbl.getSd().getLocation() == null) { + throw new MetaException("Cannot specify location for a view partition"); + } + partLocation = wh.getDnsPath(new Path(partLocationStr)); + } + + boolean result = false; + if (partLocation != null) { + part.getSd().setLocation(partLocation.toString()); + + // Check to see if the directory already exists before calling + // mkdirs() because if the file system is read-only, mkdirs will + // throw an exception even if the directory already exists. + if (!wh.isDir(partLocation)) { + if (!wh.mkdirs(partLocation, true)) { + throw new MetaException(partLocation + + " is not a directory or unable to create one"); + } + result = true; + } + } + return result; + } + + private void initializeAddedPartition( + final Table tbl, final Partition part, boolean madeDir) throws MetaException { + initializeAddedPartition(tbl, new PartitionSpecProxy.SimplePartitionWrapperIterator(part), madeDir); + } + + private void initializeAddedPartition( + final Table tbl, final PartitionSpecProxy.PartitionIterator part, boolean madeDir) throws MetaException { + if (HiveConf.getBoolVar(hiveConf, HiveConf.ConfVars.HIVESTATSAUTOGATHER) && + !MetaStoreUtils.isView(tbl)) { + MetaStoreUtils.updatePartitionStatsFast(part, wh, madeDir, false, null); + } + + // set create time + long time = System.currentTimeMillis() / 1000; + part.setCreateTime((int) time); + if (part.getParameters() == null || + part.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) { + part.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time)); + } + + // Inherit table properties into partition properties. + Map tblParams = tbl.getParameters(); + String inheritProps = hiveConf.getVar(ConfVars.METASTORE_PART_INHERIT_TBL_PROPS).trim(); + // Default value is empty string in which case no properties will be inherited. 
+ // * implies all properties needs to be inherited + Set inheritKeys = new HashSet(Arrays.asList(inheritProps.split(","))); + if (inheritKeys.contains("*")) { + inheritKeys = tblParams.keySet(); + } + + for (String key : inheritKeys) { + String paramVal = tblParams.get(key); + if (null != paramVal) { // add the property only if it exists in table properties + part.putToParameters(key, paramVal); + } + } + } + + private Partition add_partition_core(final RawStore ms, + final Partition part, final EnvironmentContext envContext) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + boolean success = false; + Table tbl = null; + try { + ms.openTransaction(); + tbl = ms.getTable(part.getDbName(), part.getTableName()); + if (tbl == null) { + throw new InvalidObjectException( + "Unable to add partition because table or database do not exist"); + } + + firePreEvent(new PreAddPartitionEvent(tbl, part, this)); + + boolean shouldAdd = startAddPartition(ms, part, false); + assert shouldAdd; // start would throw if it already existed here + boolean madeDir = createLocationForAddedPartition(tbl, part); + try { + initializeAddedPartition(tbl, part, madeDir); + success = ms.addPartition(part); + } finally { + if (!success && madeDir) { + wh.deleteDir(new Path(part.getSd().getLocation()), true); + } + } + // we proceed only if we'd actually succeeded anyway, otherwise, + // we'd have thrown an exception + success = success && ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + fireMetaStoreAddPartitionEvent(tbl, Arrays.asList(part), envContext, success); + } + return part; + } + + private void fireMetaStoreAddPartitionEvent(final Table tbl, + final List parts, final EnvironmentContext envContext, boolean success) + throws MetaException { + if (tbl != null && parts != null && !parts.isEmpty()) { + AddPartitionEvent addPartitionEvent = + new AddPartitionEvent(tbl, parts, success, this); + addPartitionEvent.setEnvironmentContext(envContext); + + for (MetaStoreEventListener listener : listeners) { + listener.onAddPartition(addPartitionEvent); + } + } + } + + private void fireMetaStoreAddPartitionEvent(final Table tbl, + final PartitionSpecProxy partitionSpec, final EnvironmentContext envContext, boolean success) + throws MetaException { + if (tbl != null && partitionSpec != null) { + AddPartitionEvent addPartitionEvent = + new AddPartitionEvent(tbl, partitionSpec, success, this); + addPartitionEvent.setEnvironmentContext(envContext); + + for (MetaStoreEventListener listener : listeners) { + listener.onAddPartition(addPartitionEvent); + } + } + } + + @Override + public Partition add_partition(final Partition part) + throws InvalidObjectException, AlreadyExistsException, MetaException { + return add_partition_with_environment_context(part, null); + } + + @Override + public Partition add_partition_with_environment_context( + final Partition part, EnvironmentContext envContext) + throws InvalidObjectException, AlreadyExistsException, + MetaException { + startTableFunction("add_partition", + part.getDbName(), part.getTableName()); + Partition ret = null; + Exception ex = null; + try { + ret = add_partition_core(getMS(), part, envContext); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else { + throw 
newMetaException(e); + } + } finally { + endFunction("add_partition", ret != null, ex, part != null ? part.getTableName(): null); + } + return ret; + } + + @Override + public Partition exchange_partition(Map partitionSpecs, + String sourceDbName, String sourceTableName, String destDbName, + String destTableName) throws MetaException, NoSuchObjectException, + InvalidObjectException, InvalidInputException, TException { + exchange_partitions(partitionSpecs, sourceDbName, sourceTableName, destDbName, destTableName); + return new Partition(); + } + + @Override + public List exchange_partitions(Map partitionSpecs, + String sourceDbName, String sourceTableName, String destDbName, + String destTableName) throws MetaException, NoSuchObjectException, + InvalidObjectException, InvalidInputException, TException { + boolean success = false; + boolean pathCreated = false; + RawStore ms = getMS(); + ms.openTransaction(); + Table destinationTable = ms.getTable(destDbName, destTableName); + Table sourceTable = ms.getTable(sourceDbName, sourceTableName); + List partVals = MetaStoreUtils.getPvals(sourceTable.getPartitionKeys(), + partitionSpecs); + List partValsPresent = new ArrayList (); + List partitionKeysPresent = new ArrayList (); + int i = 0; + for (FieldSchema fs: sourceTable.getPartitionKeys()) { + String partVal = partVals.get(i); + if (partVal != null && !partVal.equals("")) { + partValsPresent.add(partVal); + partitionKeysPresent.add(fs); + } + i++; + } + List partitionsToExchange = get_partitions_ps(sourceDbName, sourceTableName, + partVals, (short)-1); + boolean sameColumns = MetaStoreUtils.compareFieldColumns( + sourceTable.getSd().getCols(), destinationTable.getSd().getCols()); + boolean samePartitions = MetaStoreUtils.compareFieldColumns( + sourceTable.getPartitionKeys(), destinationTable.getPartitionKeys()); + if (!sameColumns || !samePartitions) { + throw new MetaException("The tables have different schemas." 
+ + " Their partitions cannot be exchanged."); + } + Path sourcePath = new Path(sourceTable.getSd().getLocation(), + Warehouse.makePartName(partitionKeysPresent, partValsPresent)); + Path destPath = new Path(destinationTable.getSd().getLocation(), + Warehouse.makePartName(partitionKeysPresent, partValsPresent)); + try { + List destPartitions = new ArrayList(); + for (Partition partition: partitionsToExchange) { + Partition destPartition = new Partition(partition); + destPartition.setDbName(destDbName); + destPartition.setTableName(destinationTable.getTableName()); + Path destPartitionPath = new Path(destinationTable.getSd().getLocation(), + Warehouse.makePartName(destinationTable.getPartitionKeys(), partition.getValues())); + destPartition.getSd().setLocation(destPartitionPath.toString()); + ms.addPartition(destPartition); + destPartitions.add(destPartition); + ms.dropPartition(partition.getDbName(), sourceTable.getTableName(), + partition.getValues()); + } + Path destParentPath = destPath.getParent(); + if (!wh.isDir(destParentPath)) { + if (!wh.mkdirs(destParentPath, true)) { + throw new MetaException("Unable to create path " + destParentPath); + } + } + /** + * TODO: Use the hard link feature of hdfs + * once https://issues.apache.org/jira/browse/HDFS-3370 is done + */ + pathCreated = wh.renameDir(sourcePath, destPath); + success = ms.commitTransaction(); + return destPartitions; + } finally { + if (!success || !pathCreated) { + ms.rollbackTransaction(); + if (pathCreated) { + wh.renameDir(destPath, sourcePath); + } + } + } + } + + private boolean drop_partition_common(RawStore ms, String db_name, String tbl_name, + List part_vals, final boolean deleteData, final EnvironmentContext envContext) + throws MetaException, NoSuchObjectException, IOException, InvalidObjectException, + InvalidInputException { + boolean success = false; + Path partPath = null; + Table tbl = null; + Partition part = null; + boolean isArchived = false; + Path archiveParentDir = null; + boolean mustPurge = false; + boolean isExternalTbl = false; + + try { + ms.openTransaction(); + part = ms.getPartition(db_name, tbl_name, part_vals); + tbl = get_table_core(db_name, tbl_name); + isExternalTbl = isExternal(tbl); + firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); + mustPurge = isMustPurge(envContext, tbl); + + if (part == null) { + throw new NoSuchObjectException("Partition doesn't exist. " + + part_vals); + } + + isArchived = MetaStoreUtils.isArchived(part); + if (isArchived) { + archiveParentDir = MetaStoreUtils.getOriginalLocation(part); + verifyIsWritablePath(archiveParentDir); + checkTrashPurgeCombination(archiveParentDir, db_name + "." + tbl_name + "." + part_vals, + mustPurge, deleteData && !isExternalTbl); + } + if (!ms.dropPartition(db_name, tbl_name, part_vals)) { + throw new MetaException("Unable to drop partition"); + } + success = ms.commitTransaction(); + if ((part.getSd() != null) && (part.getSd().getLocation() != null)) { + partPath = new Path(part.getSd().getLocation()); + verifyIsWritablePath(partPath); + checkTrashPurgeCombination(partPath, db_name + "." + tbl_name + "." 
+ part_vals, + mustPurge, deleteData && !isExternalTbl); + } + } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (deleteData && ((partPath != null) || (archiveParentDir != null))) { + if (!isExternalTbl) { + if (mustPurge) { + LOG.info("dropPartition() will purge " + partPath + " directly, skipping trash."); + } + else { + LOG.info("dropPartition() will move " + partPath + " to trash-directory."); + } + // Archived partitions have har:/to_har_file as their location. + // The original directory was saved in params + if (isArchived) { + assert (archiveParentDir != null); + wh.deleteDir(archiveParentDir, true, mustPurge); + } else { + assert (partPath != null); + wh.deleteDir(partPath, true, mustPurge); + deleteParentRecursive(partPath.getParent(), part_vals.size() - 1, mustPurge); + } + // ok even if the data is not deleted + } + } + for (MetaStoreEventListener listener : listeners) { + DropPartitionEvent dropPartitionEvent = + new DropPartitionEvent(tbl, part, success, deleteData, this); + dropPartitionEvent.setEnvironmentContext(envContext); + listener.onDropPartition(dropPartitionEvent); + } + } + return true; + } + + private static boolean isMustPurge(EnvironmentContext envContext, Table tbl) { + // Data needs deletion. Check if trash may be skipped. + // Trash may be skipped iff: + // 1. deleteData == true, obviously. + // 2. tbl is external. + // 3. Either + // 3.1. User has specified PURGE from the commandline, and if not, + // 3.2. User has set the table to auto-purge. + return ((envContext != null) && Boolean.parseBoolean(envContext.getProperties().get("ifPurge"))) + || (tbl.isSetParameters() && "true".equalsIgnoreCase(tbl.getParameters().get("auto.purge"))); + + } + private void deleteParentRecursive(Path parent, int depth, boolean mustPurge) throws IOException, MetaException { + if (depth > 0 && parent != null && wh.isWritable(parent) && wh.isEmpty(parent)) { + wh.deleteDir(parent, true, mustPurge); + deleteParentRecursive(parent.getParent(), depth - 1, mustPurge); + } + } + + @Override + public boolean drop_partition(final String db_name, final String tbl_name, + final List part_vals, final boolean deleteData) + throws NoSuchObjectException, MetaException, TException { + return drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, + null); + } + + private static class PathAndPartValSize { + public PathAndPartValSize(Path path, int partValSize) { + this.path = path; + this.partValSize = partValSize; + } + public Path path; + public int partValSize; + } + + @Override + public DropPartitionsResult drop_partitions_req( + DropPartitionsRequest request) throws MetaException, NoSuchObjectException, TException { + RawStore ms = getMS(); + String dbName = request.getDbName(), tblName = request.getTblName(); + boolean ifExists = request.isSetIfExists() && request.isIfExists(); + boolean deleteData = request.isSetDeleteData() && request.isDeleteData(); + boolean ignoreProtection = request.isSetIgnoreProtection() && request.isIgnoreProtection(); + boolean needResult = !request.isSetNeedResult() || request.isNeedResult(); + List dirsToDelete = new ArrayList(); + List archToDelete = new ArrayList(); + EnvironmentContext envContext = request.isSetEnvironmentContext() + ? 
request.getEnvironmentContext() : null; + + boolean success = false; + ms.openTransaction(); + Table tbl = null; + List parts = null; + boolean mustPurge = false; + boolean isExternalTbl = false; + try { + // We need Partition-s for firing events and for result; DN needs MPartition-s to drop. + // Great... Maybe we could bypass fetching MPartitions by issuing direct SQL deletes. + tbl = get_table_core(dbName, tblName); + isExternalTbl = isExternal(tbl); + mustPurge = isMustPurge(envContext, tbl); + int minCount = 0; + RequestPartsSpec spec = request.getParts(); + List partNames = null; + if (spec.isSetExprs()) { + // Dropping by expressions. + parts = new ArrayList(spec.getExprs().size()); + for (DropPartitionsExpr expr : spec.getExprs()) { + ++minCount; // At least one partition per expression, if not ifExists + List result = new ArrayList(); + boolean hasUnknown = ms.getPartitionsByExpr( + dbName, tblName, expr.getExpr(), null, (short)-1, result); + if (hasUnknown) { + // Expr is built by DDLSA, it should only contain part cols and simple ops + throw new MetaException("Unexpected unknown partitions to drop"); + } + // this is to prevent dropping archived partition which is archived in a + // different level the drop command specified. + if (!ignoreProtection && expr.isSetPartArchiveLevel()) { + for (Partition part : parts) { + if (MetaStoreUtils.isArchived(part) + && MetaStoreUtils.getArchivingLevel(part) < expr.getPartArchiveLevel()) { + throw new MetaException("Cannot drop a subset of partitions " + + " in an archive, partition " + part); + } + } + } + parts.addAll(result); + } + } else if (spec.isSetNames()) { + partNames = spec.getNames(); + minCount = partNames.size(); + parts = ms.getPartitionsByNames(dbName, tblName, partNames); + } else { + throw new MetaException("Partition spec is not set"); + } + + if ((parts.size() < minCount) && !ifExists) { + throw new NoSuchObjectException("Some partitions to drop are missing"); + } + + List colNames = null; + if (partNames == null) { + partNames = new ArrayList(parts.size()); + colNames = new ArrayList(tbl.getPartitionKeys().size()); + for (FieldSchema col : tbl.getPartitionKeys()) { + colNames.add(col.getName()); + } + } + + for (Partition part : parts) { + + // TODO - we need to speed this up for the normal path where all partitions are under + // the table and we don't have to stat every partition + + firePreEvent(new PreDropPartitionEvent(tbl, part, deleteData, this)); + if (colNames != null) { + partNames.add(FileUtils.makePartName(colNames, part.getValues())); + } + // Preserve the old behavior of failing when we cannot write, even w/o deleteData, + // and even if the table is external. That might not make any sense. + if (MetaStoreUtils.isArchived(part)) { + Path archiveParentDir = MetaStoreUtils.getOriginalLocation(part); + verifyIsWritablePath(archiveParentDir); + checkTrashPurgeCombination(archiveParentDir, dbName + "." + tblName + "." + + part.getValues(), mustPurge, deleteData && !isExternalTbl); + archToDelete.add(archiveParentDir); + } + if ((part.getSd() != null) && (part.getSd().getLocation() != null)) { + Path partPath = new Path(part.getSd().getLocation()); + verifyIsWritablePath(partPath); + checkTrashPurgeCombination(partPath, dbName + "." + tblName + "." 
+ part.getValues(), + mustPurge, deleteData && !isExternalTbl); + dirsToDelete.add(new PathAndPartValSize(partPath, part.getValues().size())); + } + } + + ms.dropPartitions(dbName, tblName, partNames); + success = ms.commitTransaction(); + DropPartitionsResult result = new DropPartitionsResult(); + if (needResult) { + result.setPartitions(parts); + } + return result; + } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (deleteData && !isExternal(tbl)) { + LOG.info( mustPurge? + "dropPartition() will purge partition-directories directly, skipping trash." + : "dropPartition() will move partition-directories to trash-directory."); + // Archived partitions have har:/to_har_file as their location. + // The original directory was saved in params + for (Path path : archToDelete) { + wh.deleteDir(path, true, mustPurge); + } + for (PathAndPartValSize p : dirsToDelete) { + wh.deleteDir(p.path, true, mustPurge); + try { + deleteParentRecursive(p.path.getParent(), p.partValSize - 1, mustPurge); + } catch (IOException ex) { + LOG.warn("Error from deleteParentRecursive", ex); + throw new MetaException("Failed to delete parent: " + ex.getMessage()); + } + } + } + if (parts != null) { + for (Partition part : parts) { + for (MetaStoreEventListener listener : listeners) { + DropPartitionEvent dropPartitionEvent = + new DropPartitionEvent(tbl, part, success, deleteData, this); + dropPartitionEvent.setEnvironmentContext(envContext); + listener.onDropPartition(dropPartitionEvent); + } + } + } + } + } + + private void verifyIsWritablePath(Path dir) throws MetaException { + try { + if (!wh.isWritable(dir.getParent())) { + throw new MetaException("Table partition not deleted since " + dir.getParent() + + " is not writable by " + hiveConf.getUser()); + } + } catch (IOException ex) { + LOG.warn("Error from isWritable", ex); + throw new MetaException("Table partition not deleted since " + dir.getParent() + + " access cannot be checked: " + ex.getMessage()); + } + } + + @Override + public boolean drop_partition_with_environment_context(final String db_name, + final String tbl_name, final List part_vals, final boolean deleteData, + final EnvironmentContext envContext) + throws NoSuchObjectException, MetaException, TException { + startPartitionFunction("drop_partition", db_name, tbl_name, part_vals); + LOG.info("Partition values:" + part_vals); + + boolean ret = false; + Exception ex = null; + try { + ret = drop_partition_common(getMS(), db_name, tbl_name, part_vals, deleteData, envContext); + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("drop_partition", ret, ex, tbl_name); + } + return ret; + + } + + @Override + public Partition get_partition(final String db_name, final String tbl_name, + final List part_vals) throws MetaException, NoSuchObjectException { + startPartitionFunction("get_partition", db_name, tbl_name, part_vals); + + Partition ret = null; + Exception ex = null; + try { + fireReadTablePreEvent(db_name, tbl_name); + ret = getMS().getPartition(db_name, tbl_name, part_vals); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_partition", ret != null, ex, tbl_name); + } + return ret; + } + + /** + * Fire a pre-event for read table operation, if there are any + * 
pre-event listeners registered + * + * @param dbName + * @param tblName + * @throws MetaException + * @throws NoSuchObjectException + */ + private void fireReadTablePreEvent(String dbName, String tblName) throws MetaException, NoSuchObjectException { + if(preListeners.size() > 0) { + // do this only if there is a pre event listener registered (avoid unnecessary + // metastore api call) + Table t = getMS().getTable(dbName, tblName); + if (t == null) { + throw new NoSuchObjectException(dbName + "." + tblName + + " table not found"); + } + firePreEvent(new PreReadTableEvent(t, this)); + } + } + + @Override + public Partition get_partition_with_auth(final String db_name, + final String tbl_name, final List part_vals, + final String user_name, final List group_names) + throws MetaException, NoSuchObjectException, TException { + startPartitionFunction("get_partition_with_auth", db_name, tbl_name, + part_vals); + fireReadTablePreEvent(db_name, tbl_name); + Partition ret = null; + Exception ex = null; + try { + ret = getMS().getPartitionWithAuth(db_name, tbl_name, part_vals, + user_name, group_names); + } catch (InvalidObjectException e) { + ex = e; + throw new NoSuchObjectException(e.getMessage()); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partition_with_auth", ret != null, ex, tbl_name); + } + return ret; + } + + @Override + public List get_partitions(final String db_name, final String tbl_name, + final short max_parts) throws NoSuchObjectException, MetaException { + startTableFunction("get_partitions", db_name, tbl_name); + fireReadTablePreEvent(db_name, tbl_name); + List ret = null; + Exception ex = null; + try { + ret = getMS().getPartitions(db_name, tbl_name, max_parts); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_partitions", ret != null, ex, tbl_name); + } + return ret; + + } + + @Override + public List get_partitions_with_auth(final String dbName, + final String tblName, final short maxParts, final String userName, + final List groupNames) throws NoSuchObjectException, + MetaException, TException { + startTableFunction("get_partitions_with_auth", dbName, tblName); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getPartitionsWithAuth(dbName, tblName, maxParts, + userName, groupNames); + } catch (InvalidObjectException e) { + ex = e; + throw new NoSuchObjectException(e.getMessage()); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_with_auth", ret != null, ex, tblName); + } + return ret; + + } + + @Override + public List get_partitions_pspec(final String db_name, final String tbl_name, final int max_parts) + throws NoSuchObjectException, MetaException { + + String dbName = db_name.toLowerCase(); + String tableName = tbl_name.toLowerCase(); + + startTableFunction("get_partitions_pspec", dbName, tableName); + + List partitionSpecs = null; + try { + Table table = get_table_core(dbName, tableName); + List partitions = get_partitions(dbName, tableName, (short) max_parts); + + if (is_partition_spec_grouping_enabled(table)) { + partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions); + } + else { + PartitionSpec pSpec = new PartitionSpec(); + pSpec.setPartitionList(new PartitionListComposingSpec(partitions)); + 
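// Editorial note (not part of the patch): this branch ships every partition
+        // verbatim inside a single PartitionListComposingSpec; the grouping branch above
+        // instead compacts partitions that share a StorageDescriptor into a
+        // PartitionSpecWithSharedSD, which shrinks the Thrift payload for wide tables.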
+        pSpec.setDbName(dbName);
+        pSpec.setTableName(tableName);
+        pSpec.setRootPath(table.getSd().getLocation());
+        partitionSpecs = Arrays.asList(pSpec);
+      }
+
+      return partitionSpecs;
+    }
+    finally {
+      endFunction("get_partitions_pspec", partitionSpecs != null && !partitionSpecs.isEmpty(), null, tbl_name);
+    }
+  }
+
+    private static class StorageDescriptorKey {
+
+      private final StorageDescriptor sd;
+
+      StorageDescriptorKey(StorageDescriptor sd) { this.sd = sd; }
+
+      StorageDescriptor getSd() {
+        return sd;
+      }
+
+      private String hashCodeKey() {
+        return sd.getInputFormat() + "\t"
+            + sd.getOutputFormat() + "\t"
+            + sd.getSerdeInfo().getSerializationLib() + "\t"
+            + sd.getCols();
+      }
+
+      @Override
+      public int hashCode() {
+        return hashCodeKey().hashCode();
+      }
+
+      @Override
+      public boolean equals(Object rhs) {
+        if (rhs == this)
+          return true;
+
+        if (!(rhs instanceof StorageDescriptorKey))
+          return false;
+
+        return (hashCodeKey().equals(((StorageDescriptorKey) rhs).hashCodeKey()));
+      }
+    }
+
+    private List<PartitionSpec> get_partitionspecs_grouped_by_storage_descriptor(Table table, List<Partition> partitions)
+        throws NoSuchObjectException, MetaException {
+
+      assert is_partition_spec_grouping_enabled(table);
+
+      final String tablePath = table.getSd().getLocation();
+
+      ImmutableListMultimap<Boolean, Partition> partitionsWithinTableDirectory
+          = Multimaps.index(partitions, new com.google.common.base.Function<Partition, Boolean>() {
+
+        @Override
+        public Boolean apply(Partition input) {
+          return input.getSd().getLocation().startsWith(tablePath);
+        }
+      });
+
+      List<PartitionSpec> partSpecs = new ArrayList<PartitionSpec>();
+
+      // Classify partitions within the table directory into groups,
+      // based on shared SD properties.
+
+      Map<StorageDescriptorKey, List<PartitionWithoutSD>> sdToPartList
+          = new HashMap<StorageDescriptorKey, List<PartitionWithoutSD>>();
+
+      if (partitionsWithinTableDirectory.containsKey(true)) {
+
+        ImmutableList<Partition> partsWithinTableDir = partitionsWithinTableDirectory.get(true);
+        for (Partition partition : partsWithinTableDir) {
+
+          PartitionWithoutSD partitionWithoutSD
+              = new PartitionWithoutSD(partition.getValues(),
+                  partition.getCreateTime(),
+                  partition.getLastAccessTime(),
+                  partition.getSd().getLocation().substring(tablePath.length()), partition.getParameters());
+
+          StorageDescriptorKey sdKey = new StorageDescriptorKey(partition.getSd());
+          if (!sdToPartList.containsKey(sdKey)) {
+            sdToPartList.put(sdKey, new ArrayList<PartitionWithoutSD>());
+          }
+
+          sdToPartList.get(sdKey).add(partitionWithoutSD);
+
+        } // for (partitionsWithinTableDirectory);
+
+        for (Map.Entry<StorageDescriptorKey, List<PartitionWithoutSD>> entry : sdToPartList.entrySet()) {
+          partSpecs.add(getSharedSDPartSpec(table, entry.getKey(), entry.getValue()));
+        }
+
+      } // Done grouping partitions within table-dir.
+
+      // Lump all partitions outside the tablePath into one PartSpec.
+      if (partitionsWithinTableDirectory.containsKey(false)) {
+        List<Partition> partitionsOutsideTableDir = partitionsWithinTableDirectory.get(false);
+        if (!partitionsOutsideTableDir.isEmpty()) {
+          PartitionSpec partListSpec = new PartitionSpec();
+          partListSpec.setDbName(table.getDbName());
+          partListSpec.setTableName(table.getTableName());
+          partListSpec.setPartitionList(new PartitionListComposingSpec(partitionsOutsideTableDir));
+          partSpecs.add(partListSpec);
+        }
+
+      }
+      return partSpecs;
+    }
+
+    private PartitionSpec getSharedSDPartSpec(Table table, StorageDescriptorKey sdKey, List<PartitionWithoutSD> partitions) {
+
+      StorageDescriptor sd = new StorageDescriptor(sdKey.getSd());
+      sd.setLocation(table.getSd().getLocation()); // Use table-dir as root-dir.
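+      // Editorial sketch (not part of the patch): with the shared SD rooted at the
+      // table directory, each PartitionWithoutSD built above carries only the suffix
+      // relative to it, e.g. for a table located at /warehouse/t:
+      //
+      //   full partition location:   /warehouse/t/ds=2016-01-01
+      //   relative location stored:  /ds=2016-01-01   (via substring(tablePath.length()))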
+ PartitionSpecWithSharedSD sharedSDPartSpec = + new PartitionSpecWithSharedSD(partitions, sd); + + PartitionSpec ret = new PartitionSpec(); + ret.setRootPath(sd.getLocation()); + ret.setSharedSDPartitionSpec(sharedSDPartSpec); + ret.setDbName(table.getDbName()); + ret.setTableName(table.getTableName()); + + return ret; + } + + private static boolean is_partition_spec_grouping_enabled(Table table) { + + Map parameters = table.getParameters(); + return parameters.containsKey("hive.hcatalog.partition.spec.grouping.enabled") + && parameters.get("hive.hcatalog.partition.spec.grouping.enabled").equalsIgnoreCase("true"); + } + + @Override + public List get_partition_names(final String db_name, final String tbl_name, + final short max_parts) throws MetaException, NoSuchObjectException { + startTableFunction("get_partition_names", db_name, tbl_name); + fireReadTablePreEvent(db_name, tbl_name); + List ret = null; + Exception ex = null; + try { + ret = getMS().listPartitionNames(db_name, tbl_name, max_parts); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_partition_names", ret != null, ex, tbl_name); + } + return ret; + } + + @Override + public void alter_partition(final String db_name, final String tbl_name, + final Partition new_part) + throws InvalidOperationException, MetaException, + TException { + rename_partition(db_name, tbl_name, null, new_part); + } + + @Override + public void alter_partition_with_environment_context(final String dbName, + final String tableName, final Partition newPartition, + final EnvironmentContext envContext) + throws InvalidOperationException, MetaException, TException { + rename_partition(dbName, tableName, null, + newPartition, envContext); + } + + @Override + public void rename_partition(final String db_name, final String tbl_name, + final List part_vals, final Partition new_part) + throws InvalidOperationException, MetaException, TException { + // Call rename_partition without an environment context. 
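+    // Editorial note (not part of the patch): part_vals names the *existing* partition;
+    // alter_partition above passes null, which the alter handler treats as an in-place
+    // update of the partition identified by new_part.getValues(). A hedged usage sketch,
+    // assuming a handler instance and a populated newPart:
+    //
+    //   // move partition ds=2016-01-01 of db1.t1 to newPart's values
+    //   handler.rename_partition("db1", "t1", Arrays.asList("2016-01-01"), newPart);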
+ rename_partition(db_name, tbl_name, part_vals, new_part, null); + } + + private void rename_partition(final String db_name, final String tbl_name, + final List part_vals, final Partition new_part, + final EnvironmentContext envContext) + throws InvalidOperationException, MetaException, + TException { + startTableFunction("alter_partition", db_name, tbl_name); + + if (LOG.isInfoEnabled()) { + LOG.info("New partition values:" + new_part.getValues()); + if (part_vals != null && part_vals.size() > 0) { + LOG.info("Old Partition values:" + part_vals); + } + } + + // Adds the missing scheme/authority for the new partition location + if (new_part.getSd() != null) { + String newLocation = new_part.getSd().getLocation(); + if (org.apache.commons.lang.StringUtils.isNotEmpty(newLocation)) { + Path tblPath = wh.getDnsPath(new Path(newLocation)); + new_part.getSd().setLocation(tblPath.toString()); + } + } + + Partition oldPart = null; + Exception ex = null; + try { + firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, part_vals, new_part, this)); + + if (part_vals != null && !part_vals.isEmpty()) { + MetaStoreUtils.validatePartitionNameCharacters(new_part.getValues(), + partitionValidationPattern); + } + + oldPart = alterHandler.alterPartition(getMS(), wh, db_name, tbl_name, part_vals, new_part, envContext); + + // Only fetch the table if we actually have a listener + Table table = null; + for (MetaStoreEventListener listener : listeners) { + if (table == null) { + table = getMS().getTable(db_name, tbl_name); + } + AlterPartitionEvent alterPartitionEvent = + new AlterPartitionEvent(oldPart, new_part, table, true, this); + alterPartitionEvent.setEnvironmentContext(envContext); + listener.onAlterPartition(alterPartitionEvent); + } + } catch (InvalidObjectException e) { + ex = e; + throw new InvalidOperationException(e.getMessage()); + } catch (AlreadyExistsException e) { + ex = e; + throw new InvalidOperationException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("alter_partition", oldPart != null, ex, tbl_name); + } + return; + } + + @Override + public void alter_partitions(final String db_name, final String tbl_name, + final List new_parts) throws InvalidOperationException, MetaException, + TException { + alter_partitions_with_environment_context(db_name, tbl_name, new_parts, null); + } + + @Override + public void alter_partitions_with_environment_context(final String db_name, final String tbl_name, + final List new_parts, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, + TException { + + startTableFunction("alter_partitions", db_name, tbl_name); + + if (LOG.isInfoEnabled()) { + for (Partition tmpPart : new_parts) { + LOG.info("New partition values:" + tmpPart.getValues()); + } + } + // all partitions are altered atomically + // all prehooks are fired together followed by all post hooks + List oldParts = null; + Exception ex = null; + try { + for (Partition tmpPart : new_parts) { + firePreEvent(new PreAlterPartitionEvent(db_name, tbl_name, null, tmpPart, this)); + } + oldParts = alterHandler.alterPartitions(getMS(), wh, db_name, tbl_name, new_parts, environmentContext); + + Iterator olditr = oldParts.iterator(); + // Only fetch the table if we have a 
listener that needs it. + Table table = null; + for (Partition tmpPart : new_parts) { + Partition oldTmpPart = null; + if (olditr.hasNext()) { + oldTmpPart = olditr.next(); + } + else { + throw new InvalidOperationException("failed to alterpartitions"); + } + for (MetaStoreEventListener listener : listeners) { + if (table == null) { + table = getMS().getTable(db_name, tbl_name); + } + AlterPartitionEvent alterPartitionEvent = + new AlterPartitionEvent(oldTmpPart, tmpPart, table, true, this); + listener.onAlterPartition(alterPartitionEvent); + } + } + } catch (InvalidObjectException e) { + ex = e; + throw new InvalidOperationException(e.getMessage()); + } catch (AlreadyExistsException e) { + ex = e; + throw new InvalidOperationException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("alter_partition", oldParts != null, ex, tbl_name); + } + return; + } + + @Override + public void alter_index(final String dbname, final String base_table_name, + final String index_name, final Index newIndex) + throws InvalidOperationException, MetaException { + startFunction("alter_index", ": db=" + dbname + " base_tbl=" + base_table_name + + " idx=" + index_name + " newidx=" + newIndex.getIndexName()); + newIndex.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System + .currentTimeMillis() / 1000)); + + boolean success = false; + Exception ex = null; + Index oldIndex = null; + try { + oldIndex = get_index_by_name(dbname, base_table_name, index_name); + + firePreEvent(new PreAlterIndexEvent(oldIndex, newIndex, this)); + + getMS().alterIndex(dbname, base_table_name, index_name, newIndex); + success = true; + } catch (InvalidObjectException e) { + ex = e; + throw new InvalidOperationException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("alter_index", success, ex, base_table_name); + for (MetaStoreEventListener listener : listeners) { + AlterIndexEvent alterIndexEvent = new AlterIndexEvent(oldIndex, newIndex, success, this); + listener.onAlterIndex(alterIndexEvent); + } + } + return; + } + + @Override + public String getVersion() throws TException { + endFunction(startFunction("getVersion"), true, null); + return "3.0"; + } + + @Override + public void alter_table(final String dbname, final String name, + final Table newTable) + throws InvalidOperationException, MetaException { + // Do not set an environment context. 
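+    // Editorial note (not part of the patch): alter_table_with_cascade below is the one
+    // caller that populates this EnvironmentContext; a hedged sketch of an equivalent
+    // caller-side request:
+    //
+    //   EnvironmentContext ctx = new EnvironmentContext();
+    //   ctx.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE);
+    //   handler.alter_table_with_environment_context("db1", "t1", newTable, ctx);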
+ alter_table_core(dbname,name, newTable, null); + } + + @Override + public void alter_table_with_cascade(final String dbname, final String name, + final Table newTable, final boolean cascade) + throws InvalidOperationException, MetaException { + EnvironmentContext envContext = null; + if (cascade) { + envContext = new EnvironmentContext(); + envContext.putToProperties(StatsSetupConst.CASCADE, StatsSetupConst.TRUE); + } + alter_table_core(dbname, name, newTable, envContext); + } + + @Override + public void alter_table_with_environment_context(final String dbname, + final String name, final Table newTable, + final EnvironmentContext envContext) + throws InvalidOperationException, MetaException { + alter_table_core(dbname, name, newTable, envContext); + } + + private void alter_table_core(final String dbname, final String name, final Table newTable, + final EnvironmentContext envContext) + throws InvalidOperationException, MetaException { + startFunction("alter_table", ": db=" + dbname + " tbl=" + name + + " newtbl=" + newTable.getTableName()); + // Update the time if it hasn't been specified. + if (newTable.getParameters() == null || + newTable.getParameters().get(hive_metastoreConstants.DDL_TIME) == null) { + newTable.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(System + .currentTimeMillis() / 1000)); + } + + // Adds the missing scheme/authority for the new table location + if (newTable.getSd() != null) { + String newLocation = newTable.getSd().getLocation(); + if (org.apache.commons.lang.StringUtils.isNotEmpty(newLocation)) { + Path tblPath = wh.getDnsPath(new Path(newLocation)); + newTable.getSd().setLocation(tblPath.toString()); + } + } + + boolean success = false; + Exception ex = null; + try { + Table oldt = get_table_core(dbname, name); + firePreEvent(new PreAlterTableEvent(oldt, newTable, this)); + alterHandler.alterTable(getMS(), wh, dbname, name, newTable, envContext); + success = true; + + for (MetaStoreEventListener listener : listeners) { + AlterTableEvent alterTableEvent = + new AlterTableEvent(oldt, newTable, success, this); + alterTableEvent.setEnvironmentContext(envContext); + listener.onAlterTable(alterTableEvent); + } + } catch (NoSuchObjectException e) { + // thrown when the table to be altered does not exist + ex = e; + throw new InvalidOperationException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof InvalidOperationException) { + throw (InvalidOperationException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("alter_table", success, ex, name); + } + } + + @Override + public List get_tables(final String dbname, final String pattern) + throws MetaException { + startFunction("get_tables", ": db=" + dbname + " pat=" + pattern); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getTables(dbname, pattern); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_tables", ret != null, ex); + } + return ret; + } + + @Override + public List get_all_tables(final String dbname) throws MetaException { + startFunction("get_all_tables", ": db=" + dbname); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getAllTables(dbname); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + 
endFunction("get_all_tables", ret != null, ex); + } + return ret; + } + + @Override + public List get_fields(String db, String tableName) + throws MetaException, UnknownTableException, UnknownDBException { + return get_fields_with_environment_context(db, tableName, null); + } + + @Override + public List get_fields_with_environment_context(String db, String tableName, + final EnvironmentContext envContext) + throws MetaException, UnknownTableException, UnknownDBException { + startFunction("get_fields_with_environment_context", ": db=" + db + "tbl=" + tableName); + String[] names = tableName.split("\\."); + String base_table_name = names[0]; + + Table tbl; + List ret = null; + Exception ex = null; + ClassLoader orgHiveLoader = null; + Configuration curConf = hiveConf; + try { + try { + tbl = get_table_core(db, base_table_name); + } catch (NoSuchObjectException e) { + throw new UnknownTableException(e.getMessage()); + } + if (null == tbl.getSd().getSerdeInfo().getSerializationLib() || + hiveConf.getStringCollection(ConfVars.SERDESUSINGMETASTOREFORSCHEMA.varname).contains + (tbl.getSd().getSerdeInfo().getSerializationLib())) { + ret = tbl.getSd().getCols(); + } else { + try { + if (envContext != null) { + String addedJars = envContext.getProperties().get("hive.added.jars.path"); + if (org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + //for thread safe + curConf = getConf(); + orgHiveLoader = curConf.getClassLoader(); + ClassLoader loader = MetaStoreUtils.addToClassPath(orgHiveLoader, org.apache.commons.lang.StringUtils.split(addedJars, ",")); + curConf.setClassLoader(loader); + } + } + + Deserializer s = MetaStoreUtils.getDeserializer(curConf, tbl, false); + ret = MetaStoreUtils.getFieldsFromDeserializer(tableName, s); + } catch (SerDeException e) { + StringUtils.stringifyException(e); + throw new MetaException(e.getMessage()); + } + } + } catch (Exception e) { + ex = e; + if (e instanceof UnknownDBException) { + throw (UnknownDBException) e; + } else if (e instanceof UnknownTableException) { + throw (UnknownTableException) e; + } else if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + if (orgHiveLoader != null) { + curConf.setClassLoader(orgHiveLoader); + } + endFunction("get_fields_with_environment_context", ret != null, ex, tableName); + } + + return ret; + } + + /** + * Return the schema of the table. This function includes partition columns + * in addition to the regular columns. + * + * @param db + * Name of the database + * @param tableName + * Name of the table + * @return List of columns, each column is a FieldSchema structure + * @throws MetaException + * @throws UnknownTableException + * @throws UnknownDBException + */ + @Override + public List get_schema(String db, String tableName) + throws MetaException, UnknownTableException, UnknownDBException { + return get_schema_with_environment_context(db,tableName, null); + } + + + /** + * Return the schema of the table. This function includes partition columns + * in addition to the regular columns. 
+ * + * @param db + * Name of the database + * @param tableName + * Name of the table + * @param envContext + * Store session based properties + * @return List of columns, each column is a FieldSchema structure + * @throws MetaException + * @throws UnknownTableException + * @throws UnknownDBException + */ + @Override + public List get_schema_with_environment_context(String db, String tableName, + final EnvironmentContext envContext) + throws MetaException, UnknownTableException, UnknownDBException { + startFunction("get_schema_with_environment_context", ": db=" + db + "tbl=" + tableName); + boolean success = false; + Exception ex = null; + try { + String[] names = tableName.split("\\."); + String base_table_name = names[0]; + + Table tbl; + try { + tbl = get_table_core(db, base_table_name); + } catch (NoSuchObjectException e) { + throw new UnknownTableException(e.getMessage()); + } + List fieldSchemas = get_fields_with_environment_context(db, base_table_name,envContext); + + if (tbl == null || fieldSchemas == null) { + throw new UnknownTableException(tableName + " doesn't exist"); + } + + if (tbl.getPartitionKeys() != null) { + // Combine the column field schemas and the partition keys to create the + // whole schema + fieldSchemas.addAll(tbl.getPartitionKeys()); + } + success = true; + return fieldSchemas; + } catch (Exception e) { + ex = e; + if (e instanceof UnknownDBException) { + throw (UnknownDBException) e; + } else if (e instanceof UnknownTableException) { + throw (UnknownTableException) e; + } else if (e instanceof MetaException) { + throw (MetaException) e; + } else { + MetaException me = new MetaException(e.toString()); + me.initCause(e); + throw me; + } + } finally { + endFunction("get_schema_with_environment_context", success, ex, tableName); + } + } + + @Override + public String getCpuProfile(int profileDurationInSec) throws TException { + return ""; + } + + /** + * Returns the value of the given configuration variable name. If the + * configuration variable with the given name doesn't exist, or if there + * were an exception thrown while retrieving the variable, or if name is + * null, defaultValue is returned. + */ + @Override + public String get_config_value(String name, String defaultValue) + throws TException, ConfigValSecurityException { + startFunction("get_config_value", ": name=" + name + " defaultValue=" + + defaultValue); + boolean success = false; + Exception ex = null; + try { + if (name == null) { + success = true; + return defaultValue; + } + // Allow only keys that start with hive.*, hdfs.*, mapred.* for security + // i.e. 
don't allow access to db password + if (!Pattern.matches("(hive|hdfs|mapred).*", name)) { + throw new ConfigValSecurityException("For security reasons, the " + + "config key " + name + " cannot be accessed"); + } + + String toReturn = defaultValue; + try { + toReturn = hiveConf.get(name, defaultValue); + } catch (RuntimeException e) { + LOG.error(threadLocalId.get().toString() + ": " + + "RuntimeException thrown in get_config_value - msg: " + + e.getMessage() + " cause: " + e.getCause()); + } + success = true; + return toReturn; + } catch (Exception e) { + ex = e; + if (e instanceof ConfigValSecurityException) { + throw (ConfigValSecurityException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + TException te = new TException(e.toString()); + te.initCause(e); + throw te; + } + } finally { + endFunction("get_config_value", success, ex); + } + } + + private List getPartValsFromName(RawStore ms, String dbName, String tblName, + String partName) throws MetaException, InvalidObjectException { + // Unescape the partition name + LinkedHashMap hm = Warehouse.makeSpecFromName(partName); + + // getPartition expects partition values in a list. use info from the + // table to put the partition column values in order + Table t = ms.getTable(dbName, tblName); + if (t == null) { + throw new InvalidObjectException(dbName + "." + tblName + + " table not found"); + } + + List partVals = new ArrayList(); + for (FieldSchema field : t.getPartitionKeys()) { + String key = field.getName(); + String val = hm.get(key); + if (val == null) { + throw new InvalidObjectException("incomplete partition name - missing " + key); + } + partVals.add(val); + } + return partVals; + } + + private Partition get_partition_by_name_core(final RawStore ms, final String db_name, + final String tbl_name, final String part_name) + throws MetaException, NoSuchObjectException, TException { + fireReadTablePreEvent(db_name, tbl_name); + List partVals = null; + try { + partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + } catch (InvalidObjectException e) { + throw new NoSuchObjectException(e.getMessage()); + } + Partition p = ms.getPartition(db_name, tbl_name, partVals); + + if (p == null) { + throw new NoSuchObjectException(db_name + "." 
+ tbl_name + + " partition (" + part_name + ") not found"); + } + return p; + } + + @Override + public Partition get_partition_by_name(final String db_name, final String tbl_name, + final String part_name) throws MetaException, NoSuchObjectException, TException { + + startFunction("get_partition_by_name", ": db=" + db_name + " tbl=" + + tbl_name + " part=" + part_name); + Partition ret = null; + Exception ex = null; + try { + ret = get_partition_by_name_core(getMS(), db_name, tbl_name, part_name); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partition_by_name", ret != null, ex, tbl_name); + } + return ret; + } + + @Override + public Partition append_partition_by_name(final String db_name, final String tbl_name, + final String part_name) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException { + return append_partition_by_name_with_environment_context(db_name, tbl_name, part_name, null); + } + + @Override + public Partition append_partition_by_name_with_environment_context(final String db_name, + final String tbl_name, final String part_name, final EnvironmentContext env_context) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + startFunction("append_partition_by_name", ": db=" + db_name + " tbl=" + + tbl_name + " part=" + part_name); + + Partition ret = null; + Exception ex = null; + try { + RawStore ms = getMS(); + List partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + ret = append_partition_common(ms, db_name, tbl_name, partVals, env_context); + } catch (Exception e) { + ex = e; + if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("append_partition_by_name", ret != null, ex, tbl_name); + } + return ret; + } + + private boolean drop_partition_by_name_core(final RawStore ms, final String db_name, + final String tbl_name, final String part_name, final boolean deleteData, + final EnvironmentContext envContext) throws NoSuchObjectException, MetaException, + TException, IOException, InvalidObjectException, InvalidInputException { + + List partVals = null; + try { + partVals = getPartValsFromName(ms, db_name, tbl_name, part_name); + } catch (InvalidObjectException e) { + throw new NoSuchObjectException(e.getMessage()); + } + + return drop_partition_common(ms, db_name, tbl_name, partVals, deleteData, envContext); + } + + @Override + public boolean drop_partition_by_name(final String db_name, final String tbl_name, + final String part_name, final boolean deleteData) throws NoSuchObjectException, + MetaException, TException { + return drop_partition_by_name_with_environment_context(db_name, tbl_name, part_name, + deleteData, null); + } + + @Override + public boolean drop_partition_by_name_with_environment_context(final String db_name, + final String tbl_name, final String part_name, final boolean deleteData, + final EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + startFunction("drop_partition_by_name", ": db=" + db_name + " tbl=" + + tbl_name + " part=" + part_name); + + boolean ret = false; + Exception ex = null; + try { + ret = drop_partition_by_name_core(getMS(), db_name, tbl_name, + part_name, deleteData, 
envContext); + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("drop_partition_by_name", ret, ex, tbl_name); + } + + return ret; + } + + @Override + public List get_partitions_ps(final String db_name, + final String tbl_name, final List part_vals, + final short max_parts) throws MetaException, TException, NoSuchObjectException { + startPartitionFunction("get_partitions_ps", db_name, tbl_name, part_vals); + + List ret = null; + Exception ex = null; + try { + ret = get_partitions_ps_with_auth(db_name, tbl_name, part_vals, + max_parts, null, null); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_ps", ret != null, ex, tbl_name); + } + + return ret; + } + + @Override + public List get_partitions_ps_with_auth(final String db_name, + final String tbl_name, final List part_vals, + final short max_parts, final String userName, + final List groupNames) throws MetaException, TException, NoSuchObjectException { + startPartitionFunction("get_partitions_ps_with_auth", db_name, tbl_name, + part_vals); + fireReadTablePreEvent(db_name, tbl_name); + List ret = null; + Exception ex = null; + try { + ret = getMS().listPartitionsPsWithAuth(db_name, tbl_name, part_vals, max_parts, + userName, groupNames); + } catch (InvalidObjectException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_ps_with_auth", ret != null, ex, tbl_name); + } + return ret; + } + + @Override + public List get_partition_names_ps(final String db_name, + final String tbl_name, final List part_vals, final short max_parts) + throws MetaException, TException, NoSuchObjectException { + startPartitionFunction("get_partitions_names_ps", db_name, tbl_name, part_vals); + fireReadTablePreEvent(db_name, tbl_name); + List ret = null; + Exception ex = null; + try { + ret = getMS().listPartitionNamesPs(db_name, tbl_name, part_vals, max_parts); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_names_ps", ret != null, ex, tbl_name); + } + return ret; + } + + @Override + public List partition_name_to_vals(String part_name) + throws MetaException, TException { + if (part_name.length() == 0) { + return new ArrayList(); + } + LinkedHashMap map = Warehouse.makeSpecFromName(part_name); + List part_vals = new ArrayList(); + part_vals.addAll(map.values()); + return part_vals; + } + + @Override + public Map partition_name_to_spec(String part_name) throws MetaException, + TException { + if (part_name.length() == 0) { + return new HashMap(); + } + return Warehouse.makeSpecFromName(part_name); + } + + @Override + public Index add_index(final Index newIndex, final Table indexTable) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + startFunction("add_index", ": " + newIndex.toString() + " " + indexTable.toString()); + Index ret = null; + Exception ex = null; + try { + ret = add_index_core(getMS(), newIndex, indexTable); + } catch (Exception e) { + ex = e; + if (e instanceof InvalidObjectException) { + throw (InvalidObjectException) e; + } else if (e instanceof AlreadyExistsException) { + throw (AlreadyExistsException) e; + } else if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } 
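+      // Editorial sketch, not part of the original patch: add_index_core
+      // (defined below) follows the open/commit/rollback idiom used
+      // throughout this handler. In outline, with names taken from the
+      // surrounding code:
+      //
+      //   ms.openTransaction();
+      //   try {
+      //     ...validate, create the index table, ms.addIndex(index)...
+      //     success = ms.commitTransaction();
+      //   } finally {
+      //     if (!success) {
+      //       ms.rollbackTransaction(); // also drops a half-created index table
+      //     }
+      //     // listeners are fired either way, with the final "success" flag
+      //   }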
finally {
+        String tableName = indexTable != null ? indexTable.getTableName() : null;
+        endFunction("add_index", ret != null, ex, tableName);
+      }
+      return ret;
+    }
+
+    private Index add_index_core(final RawStore ms, final Index index, final Table indexTable)
+        throws InvalidObjectException, AlreadyExistsException, MetaException {
+
+      boolean success = false, indexTableCreated = false;
+
+      String[] qualified =
+          MetaStoreUtils.getQualifiedName(index.getDbName(), index.getIndexTableName());
+
+      try {
+        ms.openTransaction();
+        firePreEvent(new PreAddIndexEvent(index, this));
+
+        Index old_index = null;
+        try {
+          old_index = get_index_by_name(index.getDbName(), index
+              .getOrigTableName(), index.getIndexName());
+        } catch (Exception e) {
+          // ignored: the index not existing yet is the expected case
+        }
+        if (old_index != null) {
+          throw new AlreadyExistsException("Index already exists: " + index);
+        }
+        Table origTbl = ms.getTable(index.getDbName(), index.getOrigTableName());
+        if (origTbl == null) {
+          throw new InvalidObjectException(
+              "Unable to add index because the database or the original table does not exist");
+        }
+
+        // set create time
+        long time = System.currentTimeMillis() / 1000;
+        Table indexTbl = indexTable;
+        if (indexTbl != null) {
+          try {
+            indexTbl = ms.getTable(qualified[0], qualified[1]);
+          } catch (Exception e) {
+            // ignored: the index table not existing yet is the expected case
+          }
+          if (indexTbl != null) {
+            throw new InvalidObjectException(
+                "Unable to add index because the index table already exists");
+          }
+          this.create_table(indexTable);
+          indexTableCreated = true;
+        }
+
+        index.setCreateTime((int) time);
+        index.putToParameters(hive_metastoreConstants.DDL_TIME, Long.toString(time));
+
+        ms.addIndex(index);
+        success = ms.commitTransaction();
+        return index;
+      } finally {
+        if (!success) {
+          if (indexTableCreated) {
+            try {
+              drop_table(qualified[0], qualified[1], false);
+            } catch (Exception e) {
+              // ignored: best-effort cleanup of the index table created above
+            }
+          }
+          ms.rollbackTransaction();
+        }
+        for (MetaStoreEventListener listener : listeners) {
+          AddIndexEvent addIndexEvent = new AddIndexEvent(index, success, this);
+          listener.onAddIndex(addIndexEvent);
+        }
+      }
+    }
+
+    @Override
+    public boolean drop_index_by_name(final String dbName, final String tblName,
+        final String indexName, final boolean deleteData) throws NoSuchObjectException,
+        MetaException, TException {
+      startFunction("drop_index_by_name", ": db=" + dbName + " tbl="
+          + tblName + " index=" + indexName);
+
+      boolean ret = false;
+      Exception ex = null;
+      try {
+        ret = drop_index_by_name_core(getMS(), dbName, tblName,
+            indexName, deleteData);
+      } catch (IOException e) {
+        ex = e;
+        throw new MetaException(e.getMessage());
+      } catch (Exception e) {
+        ex = e;
+        rethrowException(e);
+      } finally {
+        endFunction("drop_index_by_name", ret, ex, tblName);
+      }
+
+      return ret;
+    }
+
+    private boolean drop_index_by_name_core(final RawStore ms,
+        final String dbName, final String tblName,
+        final String indexName, final boolean deleteData) throws NoSuchObjectException,
+        MetaException, TException, IOException, InvalidObjectException, InvalidInputException {
+
+      boolean success = false;
+      Index index = null;
+      Path tblPath = null;
+      List<Path> partPaths = null;
+      try {
+        ms.openTransaction();
+
+        // drop the underlying index table
+        index = get_index_by_name(dbName, tblName, indexName); // throws exception if not exists
+
+        firePreEvent(new PreDropIndexEvent(index, this));
+
+        ms.dropIndex(dbName, tblName, indexName);
+
+        String idxTblName = index.getIndexTableName();
+        if (idxTblName != null) {
+          String[] qualified = MetaStoreUtils.getQualifiedName(index.getDbName(), idxTblName);
+          Table tbl =
get_table_core(qualified[0], qualified[1]); + if (tbl.getSd() == null) { + throw new MetaException("Table metadata is corrupted"); + } + + if (tbl.getSd().getLocation() != null) { + tblPath = new Path(tbl.getSd().getLocation()); + if (!wh.isWritable(tblPath.getParent())) { + throw new MetaException("Index table metadata not deleted since " + + tblPath.getParent() + " is not writable by " + + hiveConf.getUser()); + } + } + + // Drop the partitions and get a list of partition locations which need to be deleted + partPaths = dropPartitionsAndGetLocations(ms, qualified[0], qualified[1], tblPath, + tbl.getPartitionKeys(), deleteData); + + if (!ms.dropTable(qualified[0], qualified[1])) { + throw new MetaException("Unable to drop underlying data table " + + qualified[0] + "." + qualified[1] + " for index " + indexName); + } + } + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } else if (deleteData && tblPath != null) { + deletePartitionData(partPaths); + deleteTableData(tblPath); + // ok even if the data is not deleted + } + for (MetaStoreEventListener listener : listeners) { + DropIndexEvent dropIndexEvent = new DropIndexEvent(index, success, this); + listener.onDropIndex(dropIndexEvent); + } + } + return success; + } + + @Override + public Index get_index_by_name(final String dbName, final String tblName, + final String indexName) throws MetaException, NoSuchObjectException, + TException { + + startFunction("get_index_by_name", ": db=" + dbName + " tbl=" + + tblName + " index=" + indexName); + + Index ret = null; + Exception ex = null; + try { + ret = get_index_by_name_core(getMS(), dbName, tblName, indexName); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_index_by_name", ret != null, ex, tblName); + } + return ret; + } + + private Index get_index_by_name_core(final RawStore ms, final String db_name, + final String tbl_name, final String index_name) + throws MetaException, NoSuchObjectException, TException { + Index index = ms.getIndex(db_name, tbl_name, index_name); + + if (index == null) { + throw new NoSuchObjectException(db_name + "." 
+ tbl_name + + " index=" + index_name + " not found"); + } + return index; + } + + @Override + public List get_index_names(final String dbName, final String tblName, + final short maxIndexes) throws MetaException, TException { + startTableFunction("get_index_names", dbName, tblName); + + List ret = null; + Exception ex = null; + try { + ret = getMS().listIndexNames(dbName, tblName, maxIndexes); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_index_names", ret != null, ex, tblName); + } + return ret; + } + + @Override + public List get_indexes(final String dbName, final String tblName, + final short maxIndexes) throws NoSuchObjectException, MetaException, + TException { + startTableFunction("get_indexes", dbName, tblName); + + List ret = null; + Exception ex = null; + try { + ret = getMS().getIndexes(dbName, tblName, maxIndexes); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_indexes", ret != null, ex, tblName); + } + return ret; + } + + private String lowerCaseConvertPartName(String partName) throws MetaException { + boolean isFirst = true; + Map partSpec = Warehouse.makeEscSpecFromName(partName); + String convertedPartName = new String(); + + for (Map.Entry entry : partSpec.entrySet()) { + String partColName = entry.getKey(); + String partColVal = entry.getValue(); + + if (!isFirst) { + convertedPartName += "/"; + } else { + isFirst = false; + } + convertedPartName += partColName.toLowerCase() + "=" + partColVal; + } + return convertedPartName; + } + + @Override + public ColumnStatistics get_table_column_statistics(String dbName, String tableName, + String colName) throws NoSuchObjectException, MetaException, TException, + InvalidInputException, InvalidObjectException + { + dbName = dbName.toLowerCase(); + tableName = tableName.toLowerCase(); + colName = colName.toLowerCase(); + startFunction("get_column_statistics_by_table: db=" + dbName + " table=" + tableName + + " column=" + colName); + ColumnStatistics statsObj = null; + try { + statsObj = getMS().getTableColumnStatistics( + dbName, tableName, Lists.newArrayList(colName)); + assert statsObj.getStatsObjSize() <= 1; + return statsObj; + } finally { + endFunction("get_column_statistics_by_table: ", statsObj != null, null, tableName); + } + } + + @Override + public TableStatsResult get_table_statistics_req(TableStatsRequest request) + throws MetaException, NoSuchObjectException, TException { + String dbName = request.getDbName().toLowerCase(); + String tblName = request.getTblName().toLowerCase(); + startFunction("get_table_statistics_req: db=" + dbName + " table=" + tblName); + TableStatsResult result = null; + List lowerCaseColNames = new ArrayList(request.getColNames().size()); + for (String colName : request.getColNames()) { + lowerCaseColNames.add(colName.toLowerCase()); + } + try { + ColumnStatistics cs = getMS().getTableColumnStatistics(dbName, tblName, lowerCaseColNames); + result = new TableStatsResult((cs == null || cs.getStatsObj() == null) + ? 
Lists.newArrayList() : cs.getStatsObj());
+      } finally {
+        endFunction("get_table_statistics_req: ", result != null, null, tblName);
+      }
+      return result;
+    }
+
+    @Override
+    public ColumnStatistics get_partition_column_statistics(String dbName, String tableName,
+        String partName, String colName) throws NoSuchObjectException, MetaException,
+        InvalidInputException, TException, InvalidObjectException {
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+      colName = colName.toLowerCase();
+      String convertedPartName = lowerCaseConvertPartName(partName);
+      startFunction("get_column_statistics_by_partition: db=" + dbName + " table=" + tableName
+          + " partition=" + convertedPartName + " column=" + colName);
+      ColumnStatistics statsObj = null;
+
+      try {
+        List<ColumnStatistics> list = getMS().getPartitionColumnStatistics(dbName, tableName,
+            Lists.newArrayList(convertedPartName), Lists.newArrayList(colName));
+        if (list.isEmpty()) {
+          return null;
+        }
+        if (list.size() != 1) {
+          throw new MetaException(list.size() + " statistics for single column and partition");
+        }
+        statsObj = list.get(0);
+      } finally {
+        endFunction("get_column_statistics_by_partition: ", statsObj != null, null, tableName);
+      }
+      return statsObj;
+    }
+
+    @Override
+    public PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)
+        throws MetaException, NoSuchObjectException, TException {
+      String dbName = request.getDbName().toLowerCase();
+      String tblName = request.getTblName().toLowerCase();
+      startFunction("get_partitions_statistics_req: db=" + dbName + " table=" + tblName);
+
+      PartitionsStatsResult result = null;
+      List<String> lowerCaseColNames = new ArrayList<String>(request.getColNames().size());
+      for (String colName : request.getColNames()) {
+        lowerCaseColNames.add(colName.toLowerCase());
+      }
+      List<String> lowerCasePartNames = new ArrayList<String>(request.getPartNames().size());
+      for (String partName : request.getPartNames()) {
+        lowerCasePartNames.add(lowerCaseConvertPartName(partName));
+      }
+      try {
+        List<ColumnStatistics> stats = getMS().getPartitionColumnStatistics(
+            dbName, tblName, lowerCasePartNames, lowerCaseColNames);
+        Map<String, List<ColumnStatisticsObj>> map =
+            new HashMap<String, List<ColumnStatisticsObj>>();
+        for (ColumnStatistics stat : stats) {
+          map.put(stat.getStatsDesc().getPartName(), stat.getStatsObj());
+        }
+        result = new PartitionsStatsResult(map);
+      } finally {
+        endFunction("get_partitions_statistics_req: ", result != null, null, tblName);
+      }
+      return result;
+    }
+
+    @Override
+    public boolean update_table_column_statistics(ColumnStatistics colStats)
+        throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+        InvalidInputException {
+      String dbName = null;
+      String tableName = null;
+      String colName = null;
+      ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+      dbName = statsDesc.getDbName().toLowerCase();
+      tableName = statsDesc.getTableName().toLowerCase();
+
+      statsDesc.setDbName(dbName);
+      statsDesc.setTableName(tableName);
+      long time = System.currentTimeMillis() / 1000;
+      statsDesc.setLastAnalyzed(time);
+
+      List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+
+      for (ColumnStatisticsObj statsObj : statsObjs) {
+        colName = statsObj.getColName().toLowerCase();
+        statsObj.setColName(colName);
+        startFunction("write_column_statistics: db=" + dbName + " table=" + tableName
+            + " column=" + colName);
+      }
+
+      colStats.setStatsDesc(statsDesc);
+      colStats.setStatsObj(statsObjs);
+
+      boolean ret = false;
+
+      try {
+        ret = getMS().updateTableColumnStatistics(colStats);
+        return ret;
+      } finally {
+        endFunction("write_column_statistics: ", ret, null, tableName);
+      }
+    }
+
+    @Override
+    public boolean update_partition_column_statistics(ColumnStatistics colStats)
+        throws NoSuchObjectException, InvalidObjectException, MetaException, TException,
+        InvalidInputException {
+
+      String dbName = null;
+      String tableName = null;
+      String partName = null;
+      String colName = null;
+
+      ColumnStatisticsDesc statsDesc = colStats.getStatsDesc();
+      dbName = statsDesc.getDbName().toLowerCase();
+      tableName = statsDesc.getTableName().toLowerCase();
+      partName = lowerCaseConvertPartName(statsDesc.getPartName());
+
+      statsDesc.setDbName(dbName);
+      statsDesc.setTableName(tableName);
+      statsDesc.setPartName(partName);
+
+      long time = System.currentTimeMillis() / 1000;
+      statsDesc.setLastAnalyzed(time);
+
+      List<ColumnStatisticsObj> statsObjs = colStats.getStatsObj();
+
+      for (ColumnStatisticsObj statsObj : statsObjs) {
+        colName = statsObj.getColName().toLowerCase();
+        statsObj.setColName(colName);
+        startFunction("write_partition_column_statistics: db=" + dbName + " table=" + tableName
+            + " part=" + partName + " column=" + colName);
+      }
+
+      colStats.setStatsDesc(statsDesc);
+      colStats.setStatsObj(statsObjs);
+
+      boolean ret = false;
+
+      try {
+        List<String> partVals = getPartValsFromName(getMS(), dbName,
+            tableName, partName);
+        ret = getMS().updatePartitionColumnStatistics(colStats, partVals);
+        return ret;
+      } finally {
+        endFunction("write_partition_column_statistics: ", ret, null, tableName);
+      }
+    }
+
+    @Override
+    public boolean delete_partition_column_statistics(String dbName, String tableName,
+        String partName, String colName) throws NoSuchObjectException, MetaException,
+        InvalidObjectException, TException, InvalidInputException {
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+      if (colName != null) {
+        colName = colName.toLowerCase();
+      }
+      String convertedPartName = lowerCaseConvertPartName(partName);
+      startFunction("delete_column_statistics_by_partition: db=" + dbName + " table=" + tableName
+          + " partition=" + convertedPartName + " column=" + colName);
+      boolean ret = false;
+
+      try {
+        List<String> partVals = getPartValsFromName(getMS(), dbName, tableName, convertedPartName);
+        ret = getMS().deletePartitionColumnStatistics(dbName, tableName,
+            convertedPartName, partVals, colName);
+      } finally {
+        endFunction("delete_column_statistics_by_partition: ", ret, null, tableName);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean delete_table_column_statistics(String dbName, String tableName, String colName)
+        throws NoSuchObjectException, MetaException, InvalidObjectException, TException,
+        InvalidInputException {
+      dbName = dbName.toLowerCase();
+      tableName = tableName.toLowerCase();
+
+      if (colName != null) {
+        colName = colName.toLowerCase();
+      }
+      startFunction("delete_column_statistics_by_table: db=" + dbName + " table=" + tableName
+          + " column=" + colName);
+
+      boolean ret = false;
+      try {
+        ret = getMS().deleteTableColumnStatistics(dbName, tableName, colName);
+      } finally {
+        endFunction("delete_column_statistics_by_table: ", ret, null, tableName);
+      }
+      return ret;
+    }
+
+    @Override
+    public List<Partition> get_partitions_by_filter(final String dbName,
+        final String tblName, final String filter, final short maxParts)
+        throws MetaException, NoSuchObjectException, TException {
+      startTableFunction("get_partitions_by_filter", dbName, tblName);
+      fireReadTablePreEvent(dbName, tblName);
+      List<Partition> ret = null;
+      Exception ex = null;
+      try {
+        ret = getMS().getPartitionsByFilter(dbName, tblName, filter, maxParts);
+      } catch (Exception e) {
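+        // The caught exception is recorded so endFunction can log the failure,
+        // then rethrowException (defined below) maps it back onto one of the
+        // exception types declared in the Thrift IDL.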
+ ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_by_filter", ret != null, ex, tblName); + } + return ret; + } + + @Override + public List get_part_specs_by_filter(final String dbName, + final String tblName, final String filter, final int maxParts) + throws MetaException, NoSuchObjectException, TException { + + startTableFunction("get_partitions_by_filter_pspec", dbName, tblName); + + List partitionSpecs = null; + try { + Table table = get_table_core(dbName, tblName); + List partitions = get_partitions_by_filter(dbName, tblName, filter, (short) maxParts); + + if (is_partition_spec_grouping_enabled(table)) { + partitionSpecs = get_partitionspecs_grouped_by_storage_descriptor(table, partitions); + } + else { + PartitionSpec pSpec = new PartitionSpec(); + pSpec.setPartitionList(new PartitionListComposingSpec(partitions)); + pSpec.setRootPath(table.getSd().getLocation()); + pSpec.setDbName(dbName); + pSpec.setTableName(tblName); + partitionSpecs = Arrays.asList(pSpec); + } + + return partitionSpecs; + } + finally { + endFunction("get_partitions_by_filter_pspec", partitionSpecs != null && !partitionSpecs.isEmpty(), null, tblName); + } + } + + @Override + public PartitionsByExprResult get_partitions_by_expr( + PartitionsByExprRequest req) throws TException { + String dbName = req.getDbName(), tblName = req.getTblName(); + startTableFunction("get_partitions_by_expr", dbName, tblName); + fireReadTablePreEvent(dbName, tblName); + PartitionsByExprResult ret = null; + Exception ex = null; + try { + List partitions = new LinkedList(); + boolean hasUnknownPartitions = getMS().getPartitionsByExpr(dbName, tblName, + req.getExpr(), req.getDefaultPartitionName(), req.getMaxParts(), partitions); + ret = new PartitionsByExprResult(partitions, hasUnknownPartitions); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_by_expr", ret != null, ex, tblName); + } + return ret; + } + + private void rethrowException(Exception e) + throws MetaException, NoSuchObjectException, TException { + // TODO: Both of these are TException, why do we need these separate clauses? 
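+      // Editorial note on the TODO above: MetaException and NoSuchObjectException
+      // are Thrift-generated subclasses of TException, so the first two arms only
+      // preserve the more specific declared type for the caller; anything that is
+      // not a TException at all is wrapped by newMetaException(e), which keeps the
+      // original exception as the cause.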
+ if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof NoSuchObjectException) { + throw (NoSuchObjectException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } + + @Override + public int get_num_partitions_by_filter(final String dbName, + final String tblName, final String filter) + throws MetaException, NoSuchObjectException, TException { + startTableFunction("get_num_partitions_by_filter", dbName, tblName); + + int ret = -1; + Exception ex = null; + try { + ret = getMS().getNumPartitionsByFilter(dbName, tblName, filter); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_num_partitions_by_filter", ret != -1, ex, tblName); + } + return ret; + } + + @Override + public List get_partitions_by_names(final String dbName, + final String tblName, final List partNames) + throws MetaException, NoSuchObjectException, TException { + + startTableFunction("get_partitions_by_names", dbName, tblName); + fireReadTablePreEvent(dbName, tblName); + List ret = null; + Exception ex = null; + try { + ret = getMS().getPartitionsByNames(dbName, tblName, partNames); + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_partitions_by_names", ret != null, ex, tblName); + } + return ret; + } + + @Override + public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, + String userName, List groupNames) throws MetaException, + TException { + firePreEvent(new PreAuthorizationCallEvent(this)); + if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + String partName = getPartName(hiveObject); + return this.get_column_privilege_set(hiveObject.getDbName(), hiveObject + .getObjectName(), partName, hiveObject.getColumnName(), userName, + groupNames); + } else if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + String partName = getPartName(hiveObject); + return this.get_partition_privilege_set(hiveObject.getDbName(), + hiveObject.getObjectName(), partName, userName, groupNames); + } else if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + return this.get_db_privilege_set(hiveObject.getDbName(), userName, + groupNames); + } else if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + return this.get_table_privilege_set(hiveObject.getDbName(), hiveObject + .getObjectName(), userName, groupNames); + } else if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + return this.get_user_privilege_set(userName, groupNames); + } + return null; + } + + private String getPartName(HiveObjectRef hiveObject) throws MetaException { + String partName = null; + List partValue = hiveObject.getPartValues(); + if (partValue != null && partValue.size() > 0) { + try { + Table table = get_table_core(hiveObject.getDbName(), hiveObject + .getObjectName()); + partName = Warehouse + .makePartName(table.getPartitionKeys(), partValue); + } catch (NoSuchObjectException e) { + throw new MetaException(e.getMessage()); + } + } + return partName; + } + + private PrincipalPrivilegeSet get_column_privilege_set(final String dbName, + final String tableName, final String partName, final String columnName, + final String userName, final List groupNames) throws MetaException, + TException { + incrementCounter("get_column_privilege_set"); + + PrincipalPrivilegeSet ret = null; + try { + ret = getMS().getColumnPrivilegeSet( + dbName, tableName, partName, columnName, userName, groupNames); + } catch (MetaException e) { + throw e; + } catch 
(Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    private PrincipalPrivilegeSet get_db_privilege_set(final String dbName,
+        final String userName, final List<String> groupNames) throws MetaException,
+        TException {
+      incrementCounter("get_db_privilege_set");
+
+      PrincipalPrivilegeSet ret = null;
+      try {
+        ret = getMS().getDBPrivilegeSet(dbName, userName, groupNames);
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    private PrincipalPrivilegeSet get_partition_privilege_set(
+        final String dbName, final String tableName, final String partName,
+        final String userName, final List<String> groupNames)
+        throws MetaException, TException {
+      incrementCounter("get_partition_privilege_set");
+
+      PrincipalPrivilegeSet ret = null;
+      try {
+        ret = getMS().getPartitionPrivilegeSet(dbName, tableName, partName,
+            userName, groupNames);
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    private PrincipalPrivilegeSet get_table_privilege_set(final String dbName,
+        final String tableName, final String userName,
+        final List<String> groupNames) throws MetaException, TException {
+      incrementCounter("get_table_privilege_set");
+
+      PrincipalPrivilegeSet ret = null;
+      try {
+        ret = getMS().getTablePrivilegeSet(dbName, tableName, userName,
+            groupNames);
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean grant_role(final String roleName,
+        final String principalName, final PrincipalType principalType,
+        final String grantor, final PrincipalType grantorType, final boolean grantOption)
+        throws MetaException, TException {
+      incrementCounter("add_role_member");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      if (PUBLIC.equals(roleName)) {
+        throw new MetaException("No user can be added to " + PUBLIC
+            + ", since all users implicitly belong to the " + PUBLIC + " role.");
+      }
+      Boolean ret = null;
+      try {
+        RawStore ms = getMS();
+        Role role = ms.getRole(roleName);
+        if (principalType == PrincipalType.ROLE) {
+          // check if this grant statement will end up creating a cycle
+          if (isNewRoleAParent(principalName, roleName)) {
+            throw new MetaException("Cannot grant role " + principalName + " to " + roleName
+                + " as " + roleName + " already belongs to the role " + principalName
+                + ". (no cycles allowed)");
+          }
+        }
+        ret = ms.grantRole(role, principalName, principalType, grantor, grantorType, grantOption);
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    /**
+     * Check if newRole is in the parent hierarchy of curRole.
+     * @param newRole the role being granted
+     * @param curRole the role it would be granted to
+     * @return true if newRole is curRole or present in its parent hierarchy
+     * @throws MetaException
+     */
+    private boolean isNewRoleAParent(String newRole, String curRole) throws MetaException {
+      if (newRole.equals(curRole)) {
+        return true;
+      }
+      // do this check recursively on all the parent roles of curRole
+      List<Role> parentRoleMaps = getMS().listRoles(curRole, PrincipalType.ROLE);
+      for (Role parentRole : parentRoleMaps) {
+        if (isNewRoleAParent(newRole, parentRole.getRoleName())) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    @Override
+    public List<Role> list_roles(final String principalName,
+        final PrincipalType principalType) throws MetaException, TException {
+      incrementCounter("list_roles");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      return getMS().listRoles(principalName, principalType);
+    }
+
+    @Override
+    public boolean create_role(final Role role)
+        throws MetaException, TException {
+      incrementCounter("create_role");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      if (PUBLIC.equals(role.getRoleName())) {
+        throw new MetaException(PUBLIC + " role implicitly exists. It can't be created.");
+      }
+      Boolean ret = null;
+      try {
+        ret = getMS().addRole(role.getRoleName(), role.getOwnerName());
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean drop_role(final String roleName)
+        throws MetaException, TException {
+      incrementCounter("drop_role");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      if (ADMIN.equals(roleName) || PUBLIC.equals(roleName)) {
+        throw new MetaException(PUBLIC + ", " + ADMIN + " roles can't be dropped.");
+      }
+      Boolean ret = null;
+      try {
+        ret = getMS().removeRole(roleName);
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public List<String> get_role_names() throws MetaException, TException {
+      incrementCounter("get_role_names");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      List<String> ret = null;
+      try {
+        ret = getMS().listRoleNames();
+        return ret;
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    @Override
+    public boolean grant_privileges(final PrivilegeBag privileges) throws MetaException,
+        TException {
+      incrementCounter("grant_privileges");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      Boolean ret = null;
+      try {
+        ret = getMS().grantPrivileges(privileges);
+      } catch (MetaException e) {
+        throw e;
+      } catch (Exception e) {
+        throw new RuntimeException(e);
+      }
+      return ret;
+    }
+
+    @Override
+    public boolean revoke_role(final String roleName, final String userName,
+        final PrincipalType principalType) throws MetaException, TException {
+      return revoke_role(roleName, userName, principalType, false);
+    }
+
+    private boolean revoke_role(final String roleName, final String userName,
+        final PrincipalType principalType, boolean grantOption) throws MetaException, TException {
+      incrementCounter("remove_role_member");
+      firePreEvent(new PreAuthorizationCallEvent(this));
+      if (PUBLIC.equals(roleName)) {
+        throw new MetaException(PUBLIC + " role can't be
revoked."); + } + Boolean ret = null; + try { + RawStore ms = getMS(); + Role mRole = ms.getRole(roleName); + ret = ms.revokeRole(mRole, userName, principalType, grantOption); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request) + throws MetaException, org.apache.thrift.TException { + GrantRevokeRoleResponse response = new GrantRevokeRoleResponse(); + boolean grantOption = false; + if (request.isSetGrantOption()) { + grantOption = request.isGrantOption(); + } + switch (request.getRequestType()) { + case GRANT: { + boolean result = grant_role(request.getRoleName(), + request.getPrincipalName(), request.getPrincipalType(), + request.getGrantor(), request.getGrantorType(), grantOption); + response.setSuccess(result); + break; + } + case REVOKE: { + boolean result = revoke_role(request.getRoleName(), request.getPrincipalName(), + request.getPrincipalType(), grantOption); + response.setSuccess(result); + break; + } + default: + throw new MetaException("Unknown request type " + request.getRequestType()); + } + + return response; + } + + @Override + public GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request) + throws MetaException, org.apache.thrift.TException { + GrantRevokePrivilegeResponse response = new GrantRevokePrivilegeResponse(); + switch (request.getRequestType()) { + case GRANT: { + boolean result = grant_privileges(request.getPrivileges()); + response.setSuccess(result); + break; + } + case REVOKE: { + boolean revokeGrantOption = false; + if (request.isSetRevokeGrantOption()) { + revokeGrantOption = request.isRevokeGrantOption(); + } + boolean result = revoke_privileges(request.getPrivileges(), revokeGrantOption); + response.setSuccess(result); + break; + } + default: + throw new MetaException("Unknown request type " + request.getRequestType()); + } + + return response; + } + + @Override + public boolean revoke_privileges(final PrivilegeBag privileges) + throws MetaException, TException { + return revoke_privileges(privileges, false); + } + + public boolean revoke_privileges(final PrivilegeBag privileges, boolean grantOption) + throws MetaException, TException { + incrementCounter("revoke_privileges"); + firePreEvent(new PreAuthorizationCallEvent(this)); + Boolean ret = null; + try { + ret = getMS().revokePrivileges(privileges, grantOption); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + private PrincipalPrivilegeSet get_user_privilege_set(final String userName, + final List groupNames) throws MetaException, TException { + incrementCounter("get_user_privilege_set"); + PrincipalPrivilegeSet ret = null; + try { + ret = getMS().getUserPrivilegeSet(userName, groupNames); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + return ret; + } + + @Override + public List list_privileges(String principalName, + PrincipalType principalType, HiveObjectRef hiveObject) + throws MetaException, TException { + firePreEvent(new PreAuthorizationCallEvent(this)); + if (hiveObject.getObjectType() == null) { + return getAllPrivileges(principalName, principalType); + } + if (hiveObject.getObjectType() == HiveObjectType.GLOBAL) { + return list_global_privileges(principalName, principalType); + } + if (hiveObject.getObjectType() == HiveObjectType.DATABASE) { + return 
list_db_privileges(principalName, principalType, hiveObject + .getDbName()); + } + if (hiveObject.getObjectType() == HiveObjectType.TABLE) { + return list_table_privileges(principalName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName()); + } + if (hiveObject.getObjectType() == HiveObjectType.PARTITION) { + return list_partition_privileges(principalName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject + .getPartValues()); + } + if (hiveObject.getObjectType() == HiveObjectType.COLUMN) { + if (hiveObject.getPartValues() == null || hiveObject.getPartValues().isEmpty()) { + return list_table_column_privileges(principalName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject.getColumnName()); + } + return list_partition_column_privileges(principalName, principalType, + hiveObject.getDbName(), hiveObject.getObjectName(), hiveObject + .getPartValues(), hiveObject.getColumnName()); + } + return null; + } + + private List getAllPrivileges(String principalName, + PrincipalType principalType) throws TException { + List privs = new ArrayList(); + privs.addAll(list_global_privileges(principalName, principalType)); + privs.addAll(list_db_privileges(principalName, principalType, null)); + privs.addAll(list_table_privileges(principalName, principalType, null, null)); + privs.addAll(list_partition_privileges(principalName, principalType, null, null, null)); + privs.addAll(list_table_column_privileges(principalName, principalType, null, null, null)); + privs.addAll(list_partition_column_privileges(principalName, principalType, + null, null, null, null)); + return privs; + } + + private List list_table_column_privileges( + final String principalName, final PrincipalType principalType, + final String dbName, final String tableName, final String columnName) + throws MetaException, TException { + incrementCounter("list_table_column_privileges"); + + try { + if (dbName == null) { + return getMS().listPrincipalTableColumnGrantsAll(principalName, principalType); + } + if (principalName == null) { + return getMS().listTableColumnGrantsAll(dbName, tableName, columnName); + } + List result = getMS() + .listPrincipalTableColumnGrants(principalName, principalType, + dbName, tableName, columnName); + return result; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private List list_partition_column_privileges( + final String principalName, final PrincipalType principalType, + final String dbName, final String tableName, final List partValues, + final String columnName) throws MetaException, TException { + incrementCounter("list_partition_column_privileges"); + + try { + if (dbName == null) { + return getMS().listPrincipalPartitionColumnGrantsAll(principalName, principalType); + } + Table tbl = get_table_core(dbName, tableName); + String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + if (principalName == null) { + return getMS().listPartitionColumnGrantsAll(dbName, tableName, partName, columnName); + } + + List result = + getMS().listPrincipalPartitionColumnGrants(principalName, principalType, dbName, + tableName, partValues, partName, columnName); + + return result; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private List list_db_privileges(final String principalName, + final PrincipalType principalType, final String dbName) + throws MetaException, TException { + 
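+      // Editorial note: the list_*_privileges helpers below share one dispatch
+      // convention, sketched here for the database case:
+      //   dbName == null        -> listPrincipalDBGrantsAll(principal, type)
+      //   principalName == null -> listDBGrantsAll(dbName)
+      //   both supplied         -> listPrincipalDBGrants(principal, type, dbName)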
incrementCounter("list_security_db_grant"); + + try { + if (dbName == null) { + return getMS().listPrincipalDBGrantsAll(principalName, principalType); + } + if (principalName == null) { + return getMS().listDBGrantsAll(dbName); + } else { + return getMS().listPrincipalDBGrants(principalName, principalType, dbName); + } + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private List list_partition_privileges( + final String principalName, final PrincipalType principalType, + final String dbName, final String tableName, final List partValues) + throws MetaException, TException { + incrementCounter("list_security_partition_grant"); + + try { + if (dbName == null) { + return getMS().listPrincipalPartitionGrantsAll(principalName, principalType); + } + Table tbl = get_table_core(dbName, tableName); + String partName = Warehouse.makePartName(tbl.getPartitionKeys(), partValues); + if (principalName == null) { + return getMS().listPartitionGrantsAll(dbName, tableName, partName); + } + List result = getMS().listPrincipalPartitionGrants( + principalName, principalType, dbName, tableName, partValues, partName); + + return result; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private List list_table_privileges( + final String principalName, final PrincipalType principalType, + final String dbName, final String tableName) throws MetaException, + TException { + incrementCounter("list_security_table_grant"); + + try { + if (dbName == null) { + return getMS().listPrincipalTableGrantsAll(principalName, principalType); + } + if (principalName == null) { + return getMS().listTableGrantsAll(dbName, tableName); + } + List result = getMS() + .listAllTableGrants(principalName, principalType, dbName, tableName); + + return result; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + private List list_global_privileges( + final String principalName, final PrincipalType principalType) + throws MetaException, TException { + incrementCounter("list_security_user_grant"); + + try { + if (principalName == null) { + return getMS().listGlobalGrantsAll(); + } + List result = getMS().listPrincipalGlobalGrants( + principalName, principalType); + + return result; + } catch (MetaException e) { + throw e; + } catch (Exception e) { + throw new RuntimeException(e); + } + } + + @Override + public void cancel_delegation_token(String token_str_form) + throws MetaException, TException { + startFunction("cancel_delegation_token"); + boolean success = false; + Exception ex = null; + try { + HiveMetaStore.cancelDelegationToken(token_str_form); + success = true; + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("cancel_delegation_token", success, ex); + } + } + + @Override + public long renew_delegation_token(String token_str_form) + throws MetaException, TException { + startFunction("renew_delegation_token"); + Long ret = null; + Exception ex = null; + try { + ret = HiveMetaStore.renewDelegationToken(token_str_form); + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; 
+ } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("renew_delegation_token", ret != null, ex); + } + return ret; + } + + @Override + public String get_delegation_token(String token_owner, + String renewer_kerberos_principal_name) + throws MetaException, TException { + startFunction("get_delegation_token"); + String ret = null; + Exception ex = null; + try { + ret = + HiveMetaStore.getDelegationToken(token_owner, + renewer_kerberos_principal_name, getIPAddress()); + } catch (IOException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (InterruptedException e) { + ex = e; + throw new MetaException(e.getMessage()); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else if (e instanceof TException) { + throw (TException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_delegation_token", ret != null, ex); + } + return ret; + } + + @Override + public boolean add_token(String token_identifier, String delegation_token) throws TException { + startFunction("add_token", ": " + token_identifier); + boolean ret = false; + Exception ex = null; + try { + ret = getMS().addToken(token_identifier, delegation_token); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("add_token", ret == true, ex); + } + return ret; + } + + @Override + public boolean remove_token(String token_identifier) throws TException { + startFunction("remove_token", ": " + token_identifier); + boolean ret = false; + Exception ex = null; + try { + ret = getMS().removeToken(token_identifier); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("remove_token", ret == true, ex); + } + return ret; + } + + @Override + public String get_token(String token_identifier) throws TException { + startFunction("get_token for", ": " + token_identifier); + String ret = null; + Exception ex = null; + try { + ret = getMS().getToken(token_identifier); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_token", ret != null, ex); + } + return ret; + } + + @Override + public List get_all_token_identifiers() throws TException { + startFunction("get_all_token_identifiers."); + List ret = null; + Exception ex = null; + try { + ret = getMS().getAllTokenIdentifiers(); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_all_token_identifiers.", ex == null, ex); + } + return ret; + } + + @Override + public int add_master_key(String key) throws MetaException, TException { + startFunction("add_master_key."); + int ret = -1; + Exception ex = null; + try { + ret = getMS().addMasterKey(key); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("add_master_key.", ex == null, ex); + } + return ret; + } + + @Override + public void update_master_key(int seq_number, String key) throws NoSuchObjectException, + MetaException, TException { + startFunction("update_master_key."); + Exception ex = null; 
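+      // Editorial sketch (hypothetical names, not part of this patch): the
+      // master keys managed by add/update/remove_master_key sign metastore
+      // delegation tokens; a rotation driver built on this handler could run
+      //
+      //   int seq = handler.add_master_key(serializedKey);
+      //   handler.update_master_key(seq, reserializedKey);
+      //   handler.remove_master_key(seq);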
+ try { + getMS().updateMasterKey(seq_number, key); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("update_master_key.", ex == null, ex); + } + } + + @Override + public boolean remove_master_key(int key_seq) throws TException { + startFunction("remove_master_key."); + Exception ex = null; + boolean ret; + try { + ret = getMS().removeMasterKey(key_seq); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("remove_master_key.", ex == null, ex); + } + return ret; + } + + @Override + public List get_master_keys() throws TException { + startFunction("get_master_keys."); + Exception ex = null; + String [] ret = null; + try { + ret = getMS().getMasterKeys(); + } catch (Exception e) { + ex = e; + if (e instanceof MetaException) { + throw (MetaException) e; + } else { + throw newMetaException(e); + } + } finally { + endFunction("get_master_keys.", ret != null, ex); + } + return Arrays.asList(ret); + } + + @Override + public void markPartitionForEvent(final String db_name, final String tbl_name, + final Map partName, final PartitionEventType evtType) throws + MetaException, TException, NoSuchObjectException, UnknownDBException, + UnknownTableException, + InvalidPartitionException, UnknownPartitionException { + + Table tbl = null; + Exception ex = null; + try { + startPartitionFunction("markPartitionForEvent", db_name, tbl_name, partName); + firePreEvent(new PreLoadPartitionDoneEvent(db_name, tbl_name, partName, this)); + tbl = getMS().markPartitionForEvent(db_name, tbl_name, partName, evtType); + if (null == tbl) { + throw new UnknownTableException("Table: " + tbl_name + " not found."); + } else { + for (MetaStoreEventListener listener : listeners) { + listener.onLoadPartitionDone(new LoadPartitionDoneEvent(true, tbl, partName, this)); + } + } + } catch (Exception original) { + ex = original; + LOG.error("Exception caught in mark partition event ", original); + if (original instanceof NoSuchObjectException) { + throw (NoSuchObjectException) original; + } else if (original instanceof UnknownTableException) { + throw (UnknownTableException) original; + } else if (original instanceof UnknownDBException) { + throw (UnknownDBException) original; + } else if (original instanceof UnknownPartitionException) { + throw (UnknownPartitionException) original; + } else if (original instanceof InvalidPartitionException) { + throw (InvalidPartitionException) original; + } else if (original instanceof MetaException) { + throw (MetaException) original; + } else { + throw newMetaException(original); + } + } finally { + endFunction("markPartitionForEvent", tbl != null, ex, tbl_name); + } + } + + @Override + public boolean isPartitionMarkedForEvent(final String db_name, final String tbl_name, + final Map partName, final PartitionEventType evtType) throws + MetaException, NoSuchObjectException, UnknownDBException, UnknownTableException, + TException, UnknownPartitionException, InvalidPartitionException { + + startPartitionFunction("isPartitionMarkedForEvent", db_name, tbl_name, partName); + Boolean ret = null; + Exception ex = null; + try { + ret = getMS().isPartitionMarkedForEvent(db_name, tbl_name, partName, evtType); + } catch (Exception original) { + LOG.error("Exception caught for isPartitionMarkedForEvent ",original); + ex = original; + if (original instanceof 
NoSuchObjectException) { + throw (NoSuchObjectException) original; + } else if (original instanceof UnknownTableException) { + throw (UnknownTableException) original; + } else if (original instanceof UnknownDBException) { + throw (UnknownDBException) original; + } else if (original instanceof UnknownPartitionException) { + throw (UnknownPartitionException) original; + } else if (original instanceof InvalidPartitionException) { + throw (InvalidPartitionException) original; + } else if (original instanceof MetaException) { + throw (MetaException) original; + } else { + throw newMetaException(original); + } + } finally { + endFunction("isPartitionMarkedForEvent", ret != null, ex, tbl_name); + } + + return ret; + } + + @Override + public List set_ugi(String username, List groupNames) throws MetaException, + TException { + Collections.addAll(groupNames, username); + return groupNames; + } + + @Override + public boolean partition_name_has_valid_characters(List part_vals, + boolean throw_exception) throws TException, MetaException { + startFunction("partition_name_has_valid_characters"); + boolean ret = false; + Exception ex = null; + try { + if (throw_exception) { + MetaStoreUtils.validatePartitionNameCharacters(part_vals, partitionValidationPattern); + ret = true; + } else { + ret = MetaStoreUtils.partitionNameHasValidCharacters(part_vals, + partitionValidationPattern); + } + } catch (Exception e) { + if (e instanceof MetaException) { + throw (MetaException)e; + } else { + ex = e; + throw newMetaException(e); + } + } + endFunction("partition_name_has_valid_characters", true, null); + return ret; + } + + private static MetaException newMetaException(Exception e) { + if (e instanceof MetaException) { + return (MetaException)e; + } + MetaException me = new MetaException(e.toString()); + me.initCause(e); + return me; + } + + private void validateFunctionInfo(Function func) throws InvalidObjectException, MetaException { + if (!MetaStoreUtils.validateName(func.getFunctionName(), null)) { + throw new InvalidObjectException(func.getFunctionName() + " is not a valid object name"); + } + String className = func.getClassName(); + if (className == null) { + throw new InvalidObjectException("Function class name cannot be null"); + } + } + + @Override + public void create_function(Function func) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, + TException { + validateFunctionInfo(func); + + boolean success = false; + RawStore ms = getMS(); + try { + ms.openTransaction(); + + Database db = ms.getDatabase(func.getDbName()); + if (db == null) { + throw new NoSuchObjectException("The database " + func.getDbName() + " does not exist"); + } + Function existingFunc = ms.getFunction(func.getDbName(), func.getFunctionName()); + if (existingFunc != null) { + throw new AlreadyExistsException( + "Function " + func.getFunctionName() + " already exists"); + } + + // set create time + long time = System.currentTimeMillis() / 1000; + func.setCreateTime((int) time); + ms.createFunction(func); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + @Override + public void drop_function(String dbName, String funcName) + throws NoSuchObjectException, MetaException, + InvalidObjectException, InvalidInputException { + boolean success = false; + Function func = null; + RawStore ms = getMS(); + + try { + ms.openTransaction(); + + func = ms.getFunction(dbName, funcName); + if (func == null) { + throw new 
NoSuchObjectException("Function " + funcName + " does not exist"); + } + ms.dropFunction(dbName, funcName); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + @Override + public void alter_function(String dbName, String funcName, Function newFunc) + throws InvalidOperationException, MetaException, TException { + validateFunctionInfo(newFunc); + boolean success = false; + RawStore ms = getMS(); + try { + ms.openTransaction(); + ms.alterFunction(dbName, funcName, newFunc); + success = ms.commitTransaction(); + } finally { + if (!success) { + ms.rollbackTransaction(); + } + } + } + + @Override + public List get_functions(String dbName, String pattern) + throws MetaException { + startFunction("get_functions", ": db=" + dbName + " pat=" + pattern); + + RawStore ms = getMS(); + Exception ex = null; + List funcNames = null; + + try { + funcNames = ms.getFunctions(dbName, pattern); + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("get_functions", funcNames != null, ex); + } + + return funcNames; + } + + @Override + public GetAllFunctionsResponse get_all_functions() + throws MetaException { + GetAllFunctionsResponse response = new GetAllFunctionsResponse(); + startFunction("get_all_functions"); + RawStore ms = getMS(); + List allFunctions = null; + Exception ex = null; + try { + allFunctions = ms.getAllFunctions(); + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("get_all_functions", allFunctions != null, ex); + } + response.setFunctions(allFunctions); + return response; + } + + @Override + public Function get_function(String dbName, String funcName) + throws MetaException, NoSuchObjectException, TException { + startFunction("get_function", ": " + dbName + "." + funcName); + + RawStore ms = getMS(); + Function func = null; + Exception ex = null; + + try { + func = ms.getFunction(dbName, funcName); + if (func == null) { + throw new NoSuchObjectException( + "Function " + dbName + "." 
+ funcName + " does not exist"); + } + } catch (Exception e) { + ex = e; + throw newMetaException(e); + } finally { + endFunction("get_function", func != null, ex); + } + + return func; + } + + // Transaction and locking methods + @Override + public GetOpenTxnsResponse get_open_txns() throws TException { + return getTxnHandler().getOpenTxns(); + } + + // Transaction and locking methods + @Override + public GetOpenTxnsInfoResponse get_open_txns_info() throws TException { + return getTxnHandler().getOpenTxnsInfo(); + } + + @Override + public OpenTxnsResponse open_txns(OpenTxnRequest rqst) throws TException { + return getTxnHandler().openTxns(rqst); + } + + @Override + public void abort_txn(AbortTxnRequest rqst) throws NoSuchTxnException, TException { + getTxnHandler().abortTxn(rqst); + } + + @Override + public void commit_txn(CommitTxnRequest rqst) + throws NoSuchTxnException, TxnAbortedException, TException { + getTxnHandler().commitTxn(rqst); + } + + @Override + public LockResponse lock(LockRequest rqst) + throws NoSuchTxnException, TxnAbortedException, TException { + return getTxnHandler().lock(rqst); + } + + @Override + public LockResponse check_lock(CheckLockRequest rqst) + throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, TException { + return getTxnHandler().checkLock(rqst); + } + + @Override + public void unlock(UnlockRequest rqst) + throws NoSuchLockException, TxnOpenException, TException { + getTxnHandler().unlock(rqst); + } + + @Override + public ShowLocksResponse show_locks(ShowLocksRequest rqst) throws TException { + return getTxnHandler().showLocks(rqst); + } + + @Override + public void heartbeat(HeartbeatRequest ids) + throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, TException { + getTxnHandler().heartbeat(ids); + } + + @Override + public HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest rqst) + throws TException { + return getTxnHandler().heartbeatTxnRange(rqst); + } + + @Override + public void compact(CompactionRequest rqst) throws TException { + getTxnHandler().compact(rqst); + } + + @Override + public ShowCompactResponse show_compact(ShowCompactRequest rqst) throws TException { + return getTxnHandler().showCompact(rqst); + } + + @Override + public void flushCache() throws TException { + getMS().flushCache(); + } + + @Override + public void add_dynamic_partitions(AddDynamicPartitions rqst) + throws NoSuchTxnException, TxnAbortedException, TException { + getTxnHandler().addDynamicPartitions(rqst); + } + + @Override + public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request) + throws MetaException, TException { + + incrementCounter("get_principals_in_role"); + firePreEvent(new PreAuthorizationCallEvent(this)); + Exception ex = null; + GetPrincipalsInRoleResponse response = null; + try { + response = new GetPrincipalsInRoleResponse(getMS().listRoleMembers(request.getRoleName())); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_principals_in_role", ex == null, ex); + } + return response; + } + + @Override + public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( + GetRoleGrantsForPrincipalRequest request) throws MetaException, TException { + + incrementCounter("get_role_grants_for_principal"); + firePreEvent(new PreAuthorizationCallEvent(this)); + Exception ex = null; + List roleMaps = null; + try { + roleMaps = getMS().listRolesWithGrants(request.getPrincipal_name(), 
request.getPrincipal_type()); + } catch (MetaException e) { + throw e; + } catch (Exception e) { + ex = e; + rethrowException(e); + } finally { + endFunction("get_role_grants_for_principal", ex == null, ex); + } + + //List roleGrantsList = getRolePrincipalGrants(roleMaps); + return new GetRoleGrantsForPrincipalResponse(roleMaps); + } + + /** + * Convert each MRoleMap object into a thrift RolePrincipalGrant object + * @param roles + * @return + */ + private List getRolePrincipalGrants(List roles) throws MetaException { + List rolePrinGrantList = new ArrayList(); + if (roles != null) { + for (Role role : roles) { + rolePrinGrantList.addAll(getMS().listRoleMembers(role.getRoleName())); + } + } + return rolePrinGrantList; + } + + @Override + public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) + throws NoSuchObjectException, MetaException, TException { + String dbName = request.getDbName().toLowerCase(); + String tblName = request.getTblName().toLowerCase(); + startFunction("get_aggr_stats_for: db=" + request.getDbName() + " table=" + request.getTblName()); + + List lowerCaseColNames = new ArrayList(request.getColNames().size()); + for (String colName : request.getColNames()) { + lowerCaseColNames.add(colName.toLowerCase()); + } + List lowerCasePartNames = new ArrayList(request.getPartNames().size()); + for (String partName : request.getPartNames()) { + lowerCasePartNames.add(lowerCaseConvertPartName(partName)); + } + AggrStats aggrStats = null; + + try { + aggrStats = new AggrStats(getMS().get_aggr_stats_for(dbName, tblName, lowerCasePartNames, + lowerCaseColNames)); + return aggrStats; + } finally { + endFunction("get_partitions_statistics_req: ", aggrStats == null, null, request.getTblName()); + } + + } + + @Override + public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, + TException { + boolean ret = true; + for (ColumnStatistics colStats : request.getColStats()) { + ret = ret && update_partition_column_statistics(colStats); + } + return ret; + } + + @Override + public NotificationEventResponse get_next_notification(NotificationEventRequest rqst) + throws TException { + RawStore ms = getMS(); + return ms.getNextNotification(rqst); + } + + @Override + public CurrentNotificationEventId get_current_notificationEventId() throws TException { + RawStore ms = getMS(); + return ms.getCurrentNotificationEventId(); + } + + @Override + public FireEventResponse fire_listener_event(FireEventRequest rqst) throws TException { + switch (rqst.getData().getSetField()) { + case INSERT_DATA: + InsertEvent event = new InsertEvent(rqst.getDbName(), rqst.getTableName(), + rqst.getPartitionVals(), rqst.getData().getInsertData().getFilesAdded(), + rqst.isSuccessful(), this); + for (MetaStoreEventListener listener : listeners) { + listener.onInsert(event); + } + return new FireEventResponse(); + + default: + throw new TException("Event type " + rqst.getData().getSetField().toString() + + " not currently supported."); + } + + } + + @Override + public GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req) + throws TException { + GetFileMetadataByExprResult result = new GetFileMetadataByExprResult(); + RawStore ms = getMS(); + if (!ms.isFileMetadataSupported()) { + result.setIsSupported(false); + result.setMetadata(EMPTY_MAP_FM2); // Set the required field. 
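// (Editor's note, not part of the patch) `metadata` is a required field in the
// Thrift definition, so the result struct cannot be serialized without it; the
// shared empty map keeps the response wire-valid, while isSupported == false
// signals that this RawStore has no file-metadata cache and the caller should
// fall back to, e.g., reading file footers itself.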
+ return result; + } + result.setIsSupported(true); + + List fileIds = req.getFileIds(); + boolean needMetadata = !req.isSetDoGetFooters() || req.isDoGetFooters(); + FileMetadataExprType type = req.isSetType() ? req.getType() : FileMetadataExprType.ORC_SARG; + + ByteBuffer[] metadatas = needMetadata ? new ByteBuffer[fileIds.size()] : null; + ByteBuffer[] ppdResults = new ByteBuffer[fileIds.size()]; + boolean[] eliminated = new boolean[fileIds.size()]; + + getMS().getFileMetadataByExpr(fileIds, type, req.getExpr(), metadatas, ppdResults, eliminated); + for (int i = 0; i < fileIds.size(); ++i) { + if (!eliminated[i] && ppdResults[i] == null) continue; // No metadata => no ppd. + MetadataPpdResult mpr = new MetadataPpdResult(); + ByteBuffer ppdResult = eliminated[i] ? null : handleReadOnlyBufferForThrift(ppdResults[i]); + mpr.setIncludeBitset(ppdResult); + if (needMetadata) { + ByteBuffer metadata = eliminated[i] ? null : handleReadOnlyBufferForThrift(metadatas[i]); + mpr.setMetadata(metadata); + } + result.putToMetadata(fileIds.get(i), mpr); + } + if (!result.isSetMetadata()) { + result.setMetadata(EMPTY_MAP_FM2); // Set the required field. + } + return result; + } + + private final static Map EMPTY_MAP_FM1 = new HashMap(1); + private final static Map EMPTY_MAP_FM2 = + new HashMap(1); + + @Override + public GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req) throws TException { + GetFileMetadataResult result = new GetFileMetadataResult(); + RawStore ms = getMS(); + if (!ms.isFileMetadataSupported()) { + result.setIsSupported(false); + result.setMetadata(EMPTY_MAP_FM1); // Set the required field. + return result; + } + result.setIsSupported(true); + List fileIds = req.getFileIds(); + ByteBuffer[] metadatas = ms.getFileMetadata(fileIds); + assert metadatas.length == fileIds.size(); + for (int i = 0; i < metadatas.length; ++i) { + ByteBuffer bb = metadatas[i]; + if (bb == null) continue; + bb = handleReadOnlyBufferForThrift(bb); + result.putToMetadata(fileIds.get(i), bb); + } + if (!result.isSetMetadata()) { + result.setMetadata(EMPTY_MAP_FM1); // Set the required field. + } + return result; + } + + private ByteBuffer handleReadOnlyBufferForThrift(ByteBuffer bb) { + if (!bb.isReadOnly()) return bb; + // Thrift cannot write read-only buffers... oh well. + // TODO: actually thrift never writes to the buffer, so we could use reflection to + // unset the unnecessary read-only flag if allocation/copy perf becomes a problem. + ByteBuffer copy = ByteBuffer.allocate(bb.capacity()); + copy.put(bb); + copy.flip(); + return copy; + } + + @Override + public PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req) throws TException { + RawStore ms = getMS(); + if (ms.isFileMetadataSupported()) { + ms.putFileMetadata(req.getFileIds(), req.getMetadata(), req.getType()); + } + return new PutFileMetadataResult(); + } + + @Override + public ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req) + throws TException { + getMS().putFileMetadata(req.getFileIds(), null, null); + return new ClearFileMetadataResult(); + } + + @Override + public CacheFileMetadataResult cache_file_metadata( + CacheFileMetadataRequest req) throws TException { + RawStore ms = getMS(); + if (!ms.isFileMetadataSupported()) { + return new CacheFileMetadataResult(false); + } + String dbName = req.getDbName(), tblName = req.getTblName(), + partName = req.isSetPartName() ? 
req.getPartName() : null; + boolean isAllPart = req.isSetIsAllParts() && req.isIsAllParts(); + ms.openTransaction(); + boolean success = false; + try { + Table tbl = ms.getTable(dbName, tblName); + if (tbl == null) { + throw new NoSuchObjectException(dbName + "." + tblName + " not found"); + } + boolean isPartitioned = tbl.isSetPartitionKeys() && tbl.getPartitionKeysSize() > 0; + String tableInputFormat = tbl.isSetSd() ? tbl.getSd().getInputFormat() : null; + if (!isPartitioned) { + if (partName != null || isAllPart) { + throw new MetaException("Table is not partitioned"); + } + if (!tbl.isSetSd() || !tbl.getSd().isSetLocation()) { + throw new MetaException( + "Table does not have storage location; this operation is not supported on views"); + } + FileMetadataExprType type = expressionProxy.getMetadataType(tableInputFormat); + if (type == null) { + throw new MetaException("The operation is not supported for " + tableInputFormat); + } + fileMetadataManager.queueCacheMetadata(tbl.getSd().getLocation(), type); + success = true; + } else { + List partNames = null; + if (partName != null) { + partNames = Lists.newArrayList(partName); + } else if (isAllPart) { + partNames = ms.listPartitionNames(dbName, tblName, (short)-1); + } else { + throw new MetaException("Table is partitioned"); + } + int batchSize = HiveConf.getIntVar( + hiveConf, ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX); + int index = 0; + int successCount = 0, failCount = 0; + HashSet failFormats = null; + while (index < partNames.size()) { + int currentBatchSize = Math.min(batchSize, partNames.size() - index); + List nameBatch = partNames.subList(index, index + currentBatchSize); + index += currentBatchSize; + List parts = ms.getPartitionsByNames(dbName, tblName, nameBatch); + for (Partition part : parts) { + if (!part.isSetSd() || !part.getSd().isSetLocation()) { + throw new MetaException("Partition does not have storage location;" + + " this operation is not supported on views"); + } + String inputFormat = part.getSd().isSetInputFormat() + ? 
part.getSd().getInputFormat() : tableInputFormat; + FileMetadataExprType type = expressionProxy.getMetadataType(inputFormat); + if (type == null) { + ++failCount; + if (failFormats == null) { + failFormats = new HashSet<>(); + } + failFormats.add(inputFormat); + } else { + ++successCount; + fileMetadataManager.queueCacheMetadata(part.getSd().getLocation(), type); + } + } + } + success = true; // Regardless of the following exception + if (failCount > 0) { + String errorMsg = "The operation failed for " + failCount + " partitions and " + + "succeeded for " + successCount + " partitions; unsupported formats: "; + boolean isFirst = true; + for (String s : failFormats) { + if (!isFirst) { + errorMsg += ", "; + } + isFirst = false; + errorMsg += s; + } + throw new MetaException(errorMsg); + } + } + } finally { + if (success) { + if (!ms.commitTransaction()) { + throw new MetaException("Failed to commit"); + } + } else { + ms.rollbackTransaction(); + } + } + return new CacheFileMetadataResult(true); + } + + @VisibleForTesting + public void updateMetrics() throws MetaException { + initTableCount = getMS().getTableCount(); + initPartCount = getMS().getPartitionCount(); + initDatabaseCount = getMS().getDatabaseCount(); + } + + @Override + public GetChangeVersionResult get_change_version(GetChangeVersionRequest req) + throws TException { + return new GetChangeVersionResult(getMS().getChangeVersion(req.getTopic())); + } + } + + + public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf) + throws MetaException { + return newRetryingHMSHandler(baseHandler, hiveConf, false); + } + + public static IHMSHandler newRetryingHMSHandler(IHMSHandler baseHandler, HiveConf hiveConf, + boolean local) throws MetaException { + return RetryingHMSHandler.getProxy(hiveConf, baseHandler, local); + } + + public static Iface newRetryingHMSHandler(String name, HiveConf conf, boolean local) + throws MetaException { + HMSHandler baseHandler = new HiveMetaStore.HMSHandler(name, conf, false); + return RetryingHMSHandler.getProxy(conf, baseHandler, local); + } + + /** + * Discard a current delegation token. + * + * @param tokenStrForm + * the token in string form + */ + public static void cancelDelegationToken(String tokenStrForm + ) throws IOException { + delegationTokenManager.cancelDelegationToken(tokenStrForm); + } + + /** + * Get a new delegation token. + * + * @param renewer + * the designated renewer + */ + public static String getDelegationToken(String owner, String renewer, String remoteAddr) + throws IOException, InterruptedException { + return delegationTokenManager.getDelegationToken(owner, renewer, remoteAddr); + } + + /** + * @return true if remote metastore has been created + */ + public static boolean isMetaStoreRemote() { + return isMetaStoreRemote; + } + + /** + * Renew a delegation token to extend its lifetime. 
+ * + * @param tokenStrForm + * the token in string form + */ + public static long renewDelegationToken(String tokenStrForm + ) throws IOException { + return delegationTokenManager.renewDelegationToken(tokenStrForm); + } + + /** + * HiveMetaStore specific CLI + * + */ + static public class HiveMetastoreCli extends CommonCliOptions { + private int port; + + @SuppressWarnings("static-access") + public HiveMetastoreCli(Configuration configuration) { + super("hivemetastore", true); + this.port = HiveConf.getIntVar(configuration, HiveConf.ConfVars.METASTORE_SERVER_PORT); + + // -p port + OPTIONS.addOption(OptionBuilder + .hasArg() + .withArgName("port") + .withDescription("Hive Metastore port number, default:" + + this.port) + .create('p')); + + } + + @Override + public void parse(String[] args) { + super.parse(args); + + // support the old syntax "hivemetastore [port]" but complain + args = commandLine.getArgs(); + if (args.length > 0) { + // complain about the deprecated syntax -- but still run + System.err.println( + "This usage has been deprecated, consider using the new command " + + "line syntax (run with -h to see usage information)"); + + this.port = new Integer(args[0]); + } + + // notice that command line options take precedence over the + // deprecated (old style) naked args... + + if (commandLine.hasOption('p')) { + this.port = Integer.parseInt(commandLine.getOptionValue('p')); + } else { + // legacy handling + String metastorePort = System.getenv("METASTORE_PORT"); + if (metastorePort != null) { + this.port = Integer.parseInt(metastorePort); + } + } + } + + public int getPort() { + return this.port; + } + } + + /** + * @param args + */ + public static void main(String[] args) throws Throwable { + HiveConf.setLoadMetastoreConfig(true); + final HiveConf conf = new HiveConf(HMSHandler.class); + + HiveMetastoreCli cli = new HiveMetastoreCli(conf); + cli.parse(args); + final boolean isCliVerbose = cli.isVerbose(); + // NOTE: It is critical to do this prior to initializing log4j, otherwise + // any log specific settings via hiveconf will be ignored + Properties hiveconf = cli.addHiveconfToSystemProperties(); + + // If the log4j.configuration property hasn't already been explicitly set, + // use Hive's default log4j configuration + if (System.getProperty("log4j.configurationFile") == null) { + // NOTE: It is critical to do this here so that log4j is reinitialized + // before any of the other core hive classes are loaded + try { + LogUtils.initHiveLog4j(); + } catch (LogInitializationException e) { + HMSHandler.LOG.warn(e.getMessage()); + } + } + HiveStringUtils.startupShutdownMessage(HiveMetaStore.class, args, LOG); + + try { + String msg = "Starting hive metastore on port " + cli.port; + HMSHandler.LOG.info(msg); + if (cli.isVerbose()) { + System.err.println(msg); + } + + + // set all properties specified on the command line + for (Map.Entry item : hiveconf.entrySet()) { + conf.set((String) item.getKey(), (String) item.getValue()); + } + + // Add shutdown hook. 
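// (Editor's note, not part of the patch) JVM shutdown hooks run on normal exit
// and on SIGTERM/SIGINT, but not on SIGKILL or a hard JVM crash, so the metrics
// close() in the hook below is strictly best-effort.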
+ Runtime.getRuntime().addShutdownHook(new Thread() { + @Override + public void run() { + String shutdownMsg = "Shutting down hive metastore."; + HMSHandler.LOG.info(shutdownMsg); + if (isCliVerbose) { + System.err.println(shutdownMsg); + } + if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) { + try { + MetricsFactory.close(); + } catch (Exception e) { + LOG.error("error in Metrics deinit: " + e.getClass().getName() + " " + + e.getMessage(), e); + } + } + } + }); + + //Start Metrics for Standalone (Remote) Mode + if (conf.getBoolVar(ConfVars.METASTORE_METRICS)) { + try { + MetricsFactory.init(conf); + } catch (Exception e) { + // log exception, but ignore inability to start + LOG.error("error in Metrics init: " + e.getClass().getName() + " " + + e.getMessage(), e); + } + } + + Lock startLock = new ReentrantLock(); + Condition startCondition = startLock.newCondition(); + AtomicBoolean startedServing = new AtomicBoolean(); + startMetaStoreThreads(conf, startLock, startCondition, startedServing); + startMetaStore(cli.getPort(), ShimLoader.getHadoopThriftAuthBridge(), conf, startLock, + startCondition, startedServing); + } catch (Throwable t) { + // Catch the exception, log it and rethrow it. + HMSHandler.LOG + .error("Metastore Thrift Server threw an exception...", t); + throw t; + } + } + + /** + * Start Metastore based on a passed {@link HadoopThriftAuthBridge} + * + * @param port + * @param bridge + * @throws Throwable + */ + public static void startMetaStore(int port, HadoopThriftAuthBridge bridge) + throws Throwable { + startMetaStore(port, bridge, new HiveConf(HMSHandler.class), null, null, null); + } + + /** + * Start the metastore store. + * @param port + * @param bridge + * @param conf + * @throws Throwable + */ + public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, + HiveConf conf) throws Throwable { + startMetaStore(port, bridge, conf, null, null, null); + } + + /** + * Start Metastore based on a passed {@link HadoopThriftAuthBridge} + * + * @param port + * @param bridge + * @param conf + * configuration overrides + * @throws Throwable + */ + public static void startMetaStore(int port, HadoopThriftAuthBridge bridge, + HiveConf conf, Lock startLock, Condition startCondition, + AtomicBoolean startedServing) throws Throwable { + try { + isMetaStoreRemote = true; + // Server will create new threads up to max as necessary. After an idle + // period, it will destroy threads to keep the number of threads in the + // pool to min. 
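// (Editor's sketch, not part of the patch) The pool bounds read below map to
// hive.metastore.server.min.threads and hive.metastore.server.max.threads; a
// caller tuning them programmatically might look like this (the values are
// hypothetical, for illustration only):
//
//   HiveConf conf = new HiveConf(HMSHandler.class);
//   conf.setIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS, 200);
//   conf.setIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS, 1000);
//   conf.setBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE, true);
//   startMetaStore(9083, ShimLoader.getHadoopThriftAuthBridge(), conf);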
+ long maxMessageSize = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXMESSAGESIZE); + int minWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMINTHREADS); + int maxWorkerThreads = conf.getIntVar(HiveConf.ConfVars.METASTORESERVERMAXTHREADS); + boolean tcpKeepAlive = conf.getBoolVar(HiveConf.ConfVars.METASTORE_TCP_KEEP_ALIVE); + boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT); + boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL); + useSasl = conf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL); + + + TProcessor processor; + TTransportFactory transFactory; + final TProtocolFactory protocolFactory; + final TProtocolFactory inputProtoFactory; + if (useCompactProtocol) { + protocolFactory = new TCompactProtocol.Factory(); + inputProtoFactory = new TCompactProtocol.Factory(maxMessageSize, maxMessageSize); + } else { + protocolFactory = new TBinaryProtocol.Factory(); + inputProtoFactory = new TBinaryProtocol.Factory(true, true, maxMessageSize, maxMessageSize); + } + HMSHandler baseHandler = new HiveMetaStore.HMSHandler("new db based metaserver", conf, + false); + IHMSHandler handler = newRetryingHMSHandler(baseHandler, conf); + if (useSasl) { + // we are in secure mode. + if (useFramedTransport) { + throw new HiveMetaException("Framed transport is not supported with SASL enabled."); + } + saslServer = bridge.createServer( + conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_KEYTAB_FILE), + conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL)); + // Start delegation token manager + delegationTokenManager = new HiveDelegationTokenManager(); + delegationTokenManager.startDelegationTokenSecretManager(conf, baseHandler, + ServerMode.METASTORE); + saslServer.setSecretManager(delegationTokenManager.getSecretManager()); + transFactory = saslServer.createTransportFactory( + MetaStoreUtils.getMetaStoreSaslProperties(conf)); + processor = saslServer.wrapProcessor( + new ThriftHiveMetastore.Processor(handler)); + LOG.info("Starting DB backed MetaStore Server in Secure Mode"); + } else { + // we are in unsecure mode. + if (conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)) { + transFactory = useFramedTransport ? + new ChainedTTransportFactory(new TFramedTransport.Factory(), + new TUGIContainingTransport.Factory()) + : new TUGIContainingTransport.Factory(); + + processor = new TUGIBasedProcessor(handler); + LOG.info("Starting DB backed MetaStore Server with SetUGI enabled"); + } else { + transFactory = useFramedTransport ? + new TFramedTransport.Factory() : new TTransportFactory(); + processor = new TSetIpAddressProcessor(handler); + LOG.info("Starting DB backed MetaStore Server"); + } + } + + TServerTransport serverTransport = tcpKeepAlive ? 
+ new TServerSocketKeepAlive(port) : new TServerSocket(port); + + TThreadPoolServer.Args args = new TThreadPoolServer.Args(serverTransport) + .processor(processor) + .transportFactory(transFactory) + .protocolFactory(protocolFactory) + .inputProtocolFactory(inputProtoFactory) + .minWorkerThreads(minWorkerThreads) + .maxWorkerThreads(maxWorkerThreads); + + TServer tServer = new TThreadPoolServer(args); + TServerEventHandler tServerEventHandler = new TServerEventHandler() { + @Override + public void preServe() { + } + + @Override + public ServerContext createContext(TProtocol tProtocol, TProtocol tProtocol1) { + try { + Metrics metrics = MetricsFactory.getInstance(); + if (metrics != null) { + metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS); + } + } catch (Exception e) { + LOG.warn("Error Reporting Metastore open connection to Metrics system", e); + } + return null; + } + + @Override + public void deleteContext(ServerContext serverContext, TProtocol tProtocol, TProtocol tProtocol1) { + try { + Metrics metrics = MetricsFactory.getInstance(); + if (metrics != null) { + metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS); + } + } catch (Exception e) { + LOG.warn("Error Reporting Metastore close connection to Metrics system", e); + } + } + + @Override + public void processContext(ServerContext serverContext, TTransport tTransport, TTransport tTransport1) { + } + }; + + tServer.setServerEventHandler(tServerEventHandler); + HMSHandler.LOG.info("Started the new metaserver on port [" + port + + "]..."); + HMSHandler.LOG.info("Options.minWorkerThreads = " + + minWorkerThreads); + HMSHandler.LOG.info("Options.maxWorkerThreads = " + + maxWorkerThreads); + HMSHandler.LOG.info("TCP keepalive = " + tcpKeepAlive); + + if (startLock != null) { + signalOtherThreadsToStart(tServer, startLock, startCondition, startedServing); + } + tServer.serve(); + } catch (Throwable x) { + x.printStackTrace(); + HMSHandler.LOG.error(StringUtils.stringifyException(x)); + throw x; + } + } + + private static void signalOtherThreadsToStart(final TServer server, final Lock startLock, + final Condition startCondition, + final AtomicBoolean startedServing) { + // A simple thread to wait until the server has started and then signal the other threads to + // begin + Thread t = new Thread() { + @Override + public void run() { + do { + try { + Thread.sleep(1000); + } catch (InterruptedException e) { + LOG.warn("Signalling thread was interrupted: " + e.getMessage()); + } + } while (!server.isServing()); + startLock.lock(); + try { + startedServing.set(true); + startCondition.signalAll(); + } finally { + startLock.unlock(); + } + } + }; + t.start(); + } + + /** + * Start threads outside of the thrift service, such as the compactor threads. + * @param conf Hive configuration object + */ + private static void startMetaStoreThreads(final HiveConf conf, final Lock startLock, + final Condition startCondition, final + AtomicBoolean startedServing) { + // A thread is spun up to start these other threads. That's because we can't start them + // until after the TServer has started, but once TServer.serve is called we aren't given back + // control. + Thread t = new Thread() { + @Override + public void run() { + // This is a massive hack. The compactor threads have to access packages in ql (such as + // AcidInputFormat). ql depends on metastore so we can't directly access those. To deal + // with this the compactor thread classes have been put in ql and they are instantiated here + // dynamically.
This is not ideal but it avoids a massive refactoring of Hive packages. + // + // Wrap the start of the threads in a catch Throwable loop so that any failures + // don't doom the rest of the metastore. + startLock.lock(); + try { + JvmPauseMonitor pauseMonitor = new JvmPauseMonitor(conf); + pauseMonitor.start(); + } catch (Throwable t) { + LOG.warn("Could not initiate the JvmPauseMonitor thread." + " GCs and Pauses may not be " + + "warned upon.", t); + } + + try { + // Per the javadocs on Condition, do not depend on the condition alone as a start gate + // since spurious wake ups are possible. + while (!startedServing.get()) startCondition.await(); + startCompactorInitiator(conf); + startCompactorWorkers(conf); + startCompactorCleaner(conf); + startHouseKeeperService(conf); + } catch (Throwable e) { + LOG.error("Failure when starting the compactor, compactions may not happen, " + + StringUtils.stringifyException(e)); + } finally { + startLock.unlock(); + } + } + }; + t.setDaemon(true); + t.setName("Metastore threads starter thread"); + t.start(); + } + + private static void startCompactorInitiator(HiveConf conf) throws Exception { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { + MetaStoreThread initiator = + instantiateThread("org.apache.hadoop.hive.ql.txn.compactor.Initiator"); + initializeAndStartThread(initiator, conf); + } + } + + private static void startCompactorWorkers(HiveConf conf) throws Exception { + int numWorkers = HiveConf.getIntVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_WORKER_THREADS); + for (int i = 0; i < numWorkers; i++) { + MetaStoreThread worker = + instantiateThread("org.apache.hadoop.hive.ql.txn.compactor.Worker"); + initializeAndStartThread(worker, conf); + } + } + + private static void startCompactorCleaner(HiveConf conf) throws Exception { + if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { + MetaStoreThread cleaner = + instantiateThread("org.apache.hadoop.hive.ql.txn.compactor.Cleaner"); + initializeAndStartThread(cleaner, conf); + } + } + + private static MetaStoreThread instantiateThread(String classname) throws Exception { + Class c = Class.forName(classname); + Object o = c.newInstance(); + if (MetaStoreThread.class.isAssignableFrom(o.getClass())) { + return (MetaStoreThread)o; + } else { + String s = classname + " is not an instance of MetaStoreThread."; + LOG.error(s); + throw new IOException(s); + } + } + + private static int nextThreadId = 1000000; + + private static void initializeAndStartThread(MetaStoreThread thread, HiveConf conf) throws + MetaException { + LOG.info("Starting metastore thread of type " + thread.getClass().getName()); + thread.setHiveConf(conf); + thread.setThreadId(nextThreadId++); + thread.init(new AtomicBoolean(), new AtomicBoolean()); + thread.start(); + } + private static void startHouseKeeperService(HiveConf conf) throws Exception { + if(!HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_COMPACTOR_INITIATOR_ON)) { + return; + } + startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidHouseKeeperService")); + startHouseKeeperService(conf, Class.forName("org.apache.hadoop.hive.ql.txn.AcidCompactionHistoryService")); + } + private static void startHouseKeeperService(HiveConf conf, Class c) throws Exception { + //todo: when metastore adds orderly-shutdown logic, houseKeeper.stop() + //should be called from it + HouseKeeperService houseKeeper = (HouseKeeperService)c.newInstance(); + try { + houseKeeper.start(conf); + } + catch (Exception ex) { +
LOG.error("Failed to start {}" , houseKeeper.getClass() + + ". The system will not handle {} " , houseKeeper.getServiceDescription(), + ". Root Cause: ", ex); + } + } + + public static Map createHandlerMap() { + Map fmHandlers = new HashMap<>(); + for (FileMetadataExprType v : FileMetadataExprType.values()) { + switch (v) { + case ORC_SARG: + fmHandlers.put(v, new OrcFileMetadataHandler()); + break; + default: + throw new AssertionError("Unsupported type " + v); + } + } + return fmHandlers; + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index cdd12ab..8939b1f 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -100,6 +100,8 @@ import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; @@ -136,6 +138,7 @@ import org.slf4j.LoggerFactory; import javax.security.auth.login.LoginException; + import java.io.IOException; import java.lang.reflect.Constructor; import java.lang.reflect.InvocationHandler; @@ -1528,6 +1531,21 @@ public Index getIndex(String dbName, String tblName, String indexName) return filterHook.filterIndexes(client.get_indexes(dbName, tblName, max)); } + @Override + public List getPrimaryKeys(String dbName, String tblName) + throws MetaException, NoSuchObjectException, TException { + return client.get_primary_keys(dbName, tblName); + } + + @Override + public List getForeignKeys(String parentDbName, + String parentTblName, String foreignDbName, + String foreignTblName) throws MetaException, + NoSuchObjectException, TException { + return client.get_foreign_keys(parentDbName, parentTblName, + foreignDbName, foreignTblName); + } + /** {@inheritDoc} */ @Override public boolean updateTableColumnStatistics(ColumnStatistics statsObj) diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java.orig b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java.orig new file mode 100644 index 0000000..cdd12ab --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java.orig @@ -0,0 +1,2378 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.metastore; + +import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; +import org.apache.hadoop.hive.common.classification.InterfaceStability.Unstable; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.conf.HiveConfUtil; +import org.apache.hadoop.hive.metastore.api.AbortTxnRequest; +import org.apache.hadoop.hive.metastore.api.AddDynamicPartitions; +import org.apache.hadoop.hive.metastore.api.AddPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.AddPartitionsResult; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.CacheFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.CacheFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.CheckLockRequest; +import org.apache.hadoop.hive.metastore.api.ClearFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CommitTxnRequest; +import org.apache.hadoop.hive.metastore.api.CompactionRequest; +import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.DropPartitionsExpr; +import org.apache.hadoop.hive.metastore.api.DropPartitionsRequest; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FireEventRequest; +import org.apache.hadoop.hive.metastore.api.FireEventResponse; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import org.apache.hadoop.hive.metastore.api.GetChangeVersionRequest; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprRequest; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataByExprResult; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.GetFileMetadataResult; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; +import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; +import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; +import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeRequest; +import org.apache.hadoop.hive.metastore.api.GrantRevokePrivilegeResponse; +import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleRequest; +import org.apache.hadoop.hive.metastore.api.GrantRevokeRoleResponse; +import org.apache.hadoop.hive.metastore.api.GrantRevokeType; +import org.apache.hadoop.hive.metastore.api.HeartbeatRequest; +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeRequest; +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; +import 
org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventRequest; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.OpenTxnRequest; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PartitionsByExprRequest; +import org.apache.hadoop.hive.metastore.api.PartitionsByExprResult; +import org.apache.hadoop.hive.metastore.api.PartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.PutFileMetadataRequest; +import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.TableStatsRequest; +import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.Type; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.api.UnlockRequest; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.hadoop.hive.metastore.txn.TxnUtils; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.hive.thrift.HadoopThriftAuthBridge; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hadoop.util.StringUtils; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TCompactProtocol; +import 
org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.transport.TFramedTransport; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +import javax.security.auth.login.LoginException; +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.net.InetAddress; +import java.net.URI; +import java.net.UnknownHostException; +import java.nio.ByteBuffer; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashMap; +import java.util.Iterator; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.NoSuchElementException; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME; +import static org.apache.hadoop.hive.metastore.MetaStoreUtils.isIndexTable; + +/** + * Hive Metastore Client. + * The public implementation of IMetaStoreClient. Methods not inherited from IMetaStoreClient + * are not public and can change. Hence this is marked as unstable. + * For users who require retry mechanism when the connection between metastore and client is + * broken, RetryingMetaStoreClient class should be used. + */ +@Public +@Unstable +public class HiveMetaStoreClient implements IMetaStoreClient { + ThriftHiveMetastore.Iface client = null; + private TTransport transport = null; + private boolean isConnected = false; + private URI metastoreUris[]; + private final HiveMetaHookLoader hookLoader; + protected final HiveConf conf; + protected boolean fastpath = false; + private String tokenStrForm; + private final boolean localMetaStore; + private final MetaStoreFilterHook filterHook; + private final int fileMetadataBatchSize; + + private Map currentMetaVars; + + private static final AtomicInteger connCount = new AtomicInteger(0); + + // for thrift connects + private int retries = 5; + private long retryDelaySeconds = 0; + + static final protected Logger LOG = LoggerFactory.getLogger("hive.metastore"); + + public HiveMetaStoreClient(HiveConf conf) throws MetaException { + this(conf, null, true); + } + + public HiveMetaStoreClient(HiveConf conf, HiveMetaHookLoader hookLoader, Boolean allowEmbedded) + throws MetaException { + + this.hookLoader = hookLoader; + if (conf == null) { + conf = new HiveConf(HiveMetaStoreClient.class); + } + this.conf = conf; + filterHook = loadFilterHooks(); + fileMetadataBatchSize = HiveConf.getIntVar( + conf, HiveConf.ConfVars.METASTORE_BATCH_RETRIEVE_OBJECTS_MAX); + + String msUri = conf.getVar(ConfVars.METASTOREURIS); + localMetaStore = HiveConfUtil.isEmbeddedMetaStore(msUri); + if (localMetaStore) { + if (!allowEmbedded) { + throw new MetaException("Embedded metastore is not allowed here. 
Please configure " + + ConfVars.METASTOREURIS.varname + "; it is currently set to [" + msUri + "]"); + } + // instantiate the metastore server handler directly instead of connecting + // through the network + if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + client = new HiveMetaStore.HMSHandler("hive client", conf, true); + fastpath = true; + } else { + client = HiveMetaStore.newRetryingHMSHandler("hive client", conf, true); + } + isConnected = true; + snapshotActiveConf(); + return; + } else { + if (conf.getBoolVar(ConfVars.METASTORE_FASTPATH)) { + throw new RuntimeException("You can't set hive.metastore.fastpath to true when you're " + + "talking to the thrift metastore service. You must run the metastore locally."); + } + } + + // get the number retries + retries = HiveConf.getIntVar(conf, HiveConf.ConfVars.METASTORETHRIFTCONNECTIONRETRIES); + retryDelaySeconds = conf.getTimeVar( + ConfVars.METASTORE_CLIENT_CONNECT_RETRY_DELAY, TimeUnit.SECONDS); + + // user wants file store based configuration + if (conf.getVar(HiveConf.ConfVars.METASTOREURIS) != null) { + String metastoreUrisString[] = conf.getVar( + HiveConf.ConfVars.METASTOREURIS).split(","); + metastoreUris = new URI[metastoreUrisString.length]; + try { + int i = 0; + for (String s : metastoreUrisString) { + URI tmpUri = new URI(s); + if (tmpUri.getScheme() == null) { + throw new IllegalArgumentException("URI: " + s + + " does not have a scheme"); + } + metastoreUris[i++] = tmpUri; + + } + } catch (IllegalArgumentException e) { + throw (e); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + } else { + LOG.error("NOT getting uris from conf"); + throw new MetaException("MetaStoreURIs not found in conf file"); + } + // finally open the store + open(); + } + + private MetaStoreFilterHook loadFilterHooks() throws IllegalStateException { + Class authProviderClass = conf. + getClass(HiveConf.ConfVars.METASTORE_FILTER_HOOK.varname, + DefaultMetaStoreFilterHookImpl.class, + MetaStoreFilterHook.class); + String msg = "Unable to create instance of " + authProviderClass.getName() + ": "; + try { + Constructor constructor = + authProviderClass.getConstructor(HiveConf.class); + return constructor.newInstance(conf); + } catch (NoSuchMethodException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (SecurityException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (InstantiationException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (IllegalAccessException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (IllegalArgumentException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } catch (InvocationTargetException e) { + throw new IllegalStateException(msg + e.getMessage(), e); + } + } + + /** + * Swaps the first element of the metastoreUris array with a random element from the + * remainder of the array. 
+ */ + private void promoteRandomMetaStoreURI() { + if (metastoreUris.length <= 1) { + return; + } + Random rng = new Random(); + int index = rng.nextInt(metastoreUris.length - 1) + 1; + URI tmp = metastoreUris[0]; + metastoreUris[0] = metastoreUris[index]; + metastoreUris[index] = tmp; + } + + @Override + public boolean isLocalMetaStore() { + return localMetaStore; + } + + @Override + public boolean isCompatibleWith(HiveConf conf) { + // Make a copy of currentMetaVars, there is a race condition that + // currentMetaVars might be changed during the execution of the method + Map<String, String> currentMetaVarsCopy = currentMetaVars; + if (currentMetaVarsCopy == null) { + return false; // recreate + } + boolean compatible = true; + for (ConfVars oneVar : HiveConf.metaVars) { + // Since metaVars are all of different types, use string for comparison + String oldVar = currentMetaVarsCopy.get(oneVar.varname); + String newVar = conf.get(oneVar.varname, ""); + if (oldVar == null || + (oneVar.isCaseSensitive() ? !oldVar.equals(newVar) : !oldVar.equalsIgnoreCase(newVar))) { + LOG.info("Metastore configuration " + oneVar.varname + + " changed from " + oldVar + " to " + newVar); + compatible = false; + } + } + return compatible; + } + + @Override + public void setHiveAddedJars(String addedJars) { + HiveConf.setVar(conf, ConfVars.HIVEADDEDJARS, addedJars); + } + + @Override + public void reconnect() throws MetaException { + if (localMetaStore) { + // For direct DB connections we don't yet support reestablishing connections. + throw new MetaException("For direct MetaStore DB connections, we don't support retries" + + " at the client level."); + } else { + close(); + // Swap the first element of the metastoreUris[] with a random element from the rest + // of the array. Rationale being that this method will generally be called when the default + // connection has died and the default connection is likely to be the first array element.
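// (Editor's note, not part of the patch) Worked example: with
// metastoreUris = [A, B, C] and A dead, index is drawn uniformly from {1, 2},
// so A is swapped with B or C and the open() below tries the promoted replica
// first; with a single configured URI, promoteRandomMetaStoreURI() is a no-op.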
+ promoteRandomMetaStoreURI(); + open(); + } + } + + /** + * @param dbname + * @param tbl_name + * @param new_tbl + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see + * org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_table( + * java.lang.String, java.lang.String, + * org.apache.hadoop.hive.metastore.api.Table) + */ + @Override + public void alter_table(String dbname, String tbl_name, Table new_tbl) + throws InvalidOperationException, MetaException, TException { + alter_table_with_environmentContext(dbname, tbl_name, new_tbl, null); + } + + public void alter_table_with_environmentContext(String dbname, String tbl_name, Table new_tbl, + EnvironmentContext envContext) throws InvalidOperationException, MetaException, TException { + client.alter_table_with_environment_context(dbname, tbl_name, new_tbl, envContext); + } + + /** + * @param dbname + * @param name + * @param part_vals + * @param newPart + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#rename_partition( + * java.lang.String, java.lang.String, java.util.List, org.apache.hadoop.hive.metastore.api.Partition) + */ + @Override + public void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + throws InvalidOperationException, MetaException, TException { + client.rename_partition(dbname, name, part_vals, newPart); + } + + private void open() throws MetaException { + isConnected = false; + TTransportException tte = null; + boolean useSasl = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_SASL); + boolean useFramedTransport = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_FRAMED_TRANSPORT); + boolean useCompactProtocol = conf.getBoolVar(ConfVars.METASTORE_USE_THRIFT_COMPACT_PROTOCOL); + int clientSocketTimeout = (int) conf.getTimeVar( + ConfVars.METASTORE_CLIENT_SOCKET_TIMEOUT, TimeUnit.MILLISECONDS); + + for (int attempt = 0; !isConnected && attempt < retries; ++attempt) { + for (URI store : metastoreUris) { + LOG.info("Trying to connect to metastore with URI " + store); + try { + transport = new TSocket(store.getHost(), store.getPort(), clientSocketTimeout); + if (useSasl) { + // Wrap thrift connection with SASL for secure connection. + try { + HadoopThriftAuthBridge.Client authBridge = + ShimLoader.getHadoopThriftAuthBridge().createClient(); + + // check if we should use delegation tokens to authenticate + // the call below gets hold of the tokens if they are set up by hadoop + // this should happen on the map/reduce tasks if the client added the + // tokens into hadoop's credential store in the front end during job + // submission. 
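// (Editor's sketch, not part of the patch) How a front end might arrange the
// token that the lookup below resolves; the signature value is illustrative,
// not a fixed constant:
//
//   String tokenStr = metaStoreClient.getDelegationToken(owner, renewer);
//   // ... store tokenStr in the job credentials under "myMetastoreToken" ...
//   jobConf.set("hive.metastore.token.signature", "myMetastoreToken");
//   // tasks then recover it here via Utils.getTokenStrForm(tokenSig)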
+ String tokenSig = conf.get("hive.metastore.token.signature"); + // tokenSig could be null + tokenStrForm = Utils.getTokenStrForm(tokenSig); + if(tokenStrForm != null) { + // authenticate using delegation tokens via the "DIGEST" mechanism + transport = authBridge.createClientTransport(null, store.getHost(), + "DIGEST", tokenStrForm, transport, + MetaStoreUtils.getMetaStoreSaslProperties(conf)); + } else { + String principalConfig = + conf.getVar(HiveConf.ConfVars.METASTORE_KERBEROS_PRINCIPAL); + transport = authBridge.createClientTransport( + principalConfig, store.getHost(), "KERBEROS", null, + transport, MetaStoreUtils.getMetaStoreSaslProperties(conf)); + } + } catch (IOException ioe) { + LOG.error("Couldn't create client transport", ioe); + throw new MetaException(ioe.toString()); + } + } else if (useFramedTransport) { + transport = new TFramedTransport(transport); + } + final TProtocol protocol; + if (useCompactProtocol) { + protocol = new TCompactProtocol(transport); + } else { + protocol = new TBinaryProtocol(transport); + } + client = new ThriftHiveMetastore.Client(protocol); + try { + transport.open(); + LOG.info("Opened a connection to metastore, current connections: " + connCount.incrementAndGet()); + isConnected = true; + } catch (TTransportException e) { + tte = e; + if (LOG.isDebugEnabled()) { + LOG.warn("Failed to connect to the MetaStore Server...", e); + } else { + // Don't print full exception trace if DEBUG is not on. + LOG.warn("Failed to connect to the MetaStore Server..."); + } + } + + if (isConnected && !useSasl && conf.getBoolVar(ConfVars.METASTORE_EXECUTE_SET_UGI)){ + // Call set_ugi, only in unsecure mode. + try { + UserGroupInformation ugi = Utils.getUGI(); + client.set_ugi(ugi.getUserName(), Arrays.asList(ugi.getGroupNames())); + } catch (LoginException e) { + LOG.warn("Failed to do login. set_ugi() is not successful, " + + "Continuing without it.", e); + } catch (IOException e) { + LOG.warn("Failed to find ugi of client set_ugi() is not successful, " + + "Continuing without it.", e); + } catch (TException e) { + LOG.warn("set_ugi() not successful, Likely cause: new client talking to old server. " + + "Continuing without it.", e); + } + } + } catch (MetaException e) { + LOG.error("Unable to connect to metastore with URI " + store + + " in attempt " + attempt, e); + } + if (isConnected) { + break; + } + } + // Wait before launching the next round of connection retries. + if (!isConnected && retryDelaySeconds > 0) { + try { + LOG.info("Waiting " + retryDelaySeconds + " seconds before next connection attempt."); + Thread.sleep(retryDelaySeconds * 1000); + } catch (InterruptedException ignore) {} + } + } + + if (!isConnected) { + throw new MetaException("Could not connect to meta store using any of the URIs provided." + + " Most recent failure: " + StringUtils.stringifyException(tte)); + } + + snapshotActiveConf(); + + LOG.info("Connected to metastore."); + } + + private void snapshotActiveConf() { + currentMetaVars = new HashMap(HiveConf.metaVars.length); + for (ConfVars oneVar : HiveConf.metaVars) { + currentMetaVars.put(oneVar.varname, conf.get(oneVar.varname, "")); + } + } + + public String getTokenStrForm() throws IOException { + return tokenStrForm; + } + + @Override + public void close() { + isConnected = false; + currentMetaVars = null; + try { + if (null != client) { + client.shutdown(); + } + } catch (TException e) { + LOG.debug("Unable to shutdown metastore client. 
Will try closing transport directly.", e); + } + // Transport would have got closed via client.shutdown(), so we dont need this, but + // just in case, we make this call. + if ((transport != null) && transport.isOpen()) { + transport.close(); + LOG.info("Closed a connection to metastore, current connections: " + connCount.decrementAndGet()); + } + } + + @Override + public void setMetaConf(String key, String value) throws TException { + client.setMetaConf(key, value); + } + + @Override + public String getMetaConf(String key) throws TException { + return client.getMetaConf(key); + } + + /** + * @param new_part + * @return the added partition + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partition(org.apache.hadoop.hive.metastore.api.Partition) + */ + @Override + public Partition add_partition(Partition new_part) + throws InvalidObjectException, AlreadyExistsException, MetaException, + TException { + return add_partition(new_part, null); + } + + public Partition add_partition(Partition new_part, EnvironmentContext envContext) + throws InvalidObjectException, AlreadyExistsException, MetaException, + TException { + Partition p = client.add_partition_with_environment_context(new_part, envContext); + return fastpath ? p : deepCopy(p); + } + + /** + * @param new_parts + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#add_partitions(List) + */ + @Override + public int add_partitions(List new_parts) + throws InvalidObjectException, AlreadyExistsException, MetaException, + TException { + return client.add_partitions(new_parts); + } + + @Override + public List add_partitions( + List parts, boolean ifNotExists, boolean needResults) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + if (parts.isEmpty()) { + return needResults ? new ArrayList() : null; + } + Partition part = parts.get(0); + AddPartitionsRequest req = new AddPartitionsRequest( + part.getDbName(), part.getTableName(), parts, ifNotExists); + req.setNeedResult(needResults); + AddPartitionsResult result = client.add_partitions_req(req); + return needResults ? 
filterHook.filterPartitions(result.getPartitions()) : null; + } + + @Override + public int add_partitions_pspec(PartitionSpecProxy partitionSpec) throws TException { + return client.add_partitions_pspec(partitionSpec.toPartitionSpec()); + } + + /** + * @param table_name + * @param db_name + * @param part_vals + * @return the appended partition + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + @Override + public Partition appendPartition(String db_name, String table_name, + List part_vals) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException { + return appendPartition(db_name, table_name, part_vals, null); + } + + public Partition appendPartition(String db_name, String table_name, List part_vals, + EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + MetaException, TException { + Partition p = client.append_partition_with_environment_context(db_name, table_name, + part_vals, envContext); + return fastpath ? p : deepCopy(p); + } + + @Override + public Partition appendPartition(String dbName, String tableName, String partName) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + return appendPartition(dbName, tableName, partName, null); + } + + public Partition appendPartition(String dbName, String tableName, String partName, + EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + MetaException, TException { + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? 
p : deepCopy(p); + } + + /** + * Exchange the partition between two tables + * @param partitionSpecs partitions specs of the parent partition to be exchanged + * @param destDb the db of the destination table + * @param destinationTableName the destination table name + @ @return new partition after exchanging + */ + @Override + public Partition exchange_partition(Map partitionSpecs, + String sourceDb, String sourceTable, String destDb, + String destinationTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + return client.exchange_partition(partitionSpecs, sourceDb, sourceTable, + destDb, destinationTableName); + } + + /** + * Exchange the partitions between two tables + * @param partitionSpecs partitions specs of the parent partition to be exchanged + * @param destDb the db of the destination table + * @param destinationTableName the destination table name + @ @return new partitions after exchanging + */ + @Override + public List exchange_partitions(Map partitionSpecs, + String sourceDb, String sourceTable, String destDb, + String destinationTableName) throws MetaException, + NoSuchObjectException, InvalidObjectException, TException { + return client.exchange_partitions(partitionSpecs, sourceDb, sourceTable, + destDb, destinationTableName); + } + + @Override + public void validatePartitionNameCharacters(List partVals) + throws TException, MetaException { + client.partition_name_has_valid_characters(partVals, true); + } + + /** + * Create a new Database + * @param db + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_database(Database) + */ + @Override + public void createDatabase(Database db) + throws AlreadyExistsException, InvalidObjectException, MetaException, TException { + client.create_database(db); + } + + /** + * @param tbl + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) + */ + @Override + public void createTable(Table tbl) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException { + createTable(tbl, null); + } + + public void createTable(Table tbl, EnvironmentContext envContext) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException { + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preCreateTable(tbl); + } + boolean success = false; + try { + // Subclasses can override this step (for example, for temporary tables) + create_table_with_environment_context(tbl, envContext); + if (hook != null) { + hook.commitCreateTable(tbl); + } + success = true; + } finally { + if (!success && (hook != null)) { + hook.rollbackCreateTable(tbl); + } + } + } + + /** + * @param type + * @return true or false + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_type(org.apache.hadoop.hive.metastore.api.Type) + */ + public boolean createType(Type type) throws AlreadyExistsException, + InvalidObjectException, MetaException, TException { + return client.create_type(type); + } + + /** + * @param name + * @throws NoSuchObjectException + * @throws InvalidOperationException + * @throws MetaException + * 
@throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_database(java.lang.String, boolean, boolean) + */ + @Override + public void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, true, false, false); + } + + @Override + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + dropDatabase(name, deleteData, ignoreUnknownDb, false); + } + + @Override + public void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException { + try { + getDatabase(name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownDb) { + throw e; + } + return; + } + + if (cascade) { + List tableList = getAllTables(name); + for (String table : tableList) { + try { + // Subclasses can override this step (for example, for temporary tables) + dropTable(name, table, deleteData, true); + } catch (UnsupportedOperationException e) { + // Ignore Index tables, those will be dropped with parent tables + } + } + } + client.drop_database(name, deleteData, cascade); + } + + /** + * @param tbl_name + * @param db_name + * @param part_vals + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + public boolean dropPartition(String db_name, String tbl_name, + List part_vals) throws NoSuchObjectException, MetaException, + TException { + return dropPartition(db_name, tbl_name, part_vals, true, null); + } + + public boolean dropPartition(String db_name, String tbl_name, List part_vals, + EnvironmentContext env_context) throws NoSuchObjectException, MetaException, TException { + return dropPartition(db_name, tbl_name, part_vals, true, env_context); + } + + @Override + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData) + throws NoSuchObjectException, MetaException, TException { + return dropPartition(dbName, tableName, partName, deleteData, null); + } + + private static EnvironmentContext getEnvironmentContextWithIfPurgeSet() { + Map warehouseOptions = new HashMap(); + warehouseOptions.put("ifPurge", "TRUE"); + return new EnvironmentContext(warehouseOptions); + } + + /* + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, boolean ifPurge) + throws NoSuchObjectException, MetaException, TException { + + return dropPartition(dbName, tableName, partName, deleteData, + ifPurge? 
getEnvironmentContextWithIfPurgeSet() : null); + } + */ + + public boolean dropPartition(String dbName, String tableName, String partName, boolean deleteData, + EnvironmentContext envContext) throws NoSuchObjectException, MetaException, TException { + return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, + deleteData, envContext); + } + + /** + * @param db_name + * @param tbl_name + * @param part_vals + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + @Override + public boolean dropPartition(String db_name, String tbl_name, + List<String> part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException { + return dropPartition(db_name, tbl_name, part_vals, deleteData, null); + } + + @Override + public boolean dropPartition(String db_name, String tbl_name, + List<String> part_vals, PartitionDropOptions options) throws TException { + return dropPartition(db_name, tbl_name, part_vals, options.deleteData, + options.purgeData? getEnvironmentContextWithIfPurgeSet() : null); + } + + public boolean dropPartition(String db_name, String tbl_name, List<String> part_vals, + boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + return client.drop_partition_with_environment_context(db_name, tbl_name, part_vals, deleteData, + envContext); + } + + @Override + public List<Partition> dropPartitions(String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, PartitionDropOptions options) + throws TException { + RequestPartsSpec rps = new RequestPartsSpec(); + List<DropPartitionsExpr> exprs = new ArrayList<DropPartitionsExpr>(partExprs.size()); + for (ObjectPair<Integer, byte[]> partExpr : partExprs) { + DropPartitionsExpr dpe = new DropPartitionsExpr(); + dpe.setExpr(partExpr.getSecond()); + dpe.setPartArchiveLevel(partExpr.getFirst()); + exprs.add(dpe); + } + rps.setExprs(exprs); + DropPartitionsRequest req = new DropPartitionsRequest(dbName, tblName, rps); + req.setDeleteData(options.deleteData); + req.setNeedResult(options.returnResults); + req.setIfExists(options.ifExists); + if (options.purgeData) { + LOG.info("Dropped partitions will be purged!"); + req.setEnvironmentContext(getEnvironmentContextWithIfPurgeSet()); + } + return client.drop_partitions_req(req).getPartitions(); + } + + @Override + public List<Partition> dropPartitions(String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, + boolean ifExists, boolean needResult) throws NoSuchObjectException, MetaException, TException { + + return dropPartitions(dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists) + .returnResults(needResult)); + + } + + @Override + public List<Partition> dropPartitions(String dbName, String tblName, + List<ObjectPair<Integer, byte[]>> partExprs, boolean deleteData, + boolean ifExists) throws NoSuchObjectException, MetaException, TException { + // By default, we need the results from dropPartitions(); + return dropPartitions(dbName, tblName, partExprs, + PartitionDropOptions.instance() + .deleteData(deleteData) + .ifExists(ifExists)); + } + + /** + * {@inheritDoc} + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab) throws MetaException, 
TException, + NoSuchObjectException, UnsupportedOperationException { + dropTable(dbname, name, deleteData, ignoreUnknownTab, null); + } + + /** + * Drop the table and choose whether to save the data in the trash. + * @param ifPurge completely purge the table (skipping trash) while removing + * data from warehouse + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) + throws MetaException, TException, NoSuchObjectException, UnsupportedOperationException { + //build new environmentContext with ifPurge; + EnvironmentContext envContext = null; + if(ifPurge){ + Map warehouseOptions = null; + warehouseOptions = new HashMap(); + warehouseOptions.put("ifPurge", "TRUE"); + envContext = new EnvironmentContext(warehouseOptions); + } + dropTable(dbname, name, deleteData, ignoreUnknownTab, envContext); + } + + /** {@inheritDoc} */ + @Override + @Deprecated + public void dropTable(String tableName, boolean deleteData) + throws MetaException, UnknownTableException, TException, NoSuchObjectException { + dropTable(DEFAULT_DATABASE_NAME, tableName, deleteData, false, null); + } + + /** + * @see #dropTable(String, String, boolean, boolean, EnvironmentContext) + */ + @Override + public void dropTable(String dbname, String name) + throws NoSuchObjectException, MetaException, TException { + dropTable(dbname, name, true, true, null); + } + + /** + * Drop the table and choose whether to: delete the underlying table data; + * throw if the table doesn't exist; save the data in the trash. + * + * @param dbname + * @param name + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist + * @param envContext + * for communicating with thrift + * @throws MetaException + * could not drop table properly + * @throws NoSuchObjectException + * the table wasn't found + * @throws TException + * a thrift communication error occurred + * @throws UnsupportedOperationException + * dropping an index table is not allowed + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_table(java.lang.String, + * java.lang.String, boolean) + */ + public void dropTable(String dbname, String name, boolean deleteData, + boolean ignoreUnknownTab, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + Table tbl; + try { + tbl = getTable(dbname, name); + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw e; + } + return; + } + if (isIndexTable(tbl)) { + throw new UnsupportedOperationException("Cannot drop index tables"); + } + HiveMetaHook hook = getHook(tbl); + if (hook != null) { + hook.preDropTable(tbl); + } + boolean success = false; + try { + drop_table_with_environment_context(dbname, name, deleteData, envContext); + if (hook != null) { + hook.commitDropTable(tbl, deleteData); + } + success=true; + } catch (NoSuchObjectException e) { + if (!ignoreUnknownTab) { + throw e; + } + } finally { + if (!success && (hook != null)) { + hook.rollbackDropTable(tbl); + } + } + } + + /** + * @param type + * @return true if the type is dropped + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_type(java.lang.String) + */ + public boolean dropType(String type) throws NoSuchObjectException, MetaException, TException 
{ + return client.drop_type(type); + } + + /** + * @param name + * @return map of types + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type_all(java.lang.String) + */ + public Map<String, Type> getTypeAll(String name) throws MetaException, + TException { + Map<String, Type> result = null; + Map<String, Type> fromClient = client.get_type_all(name); + if (fromClient != null) { + result = new LinkedHashMap<String, Type>(); + for (String key : fromClient.keySet()) { + result.put(key, deepCopy(fromClient.get(key))); + } + } + return result; + } + + /** {@inheritDoc} */ + @Override + public List<String> getDatabases(String databasePattern) + throws MetaException { + try { + return filterHook.filterDatabases(client.get_databases(databasePattern)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** {@inheritDoc} */ + @Override + public List<String> getAllDatabases() throws MetaException { + try { + return filterHook.filterDatabases(client.get_all_databases()); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + /** + * @param tbl_name + * @param db_name + * @param max_parts + * @return list of partitions + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + @Override + public List<Partition> listPartitions(String db_name, String tbl_name, + short max_parts) throws NoSuchObjectException, MetaException, TException { + List<Partition> parts = client.get_partitions(db_name, tbl_name, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) throws TException { + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_partitions_pspec(dbName, tableName, maxParts))); + } + + @Override + public List<Partition> listPartitions(String db_name, String tbl_name, + List<String> part_vals, short max_parts) + throws NoSuchObjectException, MetaException, TException { + List<Partition> parts = client.get_partitions_ps(db_name, tbl_name, part_vals, max_parts); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List<Partition> listPartitionsWithAuthInfo(String db_name, + String tbl_name, short max_parts, String user_name, List<String> group_names) + throws NoSuchObjectException, MetaException, TException { + List<Partition> parts = client.get_partitions_with_auth(db_name, tbl_name, max_parts, + user_name, group_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public List<Partition> listPartitionsWithAuthInfo(String db_name, + String tbl_name, List<String> part_vals, short max_parts, + String user_name, List<String> group_names) throws NoSuchObjectException, + MetaException, TException { + List<Partition> parts = client.get_partitions_ps_with_auth(db_name, + tbl_name, part_vals, max_parts, user_name, group_names); + return fastpath ? parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + /** + * Get list of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can + * be done only on string partition keys. 
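+ * For example, assuming a connected IMetaStoreClient named msClient (a hypothetical name, not + * defined in this patch) and a table partitioned by a string column part1, an illustrative call is: + * List<Partition> parts = msClient.listPartitionsByFilter("db", "tbl", + * "part1 = \"p1_abc\"", (short) -1); // -1 returns all matching partitions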
+ * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @return list of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + @Override + public List listPartitionsByFilter(String db_name, String tbl_name, + String filter, short max_parts) throws MetaException, + NoSuchObjectException, TException { + List parts = client.get_partitions_by_filter(db_name, tbl_name, filter, max_parts); + return fastpath ? parts :deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, + NoSuchObjectException, TException { + return PartitionSpecProxy.Factory.get(filterHook.filterPartitionSpecs( + client.get_part_specs_by_filter(db_name, tbl_name, filter, max_parts))); + } + + @Override + public boolean listPartitionsByExpr(String db_name, String tbl_name, byte[] expr, + String default_partition_name, short max_parts, List result) + throws TException { + assert result != null; + PartitionsByExprRequest req = new PartitionsByExprRequest( + db_name, tbl_name, ByteBuffer.wrap(expr)); + if (default_partition_name != null) { + req.setDefaultPartitionName(default_partition_name); + } + if (max_parts >= 0) { + req.setMaxParts(max_parts); + } + PartitionsByExprResult r = null; + try { + r = client.get_partitions_by_expr(req); + } catch (TApplicationException te) { + // TODO: backward compat for Hive <= 0.12. Can be removed later. + if (te.getType() != TApplicationException.UNKNOWN_METHOD + && te.getType() != TApplicationException.WRONG_METHOD_NAME) { + throw te; + } + throw new IncompatibleMetastoreException( + "Metastore doesn't support listPartitionsByExpr: " + te.getMessage()); + } + if (fastpath) { + result.addAll(r.getPartitions()); + } else { + r.setPartitions(filterHook.filterPartitions(r.getPartitions())); + // TODO: in these methods, do we really need to deepcopy? + deepCopyPartitions(r.getPartitions(), result); + } + return !r.isSetHasUnknownPartitions() || r.isHasUnknownPartitions(); // Assume the worst. + } + + /** + * @param name + * @return the database + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_database(java.lang.String) + */ + @Override + public Database getDatabase(String name) throws NoSuchObjectException, + MetaException, TException { + Database d = client.get_database(name); + return fastpath ? d :deepCopy(filterHook.filterDatabase(d)); + } + + /** + * @param tbl_name + * @param db_name + * @param part_vals + * @return the partition + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + @Override + public Partition getPartition(String db_name, String tbl_name, + List part_vals) throws NoSuchObjectException, MetaException, TException { + Partition p = client.get_partition(db_name, tbl_name, part_vals); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + @Override + public List getPartitionsByNames(String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, MetaException, TException { + List parts = client.get_partitions_by_names(db_name, tbl_name, part_names); + return fastpath ? 
parts : deepCopyPartitions(filterHook.filterPartitions(parts)); + } + + @Override + public Partition getPartitionWithAuthInfo(String db_name, String tbl_name, + List part_vals, String user_name, List group_names) + throws MetaException, UnknownTableException, NoSuchObjectException, + TException { + Partition p = client.get_partition_with_auth(db_name, tbl_name, part_vals, user_name, + group_names); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + /** + * @param name + * @param dbname + * @return the table + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @throws NoSuchObjectException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_table(java.lang.String, + * java.lang.String) + */ + @Override + public Table getTable(String dbname, String name) throws MetaException, + TException, NoSuchObjectException { + Table t = client.get_table(dbname, name); + return fastpath ? t : deepCopy(filterHook.filterTable(t)); + } + + /** {@inheritDoc} */ + @Override + @Deprecated + public Table getTable(String tableName) throws MetaException, TException, + NoSuchObjectException { + Table t = getTable(DEFAULT_DATABASE_NAME, tableName); + return fastpath ? t : filterHook.filterTable(t); + } + + /** {@inheritDoc} */ + @Override + public List
<Table> getTableObjectsByName(String dbName, List<String> tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException { + List<Table>
tabs = client.get_table_objects_by_name(dbName, tableNames); + return fastpath ? tabs : deepCopyTables(filterHook.filterTables(tabs)); + } + + /** {@inheritDoc} */ + @Override + public List<String> listTableNamesByFilter(String dbName, String filter, short maxTables) + throws MetaException, TException, InvalidOperationException, UnknownDBException { + return filterHook.filterTableNames(dbName, + client.get_table_names_by_filter(dbName, filter, maxTables)); + } + + /** + * @param name + * @return the type + * @throws MetaException + * @throws TException + * @throws NoSuchObjectException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_type(java.lang.String) + */ + public Type getType(String name) throws NoSuchObjectException, MetaException, TException { + return deepCopy(client.get_type(name)); + } + + /** {@inheritDoc} */ + @Override + public List<String> getTables(String dbname, String tablePattern) throws MetaException { + try { + return filterHook.filterTableNames(dbname, client.get_tables(dbname, tablePattern)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override + public List<TableMeta> getTableMeta(String dbPatterns, String tablePatterns, List<String> tableTypes) + throws MetaException { + try { + return filterNames(client.get_table_meta(dbPatterns, tablePatterns, tableTypes)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + private List<TableMeta> filterNames(List<TableMeta> metas) throws MetaException { + Map<String, TableMeta> sources = new LinkedHashMap<>(); + Map<String, List<String>> dbTables = new LinkedHashMap<>(); + for (TableMeta meta : metas) { + sources.put(meta.getDbName() + "." + meta.getTableName(), meta); + List<String> tables = dbTables.get(meta.getDbName()); + if (tables == null) { + dbTables.put(meta.getDbName(), tables = new ArrayList<String>()); + } + tables.add(meta.getTableName()); + } + List<TableMeta> filtered = new ArrayList<>(); + for (Map.Entry<String, List<String>> entry : dbTables.entrySet()) { + for (String table : filterHook.filterTableNames(entry.getKey(), entry.getValue())) { + filtered.add(sources.get(entry.getKey() + "." 
+ table)); + } + } + return filtered; + } + + /** {@inheritDoc} */ + @Override + public List<String> getAllTables(String dbname) throws MetaException { + try { + return filterHook.filterTableNames(dbname, client.get_all_tables(dbname)); + } catch (Exception e) { + MetaStoreUtils.logAndThrowMetaException(e); + } + return null; + } + + @Override + public boolean tableExists(String databaseName, String tableName) throws MetaException, + TException, UnknownDBException { + try { + return filterHook.filterTable(client.get_table(databaseName, tableName)) != null; + } catch (NoSuchObjectException e) { + return false; + } + } + + /** {@inheritDoc} */ + @Override + @Deprecated + public boolean tableExists(String tableName) throws MetaException, + TException, UnknownDBException { + return tableExists(DEFAULT_DATABASE_NAME, tableName); + } + + @Override + public List<String> listPartitionNames(String dbName, String tblName, + short max) throws MetaException, TException { + return filterHook.filterPartitionNames(dbName, tblName, + client.get_partition_names(dbName, tblName, max)); + } + + @Override + public List<String> listPartitionNames(String db_name, String tbl_name, + List<String> part_vals, short max_parts) + throws MetaException, TException, NoSuchObjectException { + return filterHook.filterPartitionNames(db_name, tbl_name, + client.get_partition_names_ps(db_name, tbl_name, part_vals, max_parts)); + } + + /** + * Get number of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= \"p2_test\"". Filtering can + * be done only on string partition keys. + * @return number of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + public int getNumPartitionsByFilter(String db_name, String tbl_name, + String filter) throws MetaException, + NoSuchObjectException, TException { + return client.get_num_partitions_by_filter(db_name, tbl_name, filter); + } + + @Override + public void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + client.alter_partition_with_environment_context(dbName, tblName, newPart, environmentContext); + } + + @Override + public void alter_partitions(String dbName, String tblName, List<Partition> newParts, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException { + client.alter_partitions_with_environment_context(dbName, tblName, newParts, environmentContext); + } + + @Override + public void alterDatabase(String dbName, Database db) + throws MetaException, NoSuchObjectException, TException { + client.alter_database(dbName, db); + } + + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, + * java.lang.String) + */ + @Override + public List<FieldSchema> getFields(String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException { + List<FieldSchema> fields = client.get_fields(db, tableName); + return fastpath ? 
fields : deepCopyFieldSchemas(fields); + } + + /** + * create an index + * @param index the index object + * @param indexTable the table which stores the index data + * @throws InvalidObjectException + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @throws AlreadyExistsException + */ + @Override + public void createIndex(Index index, Table indexTable) throws AlreadyExistsException, InvalidObjectException, MetaException, NoSuchObjectException, TException { + client.add_index(index, indexTable); + } + + /** + * @param dbname + * @param base_tbl_name + * @param idx_name + * @param new_idx + * @throws InvalidOperationException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#alter_index(java.lang.String, + * java.lang.String, java.lang.String, org.apache.hadoop.hive.metastore.api.Index) + */ + @Override + public void alter_index(String dbname, String base_tbl_name, String idx_name, Index new_idx) + throws InvalidOperationException, MetaException, TException { + client.alter_index(dbname, base_tbl_name, idx_name, new_idx); + } + + /** + * @param dbName + * @param tblName + * @param indexName + * @return the index + * @throws MetaException + * @throws UnknownTableException + * @throws NoSuchObjectException + * @throws TException + */ + @Override + public Index getIndex(String dbName, String tblName, String indexName) + throws MetaException, UnknownTableException, NoSuchObjectException, + TException { + return deepCopy(filterHook.filterIndex(client.get_index_by_name(dbName, tblName, indexName))); + } + + /** + * list the index names of the given base table + * @param dbName + * @param tblName + * @param max + * @return the list of index names + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + @Override + public List<String> listIndexNames(String dbName, String tblName, short max) + throws MetaException, TException { + return filterHook.filterIndexNames(dbName, tblName, client.get_index_names(dbName, tblName, max)); + } + + /** + * list all the indexes of the given base table. 
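+ * For example, assuming a connected IMetaStoreClient named msClient (a hypothetical name): + * List<Index> indexes = msClient.listIndexes("db", "tbl", (short) 10); // at most 10 Index objects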
+ * + * @param dbName + * @param tblName + * @param max + * @return list of indexes + * @throws MetaException + * @throws TException + */ + @Override + public List listIndexes(String dbName, String tblName, short max) + throws NoSuchObjectException, MetaException, TException { + return filterHook.filterIndexes(client.get_indexes(dbName, tblName, max)); + } + + /** {@inheritDoc} */ + @Override + public boolean updateTableColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.update_table_column_statistics(statsObj); + } + + /** {@inheritDoc} */ + @Override + public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.update_partition_column_statistics(statsObj); + } + + /** {@inheritDoc} */ + @Override + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.set_aggr_stats_for(request); + } + + @Override + public void flushCache() { + try { + client.flushCache(); + } catch (TException e) { + // Not much we can do about it honestly + LOG.warn("Got error flushing the cache", e); + } + } + + /** {@inheritDoc} */ + @Override + public List getTableColumnStatistics(String dbName, String tableName, + List colNames) throws NoSuchObjectException, MetaException, TException, + InvalidInputException, InvalidObjectException { + return client.get_table_statistics_req( + new TableStatsRequest(dbName, tableName, colNames)).getTableStats(); + } + + /** {@inheritDoc} */ + @Override + public Map> getPartitionColumnStatistics( + String dbName, String tableName, List partNames, List colNames) + throws NoSuchObjectException, MetaException, TException { + return client.get_partitions_statistics_req( + new PartitionsStatsRequest(dbName, tableName, colNames, partNames)).getPartStats(); + } + + /** {@inheritDoc} */ + @Override + public boolean deletePartitionColumnStatistics(String dbName, String tableName, String partName, + String colName) throws NoSuchObjectException, InvalidObjectException, MetaException, + TException, InvalidInputException + { + return client.delete_partition_column_statistics(dbName, tableName, partName, colName); + } + + /** {@inheritDoc} */ + @Override + public boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException + { + return client.delete_table_column_statistics(dbName, tableName, colName); + } + + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, + * java.lang.String) + */ + @Override + public List getSchema(String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException { + EnvironmentContext envCxt = null; + String addedJars = conf.getVar(ConfVars.HIVEADDEDJARS); + if(org.apache.commons.lang.StringUtils.isNotBlank(addedJars)) { + Map props = new HashMap(); + props.put("hive.added.jars.path", addedJars); + envCxt = new EnvironmentContext(props); + } + + List fields = client.get_schema_with_environment_context(db, 
tableName, envCxt); + return fastpath ? fields : deepCopyFieldSchemas(fields); + } + + @Override + public String getConfigValue(String name, String defaultValue) + throws TException, ConfigValSecurityException { + return client.get_config_value(name, defaultValue); + } + + @Override + public Partition getPartition(String db, String tableName, String partName) + throws MetaException, TException, UnknownTableException, NoSuchObjectException { + Partition p = client.get_partition_by_name(db, tableName, partName); + return fastpath ? p : deepCopy(filterHook.filterPartition(p)); + } + + public Partition appendPartitionByName(String dbName, String tableName, String partName) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException { + return appendPartitionByName(dbName, tableName, partName, null); + } + + public Partition appendPartitionByName(String dbName, String tableName, String partName, + EnvironmentContext envContext) throws InvalidObjectException, AlreadyExistsException, + MetaException, TException { + Partition p = client.append_partition_by_name_with_environment_context(dbName, tableName, + partName, envContext); + return fastpath ? p : deepCopy(p); + } + + public boolean dropPartitionByName(String dbName, String tableName, String partName, + boolean deleteData) throws NoSuchObjectException, MetaException, TException { + return dropPartitionByName(dbName, tableName, partName, deleteData, null); + } + + public boolean dropPartitionByName(String dbName, String tableName, String partName, + boolean deleteData, EnvironmentContext envContext) throws NoSuchObjectException, + MetaException, TException { + return client.drop_partition_by_name_with_environment_context(dbName, tableName, partName, + deleteData, envContext); + } + + private HiveMetaHook getHook(Table tbl) throws MetaException { + if (hookLoader == null) { + return null; + } + return hookLoader.getHook(tbl); + } + + @Override + public List partitionNameToVals(String name) throws MetaException, TException { + return client.partition_name_to_vals(name); + } + + @Override + public Map partitionNameToSpec(String name) throws MetaException, TException{ + return client.partition_name_to_spec(name); + } + + /** + * @param partition + * @return + */ + private Partition deepCopy(Partition partition) { + Partition copy = null; + if (partition != null) { + copy = new Partition(partition); + } + return copy; + } + + private Database deepCopy(Database database) { + Database copy = null; + if (database != null) { + copy = new Database(database); + } + return copy; + } + + protected Table deepCopy(Table table) { + Table copy = null; + if (table != null) { + copy = new Table(table); + } + return copy; + } + + private Index deepCopy(Index index) { + Index copy = null; + if (index != null) { + copy = new Index(index); + } + return copy; + } + + private Type deepCopy(Type type) { + Type copy = null; + if (type != null) { + copy = new Type(type); + } + return copy; + } + + private FieldSchema deepCopy(FieldSchema schema) { + FieldSchema copy = null; + if (schema != null) { + copy = new FieldSchema(schema); + } + return copy; + } + + private Function deepCopy(Function func) { + Function copy = null; + if (func != null) { + copy = new Function(func); + } + return copy; + } + + protected PrincipalPrivilegeSet deepCopy(PrincipalPrivilegeSet pps) { + PrincipalPrivilegeSet copy = null; + if (pps != null) { + copy = new PrincipalPrivilegeSet(pps); + } + return copy; + } + + private List deepCopyPartitions(List partitions) { + 
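// Delegates to the two-argument overload; a null destination makes it allocate the result list. + 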
return deepCopyPartitions(partitions, null); + } + + private List deepCopyPartitions( + Collection src, List dest) { + if (src == null) { + return dest; + } + if (dest == null) { + dest = new ArrayList(src.size()); + } + for (Partition part : src) { + dest.add(deepCopy(part)); + } + return dest; + } + + private List
<Table> deepCopyTables(List<Table> tables) { + List<Table> copy = null; + if (tables != null) { + copy = new ArrayList<Table>
(); + for (Table tab : tables) { + copy.add(deepCopy(tab)); + } + } + return copy; + } + + protected List deepCopyFieldSchemas(List schemas) { + List copy = null; + if (schemas != null) { + copy = new ArrayList(); + for (FieldSchema schema : schemas) { + copy.add(deepCopy(schema)); + } + } + return copy; + } + + @Override + public boolean dropIndex(String dbName, String tblName, String name, + boolean deleteData) throws NoSuchObjectException, MetaException, + TException { + return client.drop_index_by_name(dbName, tblName, name, deleteData); + } + + @Override + public boolean grant_role(String roleName, String userName, + PrincipalType principalType, String grantor, PrincipalType grantorType, + boolean grantOption) throws MetaException, TException { + GrantRevokeRoleRequest req = new GrantRevokeRoleRequest(); + req.setRequestType(GrantRevokeType.GRANT); + req.setRoleName(roleName); + req.setPrincipalName(userName); + req.setPrincipalType(principalType); + req.setGrantor(grantor); + req.setGrantorType(grantorType); + req.setGrantOption(grantOption); + GrantRevokeRoleResponse res = client.grant_revoke_role(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean create_role(Role role) + throws MetaException, TException { + return client.create_role(role); + } + + @Override + public boolean drop_role(String roleName) throws MetaException, TException { + return client.drop_role(roleName); + } + + @Override + public List list_roles(String principalName, + PrincipalType principalType) throws MetaException, TException { + return client.list_roles(principalName, principalType); + } + + @Override + public List listRoleNames() throws MetaException, TException { + return client.get_role_names(); + } + + @Override + public GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest req) + throws MetaException, TException { + return client.get_principals_in_role(req); + } + + @Override + public GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( + GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException { + return client.get_role_grants_for_principal(getRolePrincReq); + } + + @Override + public boolean grant_privileges(PrivilegeBag privileges) + throws MetaException, TException { + GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); + req.setRequestType(GrantRevokeType.GRANT); + req.setPrivileges(privileges); + GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokePrivilegeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean revoke_role(String roleName, String userName, + PrincipalType principalType, boolean grantOption) throws MetaException, TException { + GrantRevokeRoleRequest req = new GrantRevokeRoleRequest(); + req.setRequestType(GrantRevokeType.REVOKE); + req.setRoleName(roleName); + req.setPrincipalName(userName); + req.setPrincipalType(principalType); + req.setGrantOption(grantOption); + GrantRevokeRoleResponse res = client.grant_revoke_role(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) throws MetaException, + TException { + GrantRevokePrivilegeRequest req = new GrantRevokePrivilegeRequest(); 
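+ // Grant and revoke share the grant_revoke_privileges RPC; the request type field selects the action.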
+ req.setRequestType(GrantRevokeType.REVOKE); + req.setPrivileges(privileges); + req.setRevokeGrantOption(grantOption); + GrantRevokePrivilegeResponse res = client.grant_revoke_privileges(req); + if (!res.isSetSuccess()) { + throw new MetaException("GrantRevokePrivilegeResponse missing success field"); + } + return res.isSuccess(); + } + + @Override + public PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, + String userName, List groupNames) throws MetaException, + TException { + return client.get_privilege_set(hiveObject, userName, groupNames); + } + + @Override + public List list_privileges(String principalName, + PrincipalType principalType, HiveObjectRef hiveObject) + throws MetaException, TException { + return client.list_privileges(principalName, principalType, hiveObject); + } + + public String getDelegationToken(String renewerKerberosPrincipalName) throws + MetaException, TException, IOException { + //a convenience method that makes the intended owner for the delegation + //token request the current user + String owner = conf.getUser(); + return getDelegationToken(owner, renewerKerberosPrincipalName); + } + + @Override + public String getDelegationToken(String owner, String renewerKerberosPrincipalName) throws + MetaException, TException { + // This is expected to be a no-op, so we will return null when we use local metastore. + if (localMetaStore) { + return null; + } + return client.get_delegation_token(owner, renewerKerberosPrincipalName); + } + + @Override + public long renewDelegationToken(String tokenStrForm) throws MetaException, TException { + if (localMetaStore) { + return 0; + } + return client.renew_delegation_token(tokenStrForm); + + } + + @Override + public void cancelDelegationToken(String tokenStrForm) throws MetaException, TException { + if (localMetaStore) { + return; + } + client.cancel_delegation_token(tokenStrForm); + } + + @Override + public boolean addToken(String tokenIdentifier, String delegationToken) throws TException { + return client.add_token(tokenIdentifier, delegationToken); + } + + @Override + public boolean removeToken(String tokenIdentifier) throws TException { + return client.remove_token(tokenIdentifier); + } + + @Override + public String getToken(String tokenIdentifier) throws TException { + return client.get_token(tokenIdentifier); + } + + @Override + public List getAllTokenIdentifiers() throws TException { + return client.get_all_token_identifiers(); + } + + @Override + public int addMasterKey(String key) throws MetaException, TException { + return client.add_master_key(key); + } + + @Override + public void updateMasterKey(Integer seqNo, String key) + throws NoSuchObjectException, MetaException, TException { + client.update_master_key(seqNo, key); + } + + @Override + public boolean removeMasterKey(Integer keySeq) throws TException { + return client.remove_master_key(keySeq); + } + + @Override + public String[] getMasterKeys() throws TException { + List keyList = client.get_master_keys(); + return keyList.toArray(new String[keyList.size()]); + } + + @Override + public ValidTxnList getValidTxns() throws TException { + return TxnUtils.createValidReadTxnList(client.get_open_txns(), 0); + } + + @Override + public ValidTxnList getValidTxns(long currentTxn) throws TException { + return TxnUtils.createValidReadTxnList(client.get_open_txns(), currentTxn); + } + + @Override + public long openTxn(String user) throws TException { + OpenTxnsResponse txns = openTxns(user, 1); + return txns.getTxn_ids().get(0); + } + + @Override + public 
OpenTxnsResponse openTxns(String user, int numTxns) throws TException { + String hostname = null; + try { + hostname = InetAddress.getLocalHost().getHostName(); + } catch (UnknownHostException e) { + LOG.error("Unable to resolve my host name " + e.getMessage()); + throw new RuntimeException(e); + } + return client.open_txns(new OpenTxnRequest(numTxns, user, hostname)); + } + + @Override + public void rollbackTxn(long txnid) throws NoSuchTxnException, TException { + client.abort_txn(new AbortTxnRequest(txnid)); + } + + @Override + public void commitTxn(long txnid) + throws NoSuchTxnException, TxnAbortedException, TException { + client.commit_txn(new CommitTxnRequest(txnid)); + } + + @Override + public GetOpenTxnsInfoResponse showTxns() throws TException { + return client.get_open_txns_info(); + } + + @Override + public LockResponse lock(LockRequest request) + throws NoSuchTxnException, TxnAbortedException, TException { + return client.lock(request); + } + + @Override + public LockResponse checkLock(long lockid) + throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, + TException { + return client.check_lock(new CheckLockRequest(lockid)); + } + + @Override + public void unlock(long lockid) + throws NoSuchLockException, TxnOpenException, TException { + client.unlock(new UnlockRequest(lockid)); + } + + @Override + public ShowLocksResponse showLocks() throws TException { + return client.show_locks(new ShowLocksRequest()); + } + + @Override + public void heartbeat(long txnid, long lockid) + throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, + TException { + HeartbeatRequest hb = new HeartbeatRequest(); + hb.setLockid(lockid); + hb.setTxnid(txnid); + client.heartbeat(hb); + } + + @Override + public HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) + throws NoSuchTxnException, TxnAbortedException, TException { + HeartbeatTxnRangeRequest rqst = new HeartbeatTxnRangeRequest(min, max); + return client.heartbeat_txn_range(rqst); + } + + @Override + public void compact(String dbname, String tableName, String partitionName, CompactionType type) + throws TException { + CompactionRequest cr = new CompactionRequest(); + if (dbname == null) cr.setDbname(DEFAULT_DATABASE_NAME); + else cr.setDbname(dbname); + cr.setTablename(tableName); + if (partitionName != null) cr.setPartitionname(partitionName); + cr.setType(type); + client.compact(cr); + } + + @Override + public ShowCompactResponse showCompactions() throws TException { + return client.show_compact(new ShowCompactRequest()); + } + + @Override + public void addDynamicPartitions(long txnId, String dbName, String tableName, + List partNames) throws TException { + client.add_dynamic_partitions(new AddDynamicPartitions(txnId, dbName, tableName, partNames)); + } + + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, + NotificationFilter filter) throws TException { + NotificationEventRequest rqst = new NotificationEventRequest(lastEventId); + rqst.setMaxEvents(maxEvents); + NotificationEventResponse rsp = client.get_next_notification(rqst); + LOG.debug("Got back " + rsp.getEventsSize() + " events"); + if (filter == null) { + return rsp; + } else { + NotificationEventResponse filtered = new NotificationEventResponse(); + if (rsp != null && rsp.getEvents() != null) { + for (NotificationEvent e : rsp.getEvents()) { + if (filter.accept(e)) filtered.addToEvents(e); + } + } + return filtered; + } + } + + 
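// An illustrative poll loop (a sketch only, not part of this patch; assumes a connected + // IMetaStoreClient named msClient and a lastSeenId persisted from an earlier run): + // NotificationEventResponse rsp = msClient.getNextNotification(lastSeenId, 100, null); + // if (rsp.getEvents() != null) { + //   for (NotificationEvent e : rsp.getEvents()) { + //     lastSeenId = e.getEventId(); // resume point for the next poll + //   } + // } + + 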
@InterfaceAudience.LimitedPrivate({"HCatalog"}) + @Override + public CurrentNotificationEventId getCurrentNotificationEventId() throws TException { + return client.get_current_notificationEventId(); + } + + @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) + @Override + public FireEventResponse fireListenerEvent(FireEventRequest rqst) throws TException { + return client.fire_listener_event(rqst); + } + + /** + * Creates a synchronized wrapper for any {@link IMetaStoreClient}. + * This may be used by multi-threaded applications until we have + * fixed all reentrancy bugs. + * + * @param client unsynchronized client + * + * @return synchronized client + */ + public static IMetaStoreClient newSynchronizedClient( + IMetaStoreClient client) { + return (IMetaStoreClient) Proxy.newProxyInstance( + HiveMetaStoreClient.class.getClassLoader(), + new Class [] { IMetaStoreClient.class }, + new SynchronizedHandler(client)); + } + + private static class SynchronizedHandler implements InvocationHandler { + private final IMetaStoreClient client; + + SynchronizedHandler(IMetaStoreClient client) { + this.client = client; + } + + @Override + public synchronized Object invoke(Object proxy, Method method, Object [] args) + throws Throwable { + try { + return method.invoke(client, args); + } catch (InvocationTargetException e) { + throw e.getTargetException(); + } + } + } + + @Override + public void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) + throws MetaException, TException, NoSuchObjectException, UnknownDBException, + UnknownTableException, + InvalidPartitionException, UnknownPartitionException { + assert db_name != null; + assert tbl_name != null; + assert partKVs != null; + client.markPartitionForEvent(db_name, tbl_name, partKVs, eventType); + } + + @Override + public boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, PartitionEventType eventType) + throws MetaException, NoSuchObjectException, UnknownTableException, UnknownDBException, TException, + InvalidPartitionException, UnknownPartitionException { + assert db_name != null; + assert tbl_name != null; + assert partKVs != null; + return client.isPartitionMarkedForEvent(db_name, tbl_name, partKVs, eventType); + } + + @Override + public void createFunction(Function func) throws InvalidObjectException, + MetaException, TException { + client.create_function(func); + } + + @Override + public void alterFunction(String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException { + client.alter_function(dbName, funcName, newFunction); + } + + @Override + public void dropFunction(String dbName, String funcName) + throws MetaException, NoSuchObjectException, InvalidObjectException, + InvalidInputException, TException { + client.drop_function(dbName, funcName); + } + + @Override + public Function getFunction(String dbName, String funcName) + throws MetaException, TException { + Function f = client.get_function(dbName, funcName); + return fastpath ? 
f : deepCopy(f); + } + + @Override + public List<String> getFunctions(String dbName, String pattern) + throws MetaException, TException { + return client.get_functions(dbName, pattern); + } + + @Override + public GetAllFunctionsResponse getAllFunctions() + throws MetaException, TException { + return client.get_all_functions(); + } + + protected void create_table_with_environment_context(Table tbl, EnvironmentContext envContext) + throws AlreadyExistsException, InvalidObjectException, + MetaException, NoSuchObjectException, TException { + client.create_table_with_environment_context(tbl, envContext); + } + + protected void drop_table_with_environment_context(String dbname, String name, + boolean deleteData, EnvironmentContext envContext) throws MetaException, TException, + NoSuchObjectException, UnsupportedOperationException { + client.drop_table_with_environment_context(dbname, name, deleteData, envContext); + } + + @Override + public AggrStats getAggrColStatsFor(String dbName, String tblName, + List<String> colNames, List<String> partNames) throws NoSuchObjectException, MetaException, TException { + if (colNames.isEmpty() || partNames.isEmpty()) { + LOG.debug("Columns is empty or partNames is empty : Short-circuiting stats eval on client side."); + return new AggrStats(new ArrayList<ColumnStatisticsObj>(), 0); // Nothing to aggregate + } + PartitionsStatsRequest req = new PartitionsStatsRequest(dbName, tblName, colNames, partNames); + return client.get_aggr_stats_for(req); + } + + @Override + public Iterable<Entry<Long, ByteBuffer>> getFileMetadata( + final List<Long> fileIds) throws TException { + return new MetastoreMapIterable<Long, ByteBuffer>() { + private int listIndex = 0; + @Override + protected Map<Long, ByteBuffer> fetchNextBatch() throws TException { + if (listIndex == fileIds.size()) return null; + int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size()); + List<Long> subList = fileIds.subList(listIndex, endIndex); + GetFileMetadataResult resp = sendGetFileMetadataReq(subList); + // TODO: we could remember if it's unsupported and stop sending calls; although, it might + // be a bad idea for HS2+standalone metastore that could be updated with support. + // Maybe we should just remember this for some time. 
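+ // Each batch is a single Thrift round trip of at most fileMetadataBatchSize ids; returning + // null here ends the iteration (see MetastoreMapIterable below).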
+ if (!resp.isIsSupported()) return null; + listIndex = endIndex; + return resp.getMetadata(); + } + }; + } + + private GetFileMetadataResult sendGetFileMetadataReq(List<Long> fileIds) throws TException { + return client.get_file_metadata(new GetFileMetadataRequest(fileIds)); + } + + @Override + public Iterable<Entry<Long, MetadataPpdResult>> getFileMetadataBySarg( + final List<Long> fileIds, final ByteBuffer sarg, final boolean doGetFooters) + throws TException { + return new MetastoreMapIterable<Long, MetadataPpdResult>() { + private int listIndex = 0; + @Override + protected Map<Long, MetadataPpdResult> fetchNextBatch() throws TException { + if (listIndex == fileIds.size()) return null; + int endIndex = Math.min(listIndex + fileMetadataBatchSize, fileIds.size()); + List<Long> subList = fileIds.subList(listIndex, endIndex); + GetFileMetadataByExprResult resp = sendGetFileMetadataBySargReq( + sarg, subList, doGetFooters); + if (!resp.isIsSupported()) return null; + listIndex = endIndex; + return resp.getMetadata(); + } + }; + } + + private GetFileMetadataByExprResult sendGetFileMetadataBySargReq( + ByteBuffer sarg, List<Long> fileIds, boolean doGetFooters) throws TException { + GetFileMetadataByExprRequest req = new GetFileMetadataByExprRequest(fileIds, sarg); + req.setDoGetFooters(doGetFooters); // footers are fetched only if the caller asked for them + return client.get_file_metadata_by_expr(req); + } + + public static abstract class MetastoreMapIterable<K, V> + implements Iterable<Entry<K, V>>, Iterator<Entry<K, V>> { + private Iterator<Entry<K, V>> currentIter; + + protected abstract Map<K, V> fetchNextBatch() throws TException; + + @Override + public Iterator<Entry<K, V>> iterator() { + return this; + } + + @Override + public boolean hasNext() { + ensureCurrentBatch(); + return currentIter != null; + } + + private void ensureCurrentBatch() { + if (currentIter != null && currentIter.hasNext()) return; + currentIter = null; + Map<K, V> currentBatch; + do { + try { + currentBatch = fetchNextBatch(); + } catch (TException ex) { + throw new RuntimeException(ex); + } + if (currentBatch == null) return; // No more data. 
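+ // Loop past empty batches so next() never draws from an empty iterator.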
+ } while (currentBatch.isEmpty()); + currentIter = currentBatch.entrySet().iterator(); + } + + @Override + public Entry next() { + ensureCurrentBatch(); + if (currentIter == null) throw new NoSuchElementException(); + return currentIter.next(); + } + + @Override + public void remove() { + throw new UnsupportedOperationException(); + } + } + + @Override + public void clearFileMetadata(List fileIds) throws TException { + ClearFileMetadataRequest req = new ClearFileMetadataRequest(); + req.setFileIds(fileIds); + client.clear_file_metadata(req); + } + + @Override + public void putFileMetadata(List fileIds, List metadata) throws TException { + PutFileMetadataRequest req = new PutFileMetadataRequest(); + req.setFileIds(fileIds); + req.setMetadata(metadata); + client.put_file_metadata(req); + } + + @Override + public boolean isSameConfObj(HiveConf c) { + return conf == c; + } + + @Override + public boolean cacheFileMetadata( + String dbName, String tableName, String partName, boolean allParts) throws TException { + CacheFileMetadataRequest req = new CacheFileMetadataRequest(); + req.setDbName(dbName); + req.setTblName(tableName); + if (partName != null) { + req.setPartName(partName); + } else { + req.setIsAllParts(allParts); + } + CacheFileMetadataResult result = client.cache_file_metadata(req); + return result.isIsSupported(); + } + + @Override + public long getChangeVersion(String topic) throws TException { + return client.get_change_version(new GetChangeVersionRequest(topic)).getVersion(); + } +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 39cf927..b513798 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -68,6 +68,8 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; @@ -1554,4 +1556,11 @@ boolean cacheFileMetadata(String dbName, String tableName, String partName, boolean allParts) throws TException; long getChangeVersion(String topic) throws TException; + + List getPrimaryKeys(String dbName, String tblName) + throws MetaException, NoSuchObjectException, TException; + + List getForeignKeys(String parentDbName, String parentTbName, + String foreignDbName, String foreignTblName) throws MetaException, + NoSuchObjectException, TException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java.orig b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java.orig new file mode 100644 index 0000000..39cf927 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java.orig @@ -0,0 +1,1557 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + + +import org.apache.hadoop.hive.common.ObjectPair; +import org.apache.hadoop.hive.common.ValidTxnList; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; +import org.apache.hadoop.hive.common.classification.InterfaceAudience.Public; +import org.apache.hadoop.hive.common.classification.InterfaceStability.Evolving; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; +import org.apache.hadoop.hive.metastore.api.AlreadyExistsException; +import org.apache.hadoop.hive.metastore.api.ColumnStatistics; +import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.CompactionType; +import org.apache.hadoop.hive.metastore.api.ConfigValSecurityException; +import org.apache.hadoop.hive.metastore.api.CurrentNotificationEventId; +import org.apache.hadoop.hive.metastore.api.Database; +import org.apache.hadoop.hive.metastore.api.EnvironmentContext; +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.FireEventRequest; +import org.apache.hadoop.hive.metastore.api.FireEventResponse; +import org.apache.hadoop.hive.metastore.api.Function; +import org.apache.hadoop.hive.metastore.api.GetAllFunctionsResponse; +import org.apache.hadoop.hive.metastore.api.GetOpenTxnsInfoResponse; +import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleRequest; +import org.apache.hadoop.hive.metastore.api.GetPrincipalsInRoleResponse; +import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalRequest; +import org.apache.hadoop.hive.metastore.api.GetRoleGrantsForPrincipalResponse; +import org.apache.hadoop.hive.metastore.api.HeartbeatTxnRangeResponse; +import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; +import org.apache.hadoop.hive.metastore.api.HiveObjectRef; +import org.apache.hadoop.hive.metastore.api.Index; +import org.apache.hadoop.hive.metastore.api.InvalidInputException; +import org.apache.hadoop.hive.metastore.api.InvalidObjectException; +import org.apache.hadoop.hive.metastore.api.InvalidOperationException; +import org.apache.hadoop.hive.metastore.api.InvalidPartitionException; +import org.apache.hadoop.hive.metastore.api.LockRequest; +import org.apache.hadoop.hive.metastore.api.LockResponse; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.MetadataPpdResult; +import org.apache.hadoop.hive.metastore.api.NoSuchLockException; +import org.apache.hadoop.hive.metastore.api.NoSuchObjectException; +import org.apache.hadoop.hive.metastore.api.NoSuchTxnException; +import org.apache.hadoop.hive.metastore.api.NotificationEvent; +import org.apache.hadoop.hive.metastore.api.NotificationEventResponse; +import org.apache.hadoop.hive.metastore.api.OpenTxnsResponse; +import org.apache.hadoop.hive.metastore.api.Partition; +import 
org.apache.hadoop.hive.metastore.api.PartitionEventType; +import org.apache.hadoop.hive.metastore.api.PrincipalPrivilegeSet; +import org.apache.hadoop.hive.metastore.api.PrincipalType; +import org.apache.hadoop.hive.metastore.api.PrivilegeBag; +import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; +import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.TableMeta; +import org.apache.hadoop.hive.metastore.api.TxnAbortedException; +import org.apache.hadoop.hive.metastore.api.TxnOpenException; +import org.apache.hadoop.hive.metastore.api.UnknownDBException; +import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; +import org.apache.hadoop.hive.metastore.api.UnknownTableException; +import org.apache.hadoop.hive.metastore.partition.spec.PartitionSpecProxy; +import org.apache.thrift.TException; + +import java.io.IOException; +import java.nio.ByteBuffer; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +/** + * Wrapper around hive metastore thrift api + */ +@Public +@Evolving +public interface IMetaStoreClient { + public static final String PERMANENT_FUNCTION_CV = "PERMANENT_FUNCTION"; + + /** + * Returns whether current client is compatible with conf argument or not + * @return + */ + boolean isCompatibleWith(HiveConf conf); + + /** + * Set added jars path info to MetaStoreClient. + * @param addedJars the hive.added.jars.path. It is qualified paths separated by commas. + */ + void setHiveAddedJars(String addedJars); + + /** + * Returns true if the current client is using an in process metastore (local metastore). + * + * @return + */ + boolean isLocalMetaStore(); + + /** + * Tries to reconnect this MetaStoreClient to the MetaStore. + */ + void reconnect() throws MetaException; + + /** + * close connection to meta store + */ + void close(); + + /** + * set meta variable which is open to end users + */ + void setMetaConf(String key, String value) throws MetaException, TException; + + /** + * get current meta variable + */ + String getMetaConf(String key) throws MetaException, TException; + + /** + * Get the names of all databases in the MetaStore that match the given pattern. + * @param databasePattern + * @return List of database names. + * @throws MetaException + * @throws TException + */ + List getDatabases(String databasePattern) throws MetaException, TException; + + /** + * Get the names of all databases in the MetaStore. + * @return List of database names. + * @throws MetaException + * @throws TException + */ + List getAllDatabases() throws MetaException, TException; + + /** + * Get the names of all tables in the specified database that satisfy the supplied + * table name pattern. + * @param dbName + * @param tablePattern + * @return List of table names. + * @throws MetaException + * @throws TException + * @throws UnknownDBException + */ + List getTables(String dbName, String tablePattern) + throws MetaException, TException, UnknownDBException; + + /** + * For quick GetTablesOperation + */ + List getTableMeta(String dbPatterns, String tablePatterns, List tableTypes) + throws MetaException, TException, UnknownDBException; + + /** + * Get the names of all tables in the specified database. + * @param dbName + * @return List of table names. 
+ * @throws MetaException + * @throws TException + * @throws UnknownDBException + */ + List getAllTables(String dbName) throws MetaException, TException, UnknownDBException; + + /** + * Get a list of table names that match a filter. + * The filter operators are LIKE, <, <=, >, >=, =, <> + * + * In the filter statement, values interpreted as strings must be enclosed in quotes, + * while values interpreted as integers should not be. Strings and integers are the only + * supported value types. + * + * The currently supported key names in the filter are: + * Constants.HIVE_FILTER_FIELD_OWNER, which filters on the tables' owner's name + * and supports all filter operators + * Constants.HIVE_FILTER_FIELD_LAST_ACCESS, which filters on the last access times + * and supports all filter operators except LIKE + * Constants.HIVE_FILTER_FIELD_PARAMS, which filters on the tables' parameter keys and values + * and only supports the filter operators = and <>. + * Append the parameter key name to HIVE_FILTER_FIELD_PARAMS in the filter statement. + * For example, to filter on parameter keys called "retention", the key name in the filter + * statement should be Constants.HIVE_FILTER_FIELD_PARAMS + "retention" + * Also, = and <> only work for keys that exist in the tables. + * E.g., filtering on tables where key1 <> value will only + * return tables that have a value for the parameter key1. + * Some example filter statements include: + * filter = Constants.HIVE_FILTER_FIELD_OWNER + " like \".*test.*\" and " + + * Constants.HIVE_FILTER_FIELD_LAST_ACCESS + " = 0"; + * filter = Constants.HIVE_FILTER_FIELD_OWNER + " = \"test_user\" and (" + + * Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"30\" or " + + * Constants.HIVE_FILTER_FIELD_PARAMS + "retention = \"90\")" + * + * @param dbName + * The name of the database from which you will retrieve the table names + * @param filter + * The filter string + * @param maxTables + * The maximum number of tables returned + * @return A list of table names that match the desired filter + */ + List listTableNamesByFilter(String dbName, String filter, short maxTables) + throws MetaException, TException, InvalidOperationException, UnknownDBException; + + + /** + * Drop the table. + * + * @param dbname + * The database for this table + * @param tableName + * The table to drop + * @param deleteData + * Should we delete the underlying data + * @param ignoreUnknownTab + * don't throw if the requested table doesn't exist + * @throws MetaException + * Could not drop table properly. + * @throws NoSuchObjectException + * The table wasn't found. + * @throws TException + * A thrift communication error occurred + */ + void dropTable(String dbname, String tableName, boolean deleteData, + boolean ignoreUnknownTab) throws MetaException, TException, + NoSuchObjectException; + + /** + * @param ifPurge + * completely purge the table (skipping trash) while removing data from warehouse + * @see #dropTable(String, String, boolean, boolean) + */ + public void dropTable(String dbname, String tableName, boolean deleteData, + boolean ignoreUnknownTab, boolean ifPurge) throws MetaException, TException, + NoSuchObjectException; + + /** + * Drop the table in the DEFAULT database. + * + * @param tableName + * The table to drop + * @param deleteData + * Should we delete the underlying data + * @throws MetaException + * Could not drop table properly. + * @throws UnknownTableException + * The table wasn't found. 
+ * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * The table wasn't found. + * + * @deprecated As of release 0.6.0 replaced by {@link #dropTable(String, String, boolean, boolean)}. + * This method will be removed in release 0.7.0. + */ + @Deprecated + void dropTable(String tableName, boolean deleteData) + throws MetaException, UnknownTableException, TException, NoSuchObjectException; + + /** + * @see #dropTable(String, String, boolean, boolean) + */ + void dropTable(String dbname, String tableName) + throws MetaException, TException, NoSuchObjectException; + + boolean tableExists(String databaseName, String tableName) throws MetaException, + TException, UnknownDBException; + + /** + * Check to see if the specified table exists in the DEFAULT database. + * @param tableName + * @return TRUE if DEFAULT.tableName exists, FALSE otherwise. + * @throws MetaException + * @throws TException + * @throws UnknownDBException + * @deprecated As of release 0.6.0 replaced by {@link #tableExists(String, String)}. + * This method will be removed in release 0.7.0. + */ + @Deprecated + boolean tableExists(String tableName) throws MetaException, + TException, UnknownDBException; + + /** + * Get a table object from the DEFAULT database. + * + * @param tableName + * Name of the table to fetch. + * @return An object representing the table. + * @throws MetaException + * Could not fetch the table + * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * In case the table wasn't found. + * @deprecated As of release 0.6.0 replaced by {@link #getTable(String, String)}. + * This method will be removed in release 0.7.0. + */ + @Deprecated + Table getTable(String tableName) throws MetaException, TException, NoSuchObjectException; + + /** + * Get a Database Object + * @param databaseName name of the database to fetch + * @return the database + * @throws NoSuchObjectException The database does not exist + * @throws MetaException Could not fetch the database + * @throws TException A thrift communication error occurred + */ + Database getDatabase(String databaseName) + throws NoSuchObjectException, MetaException, TException; + + + /** + * Get a table object. + * + * @param dbName + * The database the table is located in. + * @param tableName + * Name of the table to fetch. + * @return An object representing the table. + * @throws MetaException + * Could not fetch the table + * @throws TException + * A thrift communication error occurred + * @throws NoSuchObjectException + * In case the table wasn't found. + */ + Table getTable(String dbName, String tableName) throws MetaException, + TException, NoSuchObjectException; + + /** + * + * @param dbName + * The database the tables are located in. + * @param tableNames + * The names of the tables to fetch + * @return A list of objects representing the tables. + * Only the tables that can be retrieved from the database are returned. For example, + * if none of the requested tables could be retrieved, an empty list is returned. + * There is no guarantee of ordering of the returned tables. + * @throws InvalidOperationException + * The input to this operation is invalid (e.g., the list of tables names is null) + * @throws UnknownDBException + * The requested database could not be fetched. + * @throws TException + * A thrift communication error occurred + * @throws MetaException + * Any other errors + */ + List
getTableObjectsByName(String dbName, List tableNames) + throws MetaException, InvalidOperationException, UnknownDBException, TException; + + /** + * @param tableName + * @param dbName + * @param partVals + * @return the partition object + * @throws InvalidObjectException + * @throws AlreadyExistsException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#append_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + Partition appendPartition(String tableName, String dbName, + List partVals) throws InvalidObjectException, + AlreadyExistsException, MetaException, TException; + + Partition appendPartition(String tableName, String dbName, String name) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + /** + * Add a partition to the table. + * + * @param partition + * The partition to add + * @return The partition added + * @throws InvalidObjectException + * Could not find table to add to + * @throws AlreadyExistsException + * Partition already exists + * @throws MetaException + * Could not add partition + * @throws TException + * Thrift exception + */ + Partition add_partition(Partition partition) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + /** + * Add partitions to the table. + * + * @param partitions + * The partitions to add + * @throws InvalidObjectException + * Could not find table to add to + * @throws AlreadyExistsException + * Partition already exists + * @throws MetaException + * Could not add partition + * @throws TException + * Thrift exception + */ + int add_partitions(List partitions) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + int add_partitions_pspec(PartitionSpecProxy partitionSpec) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + /** + * Add partitions to the table. + * + * @param partitions The partitions to add + * @param ifNotExists only add partitions if they don't exist + * @param needResults Whether the results are needed + * @return the partitions that were added, or null if !needResults + */ + List add_partitions( + List partitions, boolean ifNotExists, boolean needResults) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + /** + * @param dbName + * @param tblName + * @param partVals + * @return the partition object + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + Partition getPartition(String dbName, String tblName, + List partVals) throws NoSuchObjectException, MetaException, TException; + + /** + * @param partitionSpecs + * @param sourceDb + * @param sourceTable + * @param destdb + * @param destTableName + * @return partition object + */ + Partition exchange_partition(Map partitionSpecs, + String sourceDb, String sourceTable, String destdb, + String destTableName) throws MetaException, NoSuchObjectException, + InvalidObjectException, TException; + + /** + * With one partitionSpecs map, multiple partitions can be exchanged at once; + * e.g., for year=2015/month/day, exchanging partition year=2015 results in all the + * partitions belonging to it being exchanged. This function returns the list of affected partitions. 
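For illustration only, not part of the patch: a minimal caller-side sketch of the hierarchical exchange described above. It assumes a connected IMetaStoreClient named client; the database and table names are invented.

    // Exchange every partition under year=2015 in one call.
    Map<String, String> partitionSpecs = new HashMap<String, String>();
    partitionSpecs.put("year", "2015"); // month/day left unset, so the whole year moves
    List<Partition> moved = client.exchange_partitions(
        partitionSpecs, "staging_db", "events_tmp", "prod_db", "events");
    for (Partition p : moved) {
      System.out.println("exchanged: " + p.getValues());
    }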
+ * @param partitionSpecs + * @param sourceDb + * @param sourceTable + * @param destdb + * @param destTableName + * @return the list of the new partitions + */ + List exchange_partitions(Map partitionSpecs, + String sourceDb, String sourceTable, String destdb, + String destTableName) throws MetaException, NoSuchObjectException, + InvalidObjectException, TException; + + /** + * @param dbName + * @param tblName + * @param name - partition name i.e. 'ds=2010-02-03/ts=2010-02-03 18%3A16%3A01' + * @return the partition object + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_partition(java.lang.String, + * java.lang.String, java.util.List) + */ + Partition getPartition(String dbName, String tblName, + String name) throws MetaException, UnknownTableException, NoSuchObjectException, TException; + + + /** + * @param dbName + * @param tableName + * @param pvals + * @param userName + * @param groupNames + * @return the partition + * @throws MetaException + * @throws UnknownTableException + * @throws NoSuchObjectException + * @throws TException + */ + Partition getPartitionWithAuthInfo(String dbName, String tableName, + List pvals, String userName, List groupNames) + throws MetaException, UnknownTableException, NoSuchObjectException, TException; + + /** + * @param tbl_name + * @param db_name + * @param max_parts + * @return the list of partitions + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + List listPartitions(String db_name, String tbl_name, + short max_parts) throws NoSuchObjectException, MetaException, TException; + + public PartitionSpecProxy listPartitionSpecs(String dbName, String tableName, int maxParts) + throws TException; + List listPartitions(String db_name, String tbl_name, + List part_vals, short max_parts) throws NoSuchObjectException, MetaException, TException; + + List listPartitionNames(String db_name, String tbl_name, + short max_parts) throws MetaException, TException; + + List listPartitionNames(String db_name, String tbl_name, + List part_vals, short max_parts) + throws MetaException, TException, NoSuchObjectException; + + /** + * Get number of partitions matching specified filter + * @param dbName the database name + * @param tableName the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can + * be done only on string partition keys. + * @return number of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + public int getNumPartitionsByFilter(String dbName, String tableName, + String filter) throws MetaException, NoSuchObjectException, TException; + + + /** + * Get list of partitions matching specified filter + * @param db_name the database name + * @param tbl_name the table name + * @param filter the filter string, + * for example "part1 = \"p1_abc\" and part2 <= "\p2_test\"". Filtering can + * be done only on string partition keys. 
+ * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @return list of partitions + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + */ + List listPartitionsByFilter(String db_name, String tbl_name, + String filter, short max_parts) throws MetaException, + NoSuchObjectException, TException; + + PartitionSpecProxy listPartitionSpecsByFilter(String db_name, String tbl_name, + String filter, int max_parts) throws MetaException, + NoSuchObjectException, TException; + + /** + * Get list of partitions matching specified serialized expression + * @param db_name the database name + * @param tbl_name the table name + * @param expr expression, serialized from ExprNodeDesc + * @param max_parts the maximum number of partitions to return, + * all partitions are returned if -1 is passed + * @param default_partition_name Default partition name from configuration. If blank, the + * metastore server-side configuration is used. + * @param result the resulting list of partitions + * @return whether the resulting list contains partitions which may or may not match the expr + */ + boolean listPartitionsByExpr(String db_name, String tbl_name, + byte[] expr, String default_partition_name, short max_parts, List result) + throws TException; + + /** + * @param dbName + * @param tableName + * @param s + * @param userName + * @param groupNames + * @return the list of partitions + * @throws NoSuchObjectException + */ + List listPartitionsWithAuthInfo(String dbName, + String tableName, short s, String userName, List groupNames) + throws MetaException, TException, NoSuchObjectException; + + /** + * Get partitions by a list of partition names. + * @param db_name database name + * @param tbl_name table name + * @param part_names list of partition names + * @return list of Partition objects + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + List getPartitionsByNames(String db_name, String tbl_name, + List part_names) throws NoSuchObjectException, MetaException, TException; + + /** + * @param dbName + * @param tableName + * @param partialPvals + * @param s + * @param userName + * @param groupNames + * @return the list of partitions + * @throws NoSuchObjectException + */ + List listPartitionsWithAuthInfo(String dbName, + String tableName, List partialPvals, short s, String userName, + List groupNames) throws MetaException, TException, NoSuchObjectException; + + /** + * @param db_name + * @param tbl_name + * @param partKVs + * @param eventType + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @throws UnknownTableException + * @throws UnknownDBException + * @throws UnknownPartitionException + * @throws InvalidPartitionException + */ + void markPartitionForEvent(String db_name, String tbl_name, Map partKVs, + PartitionEventType eventType) throws MetaException, NoSuchObjectException, TException, + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + + /** + * @param db_name + * @param tbl_name + * @param partKVs + * @param eventType + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @throws UnknownTableException + * @throws UnknownDBException + * @throws UnknownPartitionException + * @throws InvalidPartitionException + */ + boolean isPartitionMarkedForEvent(String db_name, String tbl_name, Map partKVs, + PartitionEventType eventType) throws MetaException, NoSuchObjectException, 
TException, + UnknownTableException, UnknownDBException, UnknownPartitionException, InvalidPartitionException; + + /** + * @param partVals + * @throws TException + * @throws MetaException + */ + void validatePartitionNameCharacters(List partVals) throws TException, MetaException; + + /** + * @param tbl + * @throws AlreadyExistsException + * @throws InvalidObjectException + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#create_table(org.apache.hadoop.hive.metastore.api.Table) + */ + + void createTable(Table tbl) throws AlreadyExistsException, + InvalidObjectException, MetaException, NoSuchObjectException, TException; + + void alter_table(String defaultDatabaseName, String tblName, + Table table) throws InvalidOperationException, MetaException, TException; + + //wrapper of alter_table_with_cascade + void alter_table_with_environmentContext(String defaultDatabaseName, String tblName, Table table, + EnvironmentContext environmentContext) throws InvalidOperationException, MetaException, + TException; + + void createDatabase(Database db) + throws InvalidObjectException, AlreadyExistsException, MetaException, TException; + + void dropDatabase(String name) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; + + void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; + + void dropDatabase(String name, boolean deleteData, boolean ignoreUnknownDb, boolean cascade) + throws NoSuchObjectException, InvalidOperationException, MetaException, TException; + + void alterDatabase(String name, Database db) + throws NoSuchObjectException, MetaException, TException; + + /** + * @param db_name + * @param tbl_name + * @param part_vals + * @param deleteData + * delete the underlying data or just delete the table in metadata + * @return true or false + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#drop_partition(java.lang.String, + * java.lang.String, java.util.List, boolean) + */ + boolean dropPartition(String db_name, String tbl_name, + List part_vals, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + + /** + * Method to dropPartitions() with the option to purge the partition data directly, + * rather than to move data to trash. + * @param db_name Name of the database. + * @param tbl_name Name of the table. + * @param part_vals Specification of the partitions being dropped. + * @param options PartitionDropOptions for the operation. + * @return True (if partitions are dropped), else false. 
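For illustration only, not part of the patch: a hedged sketch of the purge-capable overload described above, assuming PartitionDropOptions exposes the fluent instance()/deleteData()/purgeData() builders; client and all names are invented.

    // Drop one partition and delete its files directly instead of moving them to trash.
    boolean dropped = client.dropPartition(
        "prod_db", "events",
        Arrays.asList("2015", "07", "01"),  // the partition values
        PartitionDropOptions.instance()
            .deleteData(true)               // remove data, not just metadata
            .purgeData(true));              // skip the trash directory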
+ * @throws TException + */ + boolean dropPartition(String db_name, String tbl_name, List part_vals, + PartitionDropOptions options) throws TException; + + List dropPartitions(String dbName, String tblName, + List> partExprs, boolean deleteData, + boolean ifExists) throws NoSuchObjectException, MetaException, TException; + + List dropPartitions(String dbName, String tblName, + List> partExprs, boolean deleteData, + boolean ifExists, boolean needResults) throws NoSuchObjectException, MetaException, TException; + + /** + * Generalization of dropPartitions(), + * @param dbName Name of the database + * @param tblName Name of the table + * @param partExprs Partition-specification + * @param options Boolean options for dropping partitions + * @return List of Partitions dropped + * @throws TException On failure + */ + List dropPartitions(String dbName, String tblName, + List> partExprs, PartitionDropOptions options) throws TException; + + boolean dropPartition(String db_name, String tbl_name, + String name, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + /** + * updates a partition to new partition + * + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partition(String dbName, String tblName, Partition newPart, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException; + + /** + * updates a list of partitions + * + * @param dbName + * database of the old partition + * @param tblName + * table name of the old partition + * @param newParts + * list of partitions + * @throws InvalidOperationException + * if the old partition does not exist + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void alter_partitions(String dbName, String tblName, List newParts, EnvironmentContext environmentContext) + throws InvalidOperationException, MetaException, TException; + + /** + * rename a partition to a new partition + * + * @param dbname + * database of the old partition + * @param name + * table name of the old partition + * @param part_vals + * values of the old partition + * @param newPart + * new partition + * @throws InvalidOperationException + * if srcFs and destFs are different + * @throws MetaException + * if error in updating metadata + * @throws TException + * if error in communicating with metastore server + */ + void renamePartition(final String dbname, final String name, final List part_vals, final Partition newPart) + throws InvalidOperationException, MetaException, TException; + + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_fields(java.lang.String, + * java.lang.String) + */ + List getFields(String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException; + + /** + * @param db + * @param tableName + * @throws UnknownTableException + * @throws UnknownDBException + * @throws MetaException + * @throws TException + * @see 
org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface#get_schema(java.lang.String, + * java.lang.String) + */ + List getSchema(String db, String tableName) + throws MetaException, TException, UnknownTableException, + UnknownDBException; + + /** + * @param name + * name of the configuration property to get the value of + * @param defaultValue + * the value to return if property with the given name doesn't exist + * @return value of the specified configuration property + * @throws TException + * @throws ConfigValSecurityException + */ + String getConfigValue(String name, String defaultValue) + throws TException, ConfigValSecurityException; + + /** + * + * @param name + * the partition name e.g. ("ds=2010-03-03/hr=12") + * @return a list containing the partition col values, in the same order as the name + * @throws MetaException + * @throws TException + */ + List partitionNameToVals(String name) + throws MetaException, TException; + /** + * + * @param name + * the partition name e.g. ("ds=2010-03-03/hr=12") + * @return a map from the partition col to the value, as listed in the name + * @throws MetaException + * @throws TException + */ + Map partitionNameToSpec(String name) + throws MetaException, TException; + + /** + * create an index + * @param index the index object + * @throws InvalidObjectException + * @throws MetaException + * @throws NoSuchObjectException + * @throws TException + * @throws AlreadyExistsException + */ + void createIndex(Index index, Table indexTable) throws InvalidObjectException, + MetaException, NoSuchObjectException, TException, AlreadyExistsException; + + void alter_index(String dbName, String tblName, String indexName, + Index index) throws InvalidOperationException, MetaException, TException; + + /** + * + * @param dbName + * @param tblName + * @param indexName + * @return the index + * @throws MetaException + * @throws UnknownTableException + * @throws NoSuchObjectException + * @throws TException + */ + Index getIndex(String dbName, String tblName, String indexName) + throws MetaException, UnknownTableException, NoSuchObjectException, + TException; + + + /** + * list indexes of the given base table + * @param db_name + * @param tbl_name + * @param max + * @return the list of indexes + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + List listIndexes(String db_name, String tbl_name, + short max) throws NoSuchObjectException, MetaException, TException; + + /** + * list all the index names of the given base table. 
+ * + * @param db_name + * @param tbl_name + * @param max + * @return the list of names + * @throws MetaException + * @throws TException + */ + List listIndexNames(String db_name, String tbl_name, + short max) throws MetaException, TException; + + /** + * @param db_name + * @param tbl_name + * @param name index name + * @param deleteData + * @return true on success + * @throws NoSuchObjectException + * @throws MetaException + * @throws TException + */ + boolean dropIndex(String db_name, String tbl_name, + String name, boolean deleteData) throws NoSuchObjectException, + MetaException, TException; + + /** + * Write table level column statistics to persistent store + * @param statsObj + * @return boolean indicating the status of the operation + * @throws NoSuchObjectException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @throws InvalidInputException + */ + + boolean updateTableColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException; + + /** + * Write partition level column statistics to persistent store + * @param statsObj + * @return boolean indicating the status of the operation + * @throws NoSuchObjectException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @throws InvalidInputException + */ + + boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException; + + /** + * Get table column statistics given dbName, tableName and multiple colName-s + * @return ColumnStatistics struct for a given db, table and columns + */ + List getTableColumnStatistics(String dbName, String tableName, + List colNames) throws NoSuchObjectException, MetaException, TException; + + /** + * Get partitions column statistics given dbName, tableName, multiple partitions and colName-s + * @return ColumnStatistics struct for a given db, table and columns + */ + Map> getPartitionColumnStatistics(String dbName, + String tableName, List partNames, List colNames) + throws NoSuchObjectException, MetaException, TException; + + /** + * Delete partition level column statistics given dbName, tableName, partName and colName + * @param dbName + * @param tableName + * @param partName + * @param colName + * @return boolean indicating outcome of the operation + * @throws NoSuchObjectException + * @throws InvalidObjectException + * @throws MetaException + * @throws TException + * @throws InvalidInputException + */ + + boolean deletePartitionColumnStatistics(String dbName, String tableName, + String partName, String colName) throws NoSuchObjectException, MetaException, + InvalidObjectException, TException, InvalidInputException; + + /** + * Delete table level column statistics given dbName, tableName and colName + * @param dbName + * @param tableName + * @param colName + * @return boolean indicating the outcome of the operation + * @throws NoSuchObjectException + * @throws MetaException + * @throws InvalidObjectException + * @throws TException + * @throws InvalidInputException + */ + boolean deleteTableColumnStatistics(String dbName, String tableName, String colName) throws + NoSuchObjectException, MetaException, InvalidObjectException, TException, InvalidInputException; + + /** + * @param role + * role object + * @return true on success + * @throws MetaException + * @throws TException + */ + boolean create_role(Role role) + throws MetaException, 
TException; + + /** + * @param role_name + * role name + * + * @return true on success + * @throws MetaException + * @throws TException + */ + boolean drop_role(String role_name) throws MetaException, TException; + + /** + * list all role names + * @return list of names + * @throws TException + * @throws MetaException + */ + List listRoleNames() throws MetaException, TException; + + /** + * + * @param role_name + * @param user_name + * @param principalType + * @param grantor + * @param grantorType + * @param grantOption + * @return true on success + * @throws MetaException + * @throws TException + */ + boolean grant_role(String role_name, String user_name, + PrincipalType principalType, String grantor, PrincipalType grantorType, + boolean grantOption) throws MetaException, TException; + + /** + * @param role_name + * role name + * @param user_name + * user name + * @param principalType + * + * @return true on success + * @throws MetaException + * @throws TException + */ + boolean revoke_role(String role_name, String user_name, + PrincipalType principalType, boolean grantOption) throws MetaException, TException; + + /** + * + * @param principalName + * @param principalType + * @return list of roles + * @throws MetaException + * @throws TException + */ + List list_roles(String principalName, PrincipalType principalType) + throws MetaException, TException; + + /** + * Return the privileges that the user, group have directly and indirectly through roles + * on the given hiveObject + * @param hiveObject + * @param user_name + * @param group_names + * @return the privilege set + * @throws MetaException + * @throws TException + */ + PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, + String user_name, List group_names) throws MetaException, + TException; + + /** + * Return the privileges that this principal has directly over the object (not through roles). + * @param principal_name + * @param principal_type + * @param hiveObject + * @return list of privileges + * @throws MetaException + * @throws TException + */ + List list_privileges(String principal_name, + PrincipalType principal_type, HiveObjectRef hiveObject) + throws MetaException, TException; + + /** + * @param privileges + * @return true on success + * @throws MetaException + * @throws TException + */ + boolean grant_privileges(PrivilegeBag privileges) + throws MetaException, TException; + + /** + * @param privileges + * @return true on success + * @throws MetaException + * @throws TException + */ + boolean revoke_privileges(PrivilegeBag privileges, boolean grantOption) + throws MetaException, TException; + + /** + * This is expected to be a no-op when in local mode, + * which means that the implementation will return null. 
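For illustration only, not part of the patch: the intended call pattern, with invented principal names and client assumed to be a connected IMetaStoreClient.

    // Fetch a delegation token so a remote task can authenticate back to the
    // metastore; in local (in-process) mode this returns null, per the javadoc.
    String token = client.getDelegationToken("hive_user", "renewer/host@EXAMPLE.COM");
    if (token != null) {
      long newExpiry = client.renewDelegationToken(token); // extend validity
      // ... hand the token to the remote process ...
      client.cancelDelegationToken(token);                 // revoke when done
    }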
+ * @param owner the intended owner for the token + * @param renewerKerberosPrincipalName + * @return the string of the token + * @throws MetaException + * @throws TException + */ + String getDelegationToken(String owner, String renewerKerberosPrincipalName) + throws MetaException, TException; + + /** + * @param tokenStrForm + * @return the new expiration time + * @throws MetaException + * @throws TException + */ + long renewDelegationToken(String tokenStrForm) throws MetaException, TException; + + /** + * @param tokenStrForm + * @throws MetaException + * @throws TException + */ + void cancelDelegationToken(String tokenStrForm) throws MetaException, TException; + + String getTokenStrForm() throws IOException; + + boolean addToken(String tokenIdentifier, String delegationToken) throws TException; + + boolean removeToken(String tokenIdentifier) throws TException; + + String getToken(String tokenIdentifier) throws TException; + + List getAllTokenIdentifiers() throws TException; + + int addMasterKey(String key) throws MetaException, TException; + + void updateMasterKey(Integer seqNo, String key) + throws NoSuchObjectException, MetaException, TException; + + boolean removeMasterKey(Integer keySeq) throws TException; + + String[] getMasterKeys() throws TException; + + void createFunction(Function func) + throws InvalidObjectException, MetaException, TException; + + void alterFunction(String dbName, String funcName, Function newFunction) + throws InvalidObjectException, MetaException, TException; + + void dropFunction(String dbName, String funcName) throws MetaException, + NoSuchObjectException, InvalidObjectException, InvalidInputException, TException; + + Function getFunction(String dbName, String funcName) + throws MetaException, TException; + + List getFunctions(String dbName, String pattern) + throws MetaException, TException; + + GetAllFunctionsResponse getAllFunctions() + throws MetaException, TException; + + /** + * Get a structure that details valid transactions. + * @return list of valid transactions + * @throws TException + */ + ValidTxnList getValidTxns() throws TException; + + /** + * Get a structure that details valid transactions. + * @param currentTxn The current transaction of the caller. This will be removed from the + * exceptions list so that the caller sees records from his own transaction. + * @return list of valid transactions + * @throws TException + */ + ValidTxnList getValidTxns(long currentTxn) throws TException; + + /** + * Initiate a transaction. + * @param user User who is opening this transaction. This is the Hive user, + * not necessarily the OS user. It is assumed that this user has already been + * authenticated and authorized at this point. + * @return transaction identifier + * @throws TException + */ + long openTxn(String user) throws TException; + + /** + * Initiate a batch of transactions. It is not guaranteed that the + * requested number of transactions will be instantiated. The system has a + * maximum number instantiated per request, controlled by hive.txn.max + * .batch.open in hive-site.xml. If the user requests more than this + * value, only the configured max will be returned. + * + *
Increasing the number of transactions requested in the batch will + * allow applications that stream data into Hive to place more commits in a + * single file, thus reducing load on the namenode and making reads of the + * data more efficient. However, opening more transactions in a batch will + * also result in readers needing to keep a larger list of open + * transactions to ignore, potentially slowing their reads. Users will + * need to test in their system to understand the optimal number of + * transactions to request in a batch. + *
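For illustration only, not part of the patch: the batching pattern just described, assuming client is a connected IMetaStoreClient and getTxn_ids() is the Thrift-generated accessor on the response.

    // Ask for 100 transactions; the server may grant fewer, so iterate the
    // response instead of assuming the request was met in full.
    OpenTxnsResponse batch = client.openTxns("ingest_user", 100);
    for (long txnId : batch.getTxn_ids()) {
      // ... write one commit's worth of records under txnId ...
      client.commitTxn(txnId);
    }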
+ * @param user User who is opening this transaction. This is the Hive user, + * not necessarily the OS user. It is assumed that this user has already been + * authenticated and authorized at this point. + * @param numTxns number of requested transactions to open + * @return list of opened txn ids. As noted above, this may be less than + * requested, so the user should check how many were returned rather than + * optimistically assuming that the result matches the request. + * @throws TException + */ + OpenTxnsResponse openTxns(String user, int numTxns) throws TException; + + /** + * Rollback a transaction. This will also unlock any locks associated with + * this transaction. + * @param txnid id of transaction to be rolled back. + * @throws NoSuchTxnException if the requested transaction does not exist. + * Note that this can result from the transaction having timed out and been + * deleted. + * @throws TException + */ + void rollbackTxn(long txnid) throws NoSuchTxnException, TException; + + /** + * Commit a transaction. This will also unlock any locks associated with + * this transaction. + * @param txnid id of transaction to be committed. + * @throws NoSuchTxnException if the requested transaction does not exist. + * This can result from the transaction having timed out and been deleted by + * the compactor. + * @throws TxnAbortedException if the requested transaction has been + * aborted. This can result from the transaction timing out. + * @throws TException + */ + void commitTxn(long txnid) + throws NoSuchTxnException, TxnAbortedException, TException; + + /** + * Show the list of currently open transactions. This is for use by "show transactions" in the + * grammar, not for applications that want to find a list of current transactions to work with. + * Those wishing the latter should call {@link #getValidTxns()}. + * @return List of currently opened transactions, including aborted ones. + * @throws TException + */ + GetOpenTxnsInfoResponse showTxns() throws TException; + + /** + * Request a set of locks. All locks needed for a particular query, DML, + * or DDL operation should be batched together and requested in one lock + * call. This avoids deadlocks. It also avoids blocking other users who + * only require some of the locks required by this user. + * + *
If the operation requires a transaction (INSERT, UPDATE, + * or DELETE) that transaction id must be provided as part of this lock + * request. All locks associated with a transaction will be released when + * that transaction is committed or rolled back.
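For illustration only, not part of the patch: a sketch of that contract, in which all locks for one transactional write go into a single request and the caller then polls. The builder calls follow the LockRequestBuilder mentioned below and should be treated as illustrative.

    // Open a transaction, request one shared table lock tied to it, and poll
    // until the lock leaves the WAITING state.
    long txnId = client.openTxn("etl_user");
    LockRequest req = new LockRequestBuilder()
        .setTransactionId(txnId)
        .setUser("etl_user")
        .addLockComponent(new LockComponentBuilder()
            .setDbName("prod_db").setTableName("events")
            .setShared().build())
        .build();
    LockResponse resp = client.lock(req);
    while (resp.getState() == LockState.WAITING) {
      Thread.sleep(1000);                        // back off before re-checking
      resp = client.checkLock(resp.getLockid());
    }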
+ *
Once a lock is acquired, {@link #heartbeat(long, long)} must be called + * on a regular basis to avoid the lock being timed out by the system.
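For illustration only, not part of the patch: a minimal heartbeating sketch; client, txnId and lockId are assumed to be in scope, and the interval must stay well under the configured timeout.

    // Keep the transaction and its locks alive from a background thread.
    ScheduledExecutorService heartbeater = Executors.newSingleThreadScheduledExecutor();
    heartbeater.scheduleAtFixedRate(new Runnable() {
      public void run() {
        try {
          client.heartbeat(txnId, lockId);  // pass 0 for whichever does not apply
        } catch (TException e) {
          // the txn or lock may have timed out; abort the surrounding work
        }
      }
    }, 60, 60, TimeUnit.SECONDS);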
+ * @param request The lock request. {@link LockRequestBuilder} can be used to + * construct this request. + * @return a lock response, which will provide two things, + * the id of the lock (to be used in all further calls regarding this lock) + * as well as a state of the lock. If the state is ACQUIRED then the user + * can proceed. If it is WAITING the user should wait and call + * {@link #checkLock(long)} before proceeding. All components of the lock + * will have the same state. + * @throws NoSuchTxnException if the requested transaction does not exist. + * This can result from the transaction having timed out and been deleted by + * the compactor. + * @throws TxnAbortedException if the requested transaction has been + * aborted. This can result from the transaction timing out. + * @throws TException + */ + LockResponse lock(LockRequest request) + throws NoSuchTxnException, TxnAbortedException, TException; + + /** + * Check the status of a set of locks requested via a + * {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)} call. + * Once a lock is acquired, {@link #heartbeat(long, long)} must be called + * on a regular basis to avoid the lock being timed out by the system. + * @param lockid lock id returned by lock(). + * @return a lock response, which will provide two things, + * the id of the lock (to be used in all further calls regarding this lock) + * as well as a state of the lock. If the state is ACQUIRED then the user + * can proceed. If it is WAITING the user should wait and call + * this method again before proceeding. All components of the lock + * will have the same state. + * @throws NoSuchTxnException if the requested transaction does not exist. + * This can result from the transaction having timed out and been deleted by + * the compactor. + * @throws TxnAbortedException if the requested transaction has been + * aborted. This can result from the transaction timing out. + * @throws NoSuchLockException if the requested lockid does not exist. + * This can result from the lock timing out and being unlocked by the system. + * @throws TException + */ + LockResponse checkLock(long lockid) + throws NoSuchTxnException, TxnAbortedException, NoSuchLockException, + TException; + + /** + * Unlock a set of locks. This can only be called when the locks are not + * associated with a transaction. + * @param lockid lock id returned by + * {@link #lock(org.apache.hadoop.hive.metastore.api.LockRequest)} + * @throws NoSuchLockException if the requested lockid does not exist. + * This can result from the lock timing out and being unlocked by the system. + * @throws TxnOpenException if the locks are associated with a + * transaction. + * @throws TException + */ + void unlock(long lockid) + throws NoSuchLockException, TxnOpenException, TException; + + /** + * Show all currently held and waiting locks. + * @return List of currently held and waiting locks. + * @throws TException + */ + ShowLocksResponse showLocks() throws TException; + + /** + * Send a heartbeat to indicate that the client holding these locks (if + * any) and that opened this transaction (if one exists) is still alive. + * The default timeout for transactions and locks is 300 seconds, + * though it is configurable. To determine how often to heartbeat you will + * need to ask your system administrator how the metastore thrift service + * has been configured. + * @param txnid the id of the open transaction. If no transaction is open + * (it is a DDL or query) then this can be set to 0. 
+ * @param lockid the id of the locks obtained. If no locks have been + * obtained then this can be set to 0. + * @throws NoSuchTxnException if the requested transaction does not exist. + * This can result from the transaction having timed out and been deleted by + * the compactor. + * @throws TxnAbortedException if the requested transaction has been + * aborted. This can result from the transaction timing out. + * @throws NoSuchLockException if the requested lockid does not exist. + * This can result from the lock timing out and being unlocked by the system. + * @throws TException + */ + void heartbeat(long txnid, long lockid) + throws NoSuchLockException, NoSuchTxnException, TxnAbortedException, + TException; + + /** + * Send heartbeats for a range of transactions. This is for the streaming ingest client that + * will have many transactions open at once. Everyone else should use + * {@link #heartbeat(long, long)}. + * @param min minimum transaction id to heartbeat, inclusive + * @param max maximum transaction id to heartbeat, inclusive + * @return a pair of lists that tell which transactions in the list did not exist (they may + * have already been closed) and which were aborted. + * @throws TException + */ + HeartbeatTxnRangeResponse heartbeatTxnRange(long min, long max) throws TException; + + /** + * Send a request to compact a table or partition. This will not block until the compaction is + * complete. It will instead put a request on the queue for that table or partition to be + * compacted. No checking is done on the dbname, tableName, or partitionName to make sure they + * refer to valid objects. It is assumed this has already been done by the caller. + * @param dbname Name of the database the table is in. If null, this will be assumed to be + * 'default'. + * @param tableName Name of the table to be compacted. This cannot be null. If partitionName + * is null, this must be a non-partitioned table. + * @param partitionName Name of the partition to be compacted + * @param type Whether this is a major or minor compaction. + * @throws TException + */ + void compact(String dbname, String tableName, String partitionName, CompactionType type) + throws TException; + + /** + * Get a list of all current compactions. + * @return List of all current compactions. This includes compactions waiting to happen, + * in progress, and finished but waiting to clean the existing files. + * @throws TException + */ + ShowCompactResponse showCompactions() throws TException; + + /** + * Send a list of partitions to the metastore to indicate which partitions were loaded + * dynamically. + * @param txnId id of the transaction + * @param dbName database name + * @param tableName table name + * @param partNames partition names, as constructed by Warehouse.makePartName + * @throws TException + */ + void addDynamicPartitions(long txnId, String dbName, String tableName, List partNames) + throws TException; + + /** + * A filter provided by the client that determines if a given notification event should be + * returned. + */ + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + interface NotificationFilter { + /** + * Whether a notification event should be accepted + * @param event + * @return if true, event will be added to list, if false it will be ignored + */ + boolean accept(NotificationEvent event); + } + + /** + * Get the next set of notifications from the database. + * @param lastEventId The last event id that was consumed by this reader. 
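For illustration only, not part of the patch: a polling sketch for this API; loadCheckpoint/saveCheckpoint stand in for caller-provided persistence, and the Thrift-generated event getters are assumed.

    // Consume events for one table, resuming from a persisted event id.
    long lastSeen = loadCheckpoint();
    NotificationEventResponse events = client.getNextNotification(lastSeen, 100,
        new IMetaStoreClient.NotificationFilter() {
          public boolean accept(NotificationEvent event) {
            return "events".equalsIgnoreCase(event.getTableName());
          }
        });
    for (NotificationEvent e : events.getEvents()) {
      // ... react to the event ...
      lastSeen = e.getEventId();
    }
    saveCheckpoint(lastSeen);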
The returned + * notifications will start at the next eventId available after this eventId. + * @param maxEvents Maximum number of events to return. If < 1, then all available events will + * be returned. + * @param filter User provided filter to remove unwanted events. If null, all events will be + * returned. + * @return list of notifications, sorted by eventId. It is guaranteed that the events are in + * the order that the operations were done on the database. + * @throws TException + */ + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + NotificationEventResponse getNextNotification(long lastEventId, int maxEvents, + NotificationFilter filter) throws TException; + + /** + * Get the last used notification event id. + * @return last used id + * @throws TException + */ + @InterfaceAudience.LimitedPrivate({"HCatalog"}) + CurrentNotificationEventId getCurrentNotificationEventId() throws TException; + + /** + * Request that the metastore fire an event. Currently this is only supported for DML + * operations, since the metastore knows when DDL operations happen. + * @param request + * @return response, type depends on type of request + * @throws TException + */ + + @InterfaceAudience.LimitedPrivate({"Apache Hive, HCatalog"}) + FireEventResponse fireListenerEvent(FireEventRequest request) throws TException; + + class IncompatibleMetastoreException extends MetaException { + IncompatibleMetastoreException(String message) { + super(message); + } + } + + /** + * get all role-grants for users/roles that have been granted the given role + * Note that in the returned list of RolePrincipalGrants, the roleName is + * redundant as it would match the role_name argument of this function + * @param getPrincRoleReq + * @return + * @throws MetaException + * @throws TException + */ + GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest getPrincRoleReq) + throws MetaException, TException; + + /** + * get all role-grants for roles that have been granted to given principal + * Note that in the returned list of RolePrincipalGrants, the principal information is + * redundant as it would match the principal information in the request + * @param getRolePrincReq + * @return + * @throws MetaException + * @throws TException + */ + GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( + GetRoleGrantsForPrincipalRequest getRolePrincReq) throws MetaException, TException; + + public AggrStats getAggrColStatsFor(String dbName, String tblName, + List colNames, List partName) throws NoSuchObjectException, MetaException, TException; + + boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; + + /** + * Flush any catalog objects held by the metastore implementation. Note that this does not + * flush statistics objects. This should be called at the beginning of each query. + */ + void flushCache(); + + /** + * Gets file metadata, as cached by metastore, for respective file IDs. + * The metadata that is not cached in metastore may be missing. + */ + Iterable> getFileMetadata(List fileIds) throws TException; + + Iterable> getFileMetadataBySarg( + List fileIds, ByteBuffer sarg, boolean doGetFooters) throws TException; + + /** + * Clears the file metadata cache for respective file IDs. + */ + void clearFileMetadata(List fileIds) throws TException; + + /** + * Adds file metadata for respective file IDs to metadata cache in metastore. 
+ */ + void putFileMetadata(List fileIds, List metadata) throws TException; + + boolean isSameConfObj(HiveConf c); + + boolean cacheFileMetadata(String dbName, String tableName, String partName, + boolean allParts) throws TException; + + long getChangeVersion(String topic) throws TException; +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 06e9f78..2749649 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -49,6 +49,7 @@ import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.ForeignKeyRel; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; @@ -1809,4 +1810,26 @@ public void closeAllQueries() { } return result; } + + public String[] getFKParentTable(final String constraintName) throws MetaException { + if (constraintName == null) { + return null; + } + final String queryText = "select \"TBLS\".\"TBL_NAME\", \"DBS\".\"NAME\" from \"TBLS\" " + + "inner join \"PK_FK_MAPPING\" on" + + " \"TBLS\".\"TBL_ID\" = \"PK_FK_MAPPING\".\"REF_TBL_ID\" " + + "inner join \"DBS\" on \"TBLS\".\"DB_ID\" = \"DBS\".\"DB_ID\" where " + + " \"PK_FK_MAPPING\".\"PK_FK_CONSTRAINT_NAME\" = ?"; + Object[] params = new Object[] { constraintName}; + Query query = pm.newQuery("javax.jdo.query.SQL", queryText); + query.setUnique(true); + Object[] sqlResult = executeWithArray(query, params, queryText); + if (sqlResult ==null) { + return null; + } + String[] result = new String[2]; + result[0] = extractSqlString(sqlResult[0]); + result[1] = extractSqlString(sqlResult[1]); + return result; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index ac293b9..9e552d7 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -72,6 +72,7 @@ import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.FileMetadataExprType; +import org.apache.hadoop.hive.metastore.api.ForeignKeyRel; import org.apache.hadoop.hive.metastore.api.Function; import org.apache.hadoop.hive.metastore.api.FunctionType; import org.apache.hadoop.hive.metastore.api.HiveObjectPrivilege; @@ -97,6 +98,8 @@ import org.apache.hadoop.hive.metastore.api.ResourceUri; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; @@ -112,6 +115,7 @@ import org.apache.hadoop.hive.metastore.model.MDatabase; import org.apache.hadoop.hive.metastore.model.MDelegationToken; import org.apache.hadoop.hive.metastore.model.MFieldSchema; +import org.apache.hadoop.hive.metastore.model.MForeignKeyRel; import 
org.apache.hadoop.hive.metastore.model.MFunction; import org.apache.hadoop.hive.metastore.model.MGlobalPrivilege; import org.apache.hadoop.hive.metastore.model.MIndex; @@ -119,6 +123,7 @@ import org.apache.hadoop.hive.metastore.model.MNotificationLog; import org.apache.hadoop.hive.metastore.model.MNotificationNextId; import org.apache.hadoop.hive.metastore.model.MOrder; +import org.apache.hadoop.hive.metastore.model.MPKFKMapping; import org.apache.hadoop.hive.metastore.model.MPartition; import org.apache.hadoop.hive.metastore.model.MPartitionColumnPrivilege; import org.apache.hadoop.hive.metastore.model.MPartitionColumnStatistics; @@ -905,6 +910,10 @@ public void createTable(Table tbl) throws InvalidObjectException, MetaException openTransaction(); MTable mtbl = convertToMTable(tbl); pm.makePersistent(mtbl); + + List fks = tbl.getForeignKeys(); + addPKFKMapping(mtbl, fks); + PrincipalPrivilegeSet principalPrivs = tbl.getPrivileges(); List toPersistPrivObjs = new ArrayList(); if (principalPrivs != null) { @@ -1293,7 +1302,8 @@ private Table convertToTable(MTable mtbl) throws MetaException { .getOwner(), mtbl.getCreateTime(), mtbl.getLastAccessTime(), mtbl .getRetention(), convertToStorageDescriptor(mtbl.getSd()), convertToFieldSchemas(mtbl.getPartitionKeys()), convertMap(mtbl.getParameters()), - mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType); + mtbl.getViewOriginalText(), mtbl.getViewExpandedText(), tableType, + convertToFieldSchemas(mtbl.getPrimaryKeys()), convertToForeignKeyRels(mtbl.getForeignKeys())); } private MTable convertToMTable(Table tbl) throws InvalidObjectException, @@ -1327,11 +1337,45 @@ private MTable convertToMTable(Table tbl) throws InvalidObjectException, // A new table is always created with a new column descriptor return new MTable(HiveStringUtils.normalizeIdentifier(tbl.getTableName()), mdb, - convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl - .getCreateTime(), tbl.getLastAccessTime(), tbl.getRetention(), + convertToMStorageDescriptor(tbl.getSd()), tbl.getOwner(), tbl.getCreateTime(), + tbl.getLastAccessTime(), tbl.getRetention(), convertToMFieldSchemas(tbl.getPartitionKeys()), tbl.getParameters(), tbl.getViewOriginalText(), tbl.getViewExpandedText(), - tableType); + tableType, convertToMFieldSchemas(tbl.getPrimaryKeys()), + convertToMForeignKeyRels(tbl.getForeignKeys())); + } + + private List convertToMForeignKeyRels(List fkRels) { + List mfkRels= new ArrayList(fkRels.size()); + + for (ForeignKeyRel fkRel : fkRels) { + MForeignKeyRel mfkRel = new MForeignKeyRel( + new MColumnDescriptor(convertToMFieldSchemas(fkRel.getForeignCols())), + new MColumnDescriptor(convertToMFieldSchemas(fkRel.getParentCols())), + fkRel.getForeignKeyName() + ); + mfkRels.add(mfkRel); + } + return mfkRels; + } + + private List convertToForeignKeyRels( + List mfkRels) throws MetaException { + if (mfkRels == null) { + return null; + } + List fkRels = new ArrayList(mfkRels.size()); + + for (MForeignKeyRel mfkRel : mfkRels) { + ForeignKeyRel fk; + String[] parentTableInfo = directSql.getFKParentTable(mfkRel.getForeignKeyName()); + fk = new ForeignKeyRel( + convertToFieldSchemas(mfkRel.getForeignCols().getCols()), + convertToFieldSchemas(mfkRel.getParentCols().getCols()), + mfkRel.getForeignKeyName(), parentTableInfo[0], parentTableInfo[1]); + fkRels.add(fk); + } + return fkRels; } private List convertToMFieldSchemas(List keys) { @@ -3200,6 +3244,19 @@ private void preDropStorageDescriptor(MStorageDescriptor msd) { return sds; } + private void addPKFKMapping(MTable 
+      List<ForeignKeyRel> fks) throws InvalidObjectException,
+      MetaException {
+    List<MPKFKMapping> mpkfks = new ArrayList<MPKFKMapping>();
+    for (int i = 0; i < fks.size(); i++) {
+      MPKFKMapping mpkfk = new MPKFKMapping(fks.get(i).getForeignKeyName(), mtbl,
+        getMTable(fks.get(i).getParentTableSchema(), fks.get(i).getParentTableName())
+      );
+      mpkfks.add(mpkfk);
+    }
+    pm.makePersistentAll(mpkfks);
+  }
+
   @Override
   public boolean addIndex(Index index) throws InvalidObjectException,
       MetaException {
@@ -7940,4 +7997,100 @@ public static void unCacheDataNucleusClassLoaders() {
       }
     }
   }
+
+  private List<MFieldSchema> getMPrimaryKeys(String dbName, String tableName)
+      throws MetaException {
+    MTable mtbl = getMTable(dbName, tableName);
+    return mtbl == null ? null : mtbl.getPrimaryKeys();
+  }
+
+  private List<MForeignKeyRel> getMForeignKeyRels(String dbName, String tableName)
+      throws MetaException {
+    MTable mtbl = getMTable(dbName, tableName);
+    return mtbl == null ? null : mtbl.getForeignKeys();
+  }
+
+  private List<String> getConstraintNames(String parent_db_name,
+      String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+      throws MetaException {
+    boolean commited = false;
+
+    List<String> mpkfks = null;
+    Query query = null;
+    try {
+      openTransaction();
+      parent_db_name = HiveStringUtils.normalizeIdentifier(parent_db_name);
+      parent_tbl_name = HiveStringUtils.normalizeIdentifier(parent_tbl_name);
+      foreign_db_name = HiveStringUtils.normalizeIdentifier(foreign_db_name);
+      foreign_tbl_name = HiveStringUtils.normalizeIdentifier(foreign_tbl_name);
+      // NB: the JDOQL field is MPKFKMapping.constraintName (the model has no foreignKeyName field)
+      String queryStr = "select constraintName from "
+          + "org.apache.hadoop.hive.metastore.model.MPKFKMapping where "
+          + "parentTable.tableName == parent_tbl_name && "
+          + "foreignTable.tableName == foreign_tbl_name && "
+          + "parentTable.database.name == parent_db_name && "
+          + "foreignTable.database.name == foreign_db_name ";
+
+      query = pm.newQuery(queryStr);
+      query.declareParameters("java.lang.String parent_tbl_name,"
+          + "java.lang.String foreign_tbl_name,"
+          + "java.lang.String parent_db_name,"
+          + "java.lang.String foreign_db_name");
+      query.setUnique(false);
+      mpkfks = (List<String>) query.executeWithArray(
+          parent_tbl_name, foreign_tbl_name,
+          parent_db_name, foreign_db_name);
+      pm.retrieve(mpkfks);
+      commited = commitTransaction();
+    } finally {
+      if (!commited) {
+        rollbackTransaction();
+      }
+      if (query != null) {
+        query.closeAll();
+      }
+    }
+    return mpkfks;
+  }
+
+  @Override
+  public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name) throws MetaException {
+    List<MFieldSchema> primaryKeys = getMPrimaryKeys(db_name, tbl_name);
+    if (primaryKeys == null) {
+      return null;
+    }
+    List<SQLPrimaryKey> ret = new ArrayList<SQLPrimaryKey>();
+    int cnt = 1;
+    for (MFieldSchema fs : primaryKeys) {
+      ret.add(new SQLPrimaryKey(db_name, tbl_name, fs.getName(), cnt++, null));
+    }
+    return ret;
+  }
+
+  @Override
+  public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+      String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException {
+    List<MForeignKeyRel> mfkRels = getMForeignKeyRels(foreign_db_name, foreign_tbl_name);
+    if (mfkRels == null) {
+      return null;
+    }
+    List<String> constraintNames = getConstraintNames(parent_db_name,
+        parent_tbl_name, foreign_db_name, foreign_tbl_name);
+    if (constraintNames == null) {
+      return null;
+    }
+    List<SQLForeignKey> ret = new ArrayList<SQLForeignKey>();
+    for (MForeignKeyRel mfkRel : mfkRels) {
+      if (constraintNames.contains(mfkRel.getForeignKeyName())) {
+        List<MFieldSchema> mfkCols = mfkRel.getForeignCols().getCols();
+        List<MFieldSchema> mpkCols = mfkRel.getParentCols().getCols();
+        for (int i = 0; i < mfkCols.size(); i++) {
+          SQLForeignKey currKey = new SQLForeignKey(parent_db_name,
parent_tbl_name, mpkCols.get(i).getName(), + foreign_db_name, foreign_tbl_name, mfkCols.get(i).getName(), + i+1, 0, 0, mfkRel.getForeignKeyName(), null); + ret.add(currKey); + } + } + } + return ret; + } } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index e49f757..d09d506 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -51,6 +51,8 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.Type; @@ -663,4 +665,10 @@ void getFileMetadataByExpr(List fileIds, FileMetadataExprType type, byte[] @InterfaceStability.Evolving long getChangeVersion(String topic) throws MetaException; + + public abstract List getPrimaryKeys(String db_name, + String tbl_name) throws MetaException; + + public abstract List getForeignKeys(String parent_db_name, + String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) throws MetaException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java index a73dbeb..478e2ee 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/hbase/HBaseStore.java @@ -63,6 +63,8 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeGrantInfo; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.TableMeta; import org.apache.hadoop.hive.metastore.api.Type; @@ -2591,4 +2593,19 @@ public long getChangeVersion(String topic) throws MetaException { commitOrRoleBack(commit); } } + + @Override + public List getPrimaryKeys(String db_name, String tbl_name) + throws MetaException { + // TODO Auto-generated method stub + return null; + } + + @Override + public List getForeignKeys(String parent_db_name, + String parent_tbl_name, String foreign_db_name, String foreign_tbl_name) + throws MetaException { + // TODO Auto-generated method stub + return null; + } } diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MForeignKeyRel.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MForeignKeyRel.java new file mode 100644 index 0000000..dd8c4df --- /dev/null +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MForeignKeyRel.java @@ -0,0 +1,39 @@ +package org.apache.hadoop.hive.metastore.model; + +public class MForeignKeyRel { + private MColumnDescriptor foreignCols; // required + private MColumnDescriptor parentCols; // required + private String foreignKeyName; // required + + public MForeignKeyRel() {} + + public MForeignKeyRel(MColumnDescriptor foreignCols, MColumnDescriptor parentCols, String foreignKeyName) { + this.foreignCols = foreignCols; + this.parentCols = parentCols; + 
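// the FK constraint name for this relationship: user-specified via CONSTRAINT, or generated by ParseUtils.generateFKName +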
this.foreignKeyName = foreignKeyName; + } + + public MColumnDescriptor getForeignCols() { + return foreignCols; + } + + public void setForeignCols(MColumnDescriptor foreignCols) { + this.foreignCols = foreignCols; + } + + public MColumnDescriptor getParentCols() { + return parentCols; + } + + public void setParentCols(MColumnDescriptor parentCols) { + this.parentCols = parentCols; + } + + public String getForeignKeyName() { + return foreignKeyName; + } + + public void setForeignKeyName(String fkName) { + this.foreignKeyName = fkName; + } +} diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPKFKMapping.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPKFKMapping.java new file mode 100644 index 0000000..5331bf1 --- /dev/null +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MPKFKMapping.java @@ -0,0 +1,39 @@ +package org.apache.hadoop.hive.metastore.model; + +public class MPKFKMapping { + String constraintName; + MTable foreignTable; + MTable parentTable; + + public MPKFKMapping() {} + + public MPKFKMapping(String cn, MTable ft, MTable pt) { + constraintName = cn; + foreignTable = ft; + parentTable = pt; + } + + public String getConstraintName() { + return constraintName; + } + + public void setConstraintName(String fkName) { + this.constraintName = fkName; + } + + public MTable getForeignTable() { + return foreignTable; + } + + public void setForeignTable(MTable ft) { + this.foreignTable = ft; + } + + public MTable getParentTable() { + return parentTable; + } + + public void setParentTable(MTable pt) { + this.parentTable = pt; + } +} diff --git a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java index 2a78ce9..1cef586 100644 --- a/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java +++ b/metastore/src/model/org/apache/hadoop/hive/metastore/model/MTable.java @@ -21,6 +21,8 @@ import java.util.List; import java.util.Map; +import org.apache.hadoop.hive.metastore.api.ForeignKeyRel; + public class MTable { private String tableName; @@ -35,6 +37,8 @@ private String viewOriginalText; private String viewExpandedText; private String tableType; + private List primaryKeys; + private List foreignKeys; public MTable() {} @@ -55,7 +59,9 @@ public MTable() {} public MTable(String tableName, MDatabase database, MStorageDescriptor sd, String owner, int createTime, int lastAccessTime, int retention, List partitionKeys, Map parameters, - String viewOriginalText, String viewExpandedText, String tableType) { + String viewOriginalText, String viewExpandedText, String tableType, List primaryKeys, + List foreignKeys +) { this.tableName = tableName; this.database = database; this.sd = sd; @@ -68,6 +74,8 @@ public MTable(String tableName, MDatabase database, MStorageDescriptor sd, Strin this.viewOriginalText = viewOriginalText; this.viewExpandedText = viewExpandedText; this.tableType = tableType; + this.primaryKeys = primaryKeys; + this.foreignKeys = foreignKeys; } /** @@ -106,6 +114,34 @@ public void setSd(MStorageDescriptor sd) { } /** + * @param primaryKeys + */ + public void setPrimaryKeys(List primaryKeys) { + this.primaryKeys = primaryKeys; + } + + /** + * @return the primaryKeys + */ + public List getPrimaryKeys() { + return primaryKeys; + } + + /** + * @param foreignKeys + */ + public void setForeignKeys(List foreignKeys) { + this.foreignKeys = foreignKeys; + } + + /** + * @return the foreignKeys + */ + public List getForeignKeys() { + return 
foreignKeys;
+  }
+
+  /**
    * @param partKeys the partKeys to set
    */
   public void setPartitionKeys(List<MFieldSchema> partKeys) {
diff --git a/metastore/src/model/package.jdo b/metastore/src/model/package.jdo
index 7385a13..dc7df92 100644
--- a/metastore/src/model/package.jdo
+++ b/metastore/src/model/package.jdo
@@ -146,6 +146,48 @@
+      <!-- 42 added lines; the XML element text was lost in extraction. Most likely the
+           JDO <field> mappings for the new MTable.primaryKeys and MTable.foreignKeys fields. -->
@@ -184,6 +226,30 @@
+      <!-- 24 added lines; the XML element text was lost in extraction. Most likely the
+           JDO <class> mappings for MForeignKeyRel and MPKFKMapping. -->
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
index 94ca835..f7b20a4 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java
@@ -49,6 +49,8 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -820,4 +822,19 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
 public long getChangeVersion(String topic) throws MetaException {
   return 0;
 }
+
+@Override
+public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+    throws MetaException {
+  // TODO Auto-generated method stub
+  return null;
+}
+
+@Override
+public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+    String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+    throws MetaException {
+  // TODO Auto-generated method stub
+  return null;
+}
 }
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
index b108f95..b3b44e1 100644
--- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
+++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java
@@ -50,6 +50,8 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag;
 import org.apache.hadoop.hive.metastore.api.Role;
 import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant;
+import org.apache.hadoop.hive.metastore.api.SQLForeignKey;
+import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.hive.metastore.api.TableMeta;
 import org.apache.hadoop.hive.metastore.api.Type;
@@ -836,6 +838,21 @@ public FileMetadataHandler getFileMetadataHandler(FileMetadataExprType type) {
 public long getChangeVersion(String topic) throws MetaException {
   return 0;
 }
+
+@Override
+public List<SQLPrimaryKey> getPrimaryKeys(String db_name, String tbl_name)
+    throws MetaException {
+  // TODO Auto-generated method stub
+  return null;
+}
+
+@Override
+public List<SQLForeignKey> getForeignKeys(String parent_db_name,
+    String parent_tbl_name, String foreign_db_name, String foreign_tbl_name)
+    throws MetaException {
+  // TODO Auto-generated method stub
+  return null;
+}
 }
diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java
index 922a4bf..51f882c 100644
---
a/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestHiveMetaStorePartitionSpecs.java @@ -140,7 +140,7 @@ private static void createTable(HiveMetaStoreClient hmsc, boolean enablePartitio Map tableParameters = new HashMap(); tableParameters.put("hive.hcatalog.partition.spec.grouping.enabled", enablePartitionGrouping? "true":"false"); - Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", ""); + Table table = new Table(tableName, dbName, "", 0, 0, 0, storageDescriptor, partColumns, tableParameters, "", "", "", null, null); hmsc.createTable(table); Assert.assertTrue("Table " + dbName + "." + tableName + " does not exist", diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java index 2e1f5f4..5751dcb 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/TestObjectStore.java @@ -134,14 +134,14 @@ public void testTableOps() throws MetaException, InvalidObjectException, NoSuchO StorageDescriptor sd = new StorageDescriptor(null, "location", null, null, false, 0, new SerDeInfo("SerDeName", "serializationLib", null), null, null, null); HashMap params = new HashMap(); params.put("EXTERNAL", "false"); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE", null, null); objectStore.createTable(tbl1); List tables = objectStore.getAllTables(DB1); Assert.assertEquals(1, tables.size()); Assert.assertEquals(TABLE1, tables.get(0)); - Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table newTbl1 = new Table("new" + TABLE1, DB1, "owner", 1, 2, 3, sd, null, params, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE", null, null); objectStore.alterTable(DB1, TABLE1, newTbl1); tables = objectStore.getTables(DB1, "new*"); Assert.assertEquals(1, tables.size()); @@ -166,7 +166,7 @@ public void testPartitionOps() throws MetaException, InvalidObjectException, NoS tableParams.put("EXTERNAL", "false"); FieldSchema partitionKey1 = new FieldSchema("Country", serdeConstants.STRING_TYPE_NAME, ""); FieldSchema partitionKey2 = new FieldSchema("State", serdeConstants.STRING_TYPE_NAME, ""); - Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE"); + Table tbl1 = new Table(TABLE1, DB1, "owner", 1, 2, 3, sd, Arrays.asList(partitionKey1, partitionKey2), tableParams, "viewOriginalText", "viewExpandedText", "MANAGED_TABLE", null, null); objectStore.createTable(tbl1); HashMap partitionParams = new HashMap(); partitionParams.put("PARTITION_LEVEL_PRIVILEGE", "true"); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java index 6cd3a46..302bf4b 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java +++ 
b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCache.java @@ -91,7 +91,7 @@ public void allWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -173,7 +173,7 @@ public void noneWithStats() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); for (List partVals : Arrays.asList(partVals1, partVals2)) { @@ -212,7 +212,7 @@ public void someNonexistentPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); StorageDescriptor psd = new StorageDescriptor(sd); @@ -293,7 +293,7 @@ public void nonexistentPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int)now, (int)now, 0, sd, partCols, - Collections.emptyMap(), null, null, null); + Collections.emptyMap(), null, null, null, null, null); store.createTable(table); Checker statChecker = new Checker() { diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java index e0c4094..bf16f16 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsCacheWithBitVector.java @@ -87,7 +87,7 @@ public void allPartitions() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); StorageDescriptor psd = new StorageDescriptor(sd); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java index f4e55ed..21731c5 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsExtrapolation.java @@ -91,7 +91,7 @@ public void allPartitionsHaveBitVectorStatusLong() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -161,7 +161,7 @@ public void allPartitionsHaveBitVectorStatusDecimal() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -231,7 +231,7 @@ public void allPartitionsHaveBitVectorStatusDouble() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -301,7 +301,7 @@ public void allPartitionsHaveBitVectorStatusString() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -371,7 +371,7 @@ public void noPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -440,7 +440,7 @@ public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -512,7 +512,7 @@ public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -584,7 +584,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Excepti List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -656,7 +656,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Excep List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java index 62918be..c1d4958 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseAggregateStatsNDVUniformDist.java @@ -93,7 +93,7 @@ public void allPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -163,7 +163,7 @@ public void noPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -232,7 +232,7 @@ public void TwoEndsOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -304,7 +304,7 @@ public void MiddleOfPartitionsHaveBitVectorStatus() throws Exception { List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -376,7 +376,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusLong() throws Excepti List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -448,7 +448,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDecimal() throws Exce List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); @@ -520,7 +520,7 @@ public void TwoEndsAndMiddleOfPartitionsHaveBitVectorStatusDouble() throws Excep List partCols = new ArrayList<>(); partCols.add(new FieldSchema("ds", "string", "")); Table table = new Table(tableName, dbName, "me", (int) now, (int) now, 0, sd, partCols, - Collections. emptyMap(), null, null, null); + Collections. 
emptyMap(), null, null, null, null, null); store.createTable(table); List> partVals = new ArrayList<>(); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java index e4723f6..fa8506d 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStore.java @@ -404,7 +404,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 17, serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -448,7 +448,7 @@ public void skewInfo() throws Exception { map); sd.setSkewedInfo(skew); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -518,7 +518,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); startTime += 10; @@ -551,7 +551,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -573,7 +573,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -614,7 +614,7 @@ public void alterPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -659,7 +659,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -697,7 +697,7 @@ public void listGetDropPartitionNames() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new 
Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -740,7 +740,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, DB, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -1276,7 +1276,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters); @@ -1294,7 +1294,7 @@ private Table createMockTable(String type) throws Exception { serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); return table; } diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java index b1dc542..834ec8a 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreBitVector.java @@ -569,7 +569,7 @@ private Table createMockTable(String name, String type) throws Exception { serde, new ArrayList(), new ArrayList(), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); return table; } @@ -586,7 +586,7 @@ private Table createMockTableAndPartition(String partType, String partVal) throw serde, Arrays.asList("bucketcol"), Arrays.asList(new Order("sortcol", 1)), params); int currentTime = (int)(System.currentTimeMillis() / 1000); Table table = new Table(TBL, DB, "me", currentTime, currentTime, 0, sd, cols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Partition part = new Partition(vals, DB, TBL, currentTime, currentTime, sd, emptyParameters); diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java index cfe9cd0..d59510e 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/hbase/TestHBaseStoreCached.java @@ -101,7 +101,7 @@ public void createTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, 
emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -129,7 +129,7 @@ public void alterTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); startTime += 10; @@ -162,7 +162,7 @@ public void dropTable() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, "default", "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); Table t = store.getTable("default", tableName); @@ -185,7 +185,7 @@ public void createPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -224,7 +224,7 @@ public void getPartitions() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List partVals = Arrays.asList("alan", "bob", "carl", "doug", "ethan"); @@ -263,7 +263,7 @@ public void listGetDropPartitionNames() throws Exception { partCols.add(new FieldSchema("pc", "string", "")); partCols.add(new FieldSchema("region", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); String[][] partVals = new String[][]{{"today", "north america"}, {"tomorrow", "europe"}}; @@ -307,7 +307,7 @@ public void dropPartition() throws Exception { List partCols = new ArrayList(); partCols.add(new FieldSchema("pc", "string", "")); Table table = new Table(tableName, dbName, "me", startTime, startTime, 0, sd, partCols, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); List vals = Arrays.asList("fred"); @@ -338,7 +338,7 @@ public void booleanTableStatistics() throws Exception { StorageDescriptor sd = new StorageDescriptor(cols, "file:/tmp", "input", "output", false, 0, serde, null, null, emptyParameters); Table table = new Table(tableName, dbname, "me", startTime, startTime, 0, sd, null, - emptyParameters, null, null, null); + emptyParameters, null, null, null, null, null); store.createTable(table); long trues = 37; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java index f091f67..ab01512 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java @@ -447,6 +447,7 @@ INVALID_LOAD_TABLE_FILE_WORK(10322, "Invalid 
Load Table Work or Load File Work"),
   CLASSPATH_ERROR(10323, "Classpath error"),
   IMPORT_SEMANTIC_ERROR(10324, "Import Semantic Analyzer Error"),
+  INVALID_FK_SYNTAX(10325, "Invalid Foreign Key syntax"),
 
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
   SCRIPT_IO_ERROR(20001, "An error occurred while reading or writing to your custom script. "
@@ -463,7 +464,6 @@ OP_NOT_ALLOWED_IN_AUTOCOMMIT(20006, "Operation {0} is not allowed when autoCommit=true.", true),//todo: better SQLState?
   OP_NOT_ALLOWED_IN_TXN(20007, "Operation {0} is not allowed in a transaction. TransactionID={1}.", true),
   OP_NOT_ALLOWED_WITHOUT_TXN(20008, "Operation {0} is not allowed since autoCommit=false and there is no active transaction", true),
-
   //========================== 30000 range starts here ========================//
   STATSPUBLISHER_NOT_OBTAINED(30000, "StatsPublisher cannot be obtained. "
       + "There was a error to retrieve the StatsPublisher, and retrying " +
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
index 5a72af1..f509128 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Table.java
@@ -38,6 +38,7 @@ import org.apache.hadoop.hive.metastore.MetaStoreUtils;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.ForeignKeyRel;
 import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.metastore.api.SerDeInfo;
@@ -653,10 +654,18 @@ public void setPartCols(List partCols) {
     tTable.setPartitionKeys(partCols);
   }
 
+  public void setPrimaryKeys(List<FieldSchema> primaryKeys) {
+    tTable.setPrimaryKeys(primaryKeys);
+  }
+
   public String getDbName() {
     return tTable.getDbName();
   }
 
+  public void setForeignKeys(List<ForeignKeyRel> foreignKeys) {
+    tTable.setForeignKeys(foreignKeys);
+  }
+
   public int getNumBuckets() {
     return tTable.getSd().getNumBuckets();
   }
@@ -784,6 +793,14 @@ public void setDbName(String databaseName) {
     return tTable.getPartitionKeys();
   }
 
+  public List<FieldSchema> getPrimaryKeys() {
+    return tTable.getPrimaryKeys();
+  }
+
+  public List<ForeignKeyRel> getForeignKeys() {
+    return tTable.getForeignKeys();
+  }
+
   /**
    * @return the original view text, or null if this table is not a view
    */
@@ -979,4 +996,6 @@ public boolean hasDeserializer() {
     return deserializer != null;
   }
+
+
 };
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
index f6ba521..7da70fe 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/BaseSemanticAnalyzer.java
@@ -43,6 +43,7 @@ import org.apache.hadoop.hive.metastore.HiveMetaStore;
 import org.apache.hadoop.hive.metastore.api.Database;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.ForeignKeyRel;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.CompilationOpContext;
 import org.apache.hadoop.hive.ql.Context;
@@ -616,29 +617,99 @@ private static String spliceString(String str, int i, int length, String replace
    * Get the list of FieldSchema out of the ASTNode.
*/ public static List getColumns(ASTNode ast, boolean lowerCase) throws SemanticException { + return getColumns(ast, lowerCase, new ArrayList(), new ArrayList()); + } + + /** + * Get the list of FieldSchema out of the ASTNode. + */ + public static List getColumns(ASTNode ast, boolean lowerCase, + List primaryKeys, List foreignKeys) throws SemanticException { List colList = new ArrayList(); int numCh = ast.getChildCount(); + List pkNames = new ArrayList(); + Map nametoFS = new HashMap(); + Tree parent = ast.getParent(); + for (int i = 0; i < numCh; i++) { FieldSchema col = new FieldSchema(); ASTNode child = (ASTNode) ast.getChild(i); - Tree grandChild = child.getChild(0); - if(grandChild != null) { - String name = grandChild.getText(); - if(lowerCase) { - name = name.toLowerCase(); + if (child.getToken().getType() == HiveParser.TOK_PRIMARY_KEY) { + for (int j = 0; j < child.getChild(1).getChildCount(); j++) { + Tree grandChild = child.getChild(1).getChild(j); + pkNames.add(unescapeIdentifier(grandChild.getText().toLowerCase())); } - // child 0 is the name of the column - col.setName(unescapeIdentifier(name)); - // child 1 is the type of the column - ASTNode typeChild = (ASTNode) (child.getChild(1)); - col.setType(getTypeStringFromAST(typeChild)); - - // child 2 is the optional comment of the column - if (child.getChildCount() == 3) { - col.setComment(unescapeSQLString(child.getChild(2).getText())); + } else if (child.getToken().getType() == HiveParser.TOK_FOREIGN_KEY) { + String[] qualifiedTabName = getQualifiedTableName((ASTNode) parent.getChild(0)); + String dbDotTab = getDotName(qualifiedTabName); + ForeignKeyInfo fkInfo = new ForeignKeyInfo(); + // The grammar looks like : + // 1. KW_CONSTRAINT idfr=identifier KW_FOREIGN key fkCols=columnParenthesesList + // KW_REFERENCES tabName=tableName parCols=columnParenthesesList -> + // ^(TOK_FOREIGN_KEY $idfr $fkCols $tabName $parCols + // when the user specifies the constraint name (i.e. child.getChildCount() == 8) + // 2. KW_FOREIGN key fkCols=columnParenthesesList + // KW_REFERENCES tabName=tableName parCols=columnParenthesesList -> + // ^(TOK_FOREIGN_KEY $fkCols $tabName $parCols) when no constraint name is specified + boolean userSpecifiedConstraintName = child.getChildCount() == 8; + int fkIndex = userSpecifiedConstraintName ? 2 : 1; + int pkIndex = userSpecifiedConstraintName ? 6 : 5; + int ptIndex = userSpecifiedConstraintName ? 
4 : 3; + + if (child.getChildCount() <= fkIndex ||child.getChildCount() <= pkIndex || + child.getChildCount() <= ptIndex) { + throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg()); + } + + fkInfo.qualifiedParentTableName = getQualifiedTableName((ASTNode) child.getChild(ptIndex)); + if (child.getChild(fkIndex).getChildCount() != child.getChild(pkIndex).getChildCount()) { + throw new SemanticException(ErrorMsg.INVALID_FK_SYNTAX.getMsg(fkInfo.constraintName + + " : the number of foreign key columns should be same as number of parent key columns ")); + } + for (int j = 0; j < child.getChild(fkIndex).getChildCount(); j++) { + Tree grandChild = child.getChild(fkIndex).getChild(j); + fkInfo.foreignKeyCols.add(unescapeIdentifier(grandChild.getText().toLowerCase())); + } + for (int j = 0; j < child.getChild(pkIndex).getChildCount(); j++) { + Tree grandChild = child.getChild(pkIndex).getChild(j); + fkInfo.parentKeyCols.add(unescapeIdentifier(grandChild.getText().toLowerCase())); + } + if (userSpecifiedConstraintName) { + fkInfo.constraintName = unescapeIdentifier(child.getChild(0).getText().toLowerCase()); + } else { + fkInfo.constraintName = ParseUtils. + generateFKName(dbDotTab, fkInfo.foreignKeyCols, fkInfo.parentKeyCols, getDotName(fkInfo.qualifiedParentTableName)); + } + foreignKeys.add(fkInfo); + } + else { + Tree grandChild = child.getChild(0); + if(grandChild != null) { + String name = grandChild.getText(); + if(lowerCase) { + name = name.toLowerCase(); + } + // child 0 is the name of the column + col.setName(unescapeIdentifier(name)); + // child 1 is the type of the column + ASTNode typeChild = (ASTNode) (child.getChild(1)); + col.setType(getTypeStringFromAST(typeChild)); + + // child 2 is the optional comment of the column + if (child.getChildCount() == 3) { + col.setComment(unescapeSQLString(child.getChild(2).getText())); + } } + nametoFS.put(col.getName(), col); + colList.add(col); + } + } + for (String pk : pkNames) { + if (nametoFS.containsKey(pk)) { + primaryKeys.add(new FieldSchema(nametoFS.get(pk))); + } else { + throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(pk)); } - colList.add(col); } return colList; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ForeignKeyInfo.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ForeignKeyInfo.java new file mode 100644 index 0000000..88dc47d --- /dev/null +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ForeignKeyInfo.java @@ -0,0 +1,30 @@ +package org.apache.hadoop.hive.ql.parse; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.commons.lang3.ArrayUtils; +import org.apache.hadoop.hive.metastore.api.ForeignKeyRel; +import org.apache.hadoop.hive.ql.metadata.Hive; + +/* + * Intermediate structure to capture foreignKey information for a table in BaseSemanticAnalyzer.getColumns() + */ +public class ForeignKeyInfo { + String constraintName; + String[] qualifiedParentTableName; + List foreignKeyCols; + List parentKeyCols; + + public ForeignKeyInfo(String cn, String[] pt, List fkCols, List pkCols) { + constraintName = cn; + qualifiedParentTableName = pt; + foreignKeyCols = fkCols; + parentKeyCols = pkCols; + } + + public ForeignKeyInfo() { + foreignKeyCols = new ArrayList(); + parentKeyCols = new ArrayList(); + } +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index dd997f0..a3be3d3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ 
b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -318,6 +318,13 @@ KW_LEVEL: 'LEVEL'; KW_SNAPSHOT: 'SNAPSHOT'; KW_AUTOCOMMIT: 'AUTOCOMMIT'; KW_CACHE: 'CACHE'; +KW_PRIMARY: 'PRIMARY'; +KW_FOREIGN: 'FOREIGN'; +KW_REFERENCES: 'REFERENCES'; +KW_CONSTRAINT: 'CONSTRAINT'; +KW_NOVALIDATE: 'NOVALIDATE'; +KW_RELY: 'RELY'; +KW_NORELY: 'NORELY'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index 50c53db..8811472 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -102,6 +102,12 @@ TOK_METADATA; TOK_NULL; TOK_ISNULL; TOK_ISNOTNULL; +TOK_PRIMARY_KEY; +TOK_FOREIGN_KEY; +TOK_DISABLE; +TOK_NOVALIDATE; +TOK_RELY; +TOK_NORELY; TOK_TINYINT; TOK_SMALLINT; TOK_INT; @@ -175,6 +181,12 @@ TOK_ALTERTABLE_CLUSTER_SORT; TOK_ALTERTABLE_COMPACT; TOK_ALTERINDEX_REBUILD; TOK_ALTERINDEX_PROPERTIES; +TOK_ALTERTABLE_ADDFOREIGNKEY; +TOK_ALTERTABLE_ADDPRIMARYKEY; +TOK_ALTERTABLE_ADDCONSTRAINT; +TOK_ALTERTABLE_DROPFOREIGNKEY; +TOK_ALTERTABLE_DROPPRIMARYKEY; +TOK_ALTERTABLE_DROPCONSTRAINT; TOK_MSCK; TOK_SHOWDATABASES; TOK_SHOWTABLES; @@ -515,7 +527,15 @@ import org.apache.hadoop.hive.conf.HiveConf; xlateMap.put("KW_UPDATE", "UPDATE"); xlateMap.put("KW_VALUES", "VALUES"); xlateMap.put("KW_PURGE", "PURGE"); - + xlateMap.put("KW_PRIMARY", "PRIMARY"); + xlateMap.put("KW_FOREIGN", "FOREIGN"); + xlateMap.put("KW_KEY", "KEY"); + xlateMap.put("KW_REFERENCES", "REFERENCES"); + xlateMap.put("KW_CONSTRAINT", "CONSTRAINT"); + xlateMap.put("KW_DISABLE", "DISABLE"); + xlateMap.put("KW_NOVALIDATE", "NOVALIDATE"); + xlateMap.put("KW_RELY", "RELY"); + xlateMap.put("KW_NORELY", "NORELY"); // Operators xlateMap.put("DOT", "."); @@ -890,7 +910,7 @@ createTableStatement tableFileFormat? tableLocation? tablePropertiesPrefixed? - | (LPAREN columnNameTypeList RPAREN)? + | (LPAREN columnNameTypeOrPKOrFKList RPAREN)? tableComment? tablePartition? tableBuckets? @@ -903,7 +923,7 @@ createTableStatement ) -> ^(TOK_CREATETABLE $name $temp? $ext? ifNotExists? ^(TOK_LIKETABLE $likeName?) - columnNameTypeList? + columnNameTypeOrPKOrFKList? tableComment? tablePartition? tableBuckets? @@ -1943,6 +1963,11 @@ columnNameTypeList @after { popMsg(state); } : columnNameType (COMMA columnNameType)* -> ^(TOK_TABCOLLIST columnNameType+) ; +columnNameTypeOrPKOrFKList +@init { pushMsg("column name type list with PK and FK", state); } +@after { popMsg(state); } + : columnNameTypeOrPKOrFK (COMMA columnNameTypeOrPKOrFK)* -> ^(TOK_TABCOLLIST columnNameTypeOrPKOrFK+) + ; columnNameColonTypeList @init { pushMsg("column name type list", state); } @@ -1976,6 +2001,39 @@ columnNameOrderList : columnNameOrder (COMMA columnNameOrder)* -> ^(TOK_TABCOLNAME columnNameOrder+) ; +columnParenthesesList +@init { pushMsg("column parentheses list", state); } +@after { popMsg(state); } + : LPAREN columnNameList RPAREN + ; + +primaryKeyWithoutName +@init { pushMsg("primary key without key name", state); } +@after { popMsg(state); } + : KW_PRIMARY idfr=identifier columnParenthesesList + -> {$idfr.tree.toString().equalsIgnoreCase("key")}? 
+ ^(TOK_PRIMARY_KEY columnParenthesesList) + -> ^(TOK_PRIMARY_KEY) + ; + +foreignKeyWithName +@init { pushMsg("foreign key with key name", state); } +@after { popMsg(state); } + : KW_CONSTRAINT idfr=identifier KW_FOREIGN idfr2=identifier fkCols=columnParenthesesList KW_REFERENCES tabName=tableName parCols=columnParenthesesList + -> {$idfr2.tree.toString().equalsIgnoreCase("key")}? + ^(TOK_FOREIGN_KEY $idfr $fkCols $tabName $parCols) + -> ^(TOK_FOREIGN_KEY) + ; + +foreignKeyWithoutName +@init { pushMsg("foreign key without key name", state); } +@after { popMsg(state); } + : KW_FOREIGN idfr=identifier fkCols=columnParenthesesList KW_REFERENCES tabName=tableName parCols=columnParenthesesList + -> {$idfr.tree.toString().equalsIgnoreCase("key")}? + ^(TOK_FOREIGN_KEY $fkCols $tabName $parCols) + -> ^(TOK_FOREIGN_KEY) + ; + skewedValueElement @init { pushMsg("skewed value element", state); } @after { popMsg(state); } @@ -2087,6 +2145,15 @@ columnNameType -> ^(TOK_TABCOL $colName colType $comment) ; +columnNameTypeOrPKOrFK +@init { pushMsg("column name or primary key or foreign key", state); } +@after { popMsg(state); } + : ( foreignKeyWithName ) + | ( primaryKeyWithoutName ) + | ( foreignKeyWithoutName ) + | ( columnNameType ) + ; + columnNameColonType @init { pushMsg("column specification", state); } @after { popMsg(state); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java index 549d24f..2b0098a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ImportSemanticAnalyzer.java @@ -312,7 +312,7 @@ private CreateTableDesc getBaseCreateTableDescFromTable(String dbName, (null == table.getSd().getSkewedInfo()) ? null : table.getSd().getSkewedInfo() .getSkewedColNames(), (null == table.getSd().getSkewedInfo()) ? 
null : table.getSd().getSkewedInfo() - .getSkewedColValues()); + .getSkewedColValues(), table.getPrimaryKeys(), table.getForeignKeys()); tblDesc.setStoredAsSubDirectories(table.getSd().isStoredAsSubDirectories()); return tblDesc; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java index 5f13277..ed11921 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/ParseUtils.java @@ -20,6 +20,7 @@ import java.util.ArrayDeque; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashSet; import java.util.Iterator; import java.util.List; @@ -28,12 +29,17 @@ import java.util.Stack; import org.antlr.runtime.tree.Tree; +import org.apache.commons.lang3.ArrayUtils; import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.ForeignKeyRel; import org.apache.hadoop.hive.ql.ErrorMsg; import org.apache.hadoop.hive.ql.exec.PTFUtils; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; @@ -313,4 +319,63 @@ public static boolean sameTree(ASTNode node, ASTNode otherNode) { return stack.empty() && otherStack.empty(); } + + public static String generateFKName(String dotTabName, List pkNames, List fkNames, String parentTableName) { + int hashcode = (ArrayUtils.toString(pkNames) + ArrayUtils.toString(fkNames) + parentTableName).hashCode(); + // TODO: we need to verify if this is duplicate name in the metastore. + return dotTabName + hashcode; + } + + public static List convertToForeignKeyRel(List foreignKeyInfos, + BaseSemanticAnalyzer semAnalyzer, List fkCols) throws SemanticException { + List fkRels = new ArrayList(); + + // TODO : The below linear searches need to be improved. Maybe, using a mapping? 
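+      // Illustration (hypothetical DDL, not part of this patch): given
+      //   CREATE TABLE child (a int, CONSTRAINT fk1 FOREIGN KEY (a) REFERENCES parent (id))
+      // fkInfo.foreignKeyCols is ["a"] and fkInfo.parentKeyCols is ["id"]; the loops below
+      // match each name case-insensitively against the parent table's columns and the
+      // child's column list (fkCols), failing with INVALID_FK_SYNTAX on any unknown name.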
+ for (ForeignKeyInfo fkInfo : foreignKeyInfos) { + ForeignKeyRel currFKRel = new ForeignKeyRel(); + currFKRel.setForeignKeyName(fkInfo.constraintName); + currFKRel.setParentTableName(fkInfo.qualifiedParentTableName[1]); + currFKRel.setParentTableSchema(fkInfo.qualifiedParentTableName[0]); + Table tb = semAnalyzer.getTable(BaseSemanticAnalyzer.getDotName(fkInfo.qualifiedParentTableName)); + if (tb == null) { + throw new SemanticException( + ErrorMsg.INVALID_FK_SYNTAX.getMsg("Invalid parent table name " + + BaseSemanticAnalyzer.getDotName(fkInfo.qualifiedParentTableName))); + } + List allParentCols = tb.getAllCols(); + + for (String pkColName : fkInfo.parentKeyCols) { + boolean found = false; + for (FieldSchema fs : allParentCols) { + if (pkColName.equalsIgnoreCase(fs.getName())) { + currFKRel.addToParentCols(fs); + found = true; + break; + } + } + if (!found) { + throw new SemanticException( + ErrorMsg.INVALID_FK_SYNTAX.getMsg("Invalid parent column name " + pkColName)); + } + } + + for (String fkColName : fkInfo.foreignKeyCols) { + boolean found = false; + for (FieldSchema fs : fkCols) { + if (fkColName.equalsIgnoreCase(fs.getName())) { + currFKRel.addToForeignCols(fs); + found = true; + break; + } + } + if (!found) { + throw new SemanticException( + ErrorMsg.INVALID_FK_SYNTAX.getMsg("Invalid foreign column name " + fkColName)); + } + } + fkRels.add(currFKRel); + } + return fkRels; + } + } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index e81d46e..6287ed9 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -66,6 +66,7 @@ import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.ForeignKeyRel; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.ql.CompilationOpContext; @@ -11269,6 +11270,9 @@ ASTNode analyzeCreateTable( List cols = new ArrayList(); List partCols = new ArrayList(); List bucketCols = new ArrayList(); + List primaryKeys = new ArrayList(); + List foreignKeyInfos = new ArrayList(); + List foreignKeys = new ArrayList(); List sortCols = new ArrayList(); int numBuckets = -1; String comment = null; @@ -11361,7 +11365,10 @@ ASTNode analyzeCreateTable( selectStmt = child; break; case HiveParser.TOK_TABCOLLIST: - cols = getColumns(child); + cols = getColumns(child, true, primaryKeys, foreignKeyInfos); + if (!foreignKeyInfos.isEmpty()) { + foreignKeys = ParseUtils.convertToForeignKeyRel(foreignKeyInfos, this, cols); + } break; case HiveParser.TOK_TABLECOMMENT: comment = unescapeSQLString(child.getChild(0).getText()); @@ -11473,7 +11480,7 @@ ASTNode analyzeCreateTable( comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists, skewedColNames, - skewedValues); + skewedValues, primaryKeys, foreignKeys); crtTblDesc.setStoredAsSubDirectories(storedAsDirs); crtTblDesc.setNullFormat(rowFormatParams.nullFormat); @@ -11567,7 +11574,7 @@ ASTNode analyzeCreateTable( rowFormatParams.lineDelim, comment, storageFormat.getInputFormat(), storageFormat.getOutputFormat(), location, storageFormat.getSerde(), 
          storageFormat.getStorageHandler(), storageFormat.getSerdeProps(), tblProps, ifNotExists,
-         skewedColNames, skewedValues, true);
+         skewedColNames, skewedValues, true, primaryKeys, foreignKeys);
      tableDesc.setMaterialization(isMaterialization);
      tableDesc.setStoredAsSubDirectories(storedAsDirs);
      tableDesc.setNullFormat(rowFormatParams.nullFormat);
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
index 03b4d8b..5c3d175 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java
@@ -28,6 +28,7 @@
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.TableType;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.metastore.api.ForeignKeyRel;
 import org.apache.hadoop.hive.metastore.api.Order;
 import org.apache.hadoop.hive.ql.ErrorMsg;
 import org.apache.hadoop.hive.ql.exec.DDLTask;
@@ -88,6 +89,8 @@
   private boolean isMaterialization = false;
   private boolean replaceMode = false;
   private boolean isCTAS = false;
+  List<FieldSchema> primaryKeys;
+  List<ForeignKeyRel> foreignKeys;

   public CreateTableDesc() {
   }
@@ -101,13 +104,14 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal,
       String storageHandler,
       Map<String, String> serdeProps,
       Map<String, String> tblProps,
-      boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues) {
+      boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues,
+      List<FieldSchema> primaryKeys, List<ForeignKeyRel> foreignKeys) {
     this(tableName, isExternal, isTemporary, cols, partCols,
         bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape,
         collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat,
         outputFormat, location, serName, storageHandler, serdeProps,
-        tblProps, ifNotExists, skewedColNames, skewedColValues);
+        tblProps, ifNotExists, skewedColNames, skewedColValues, primaryKeys, foreignKeys);
     this.databaseName = databaseName;
   }

@@ -122,12 +126,12 @@ public CreateTableDesc(String databaseName, String tableName, boolean isExternal,
       Map<String, String> serdeProps,
       Map<String, String> tblProps,
       boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues,
-      boolean isCTAS) {
+      boolean isCTAS, List<FieldSchema> primaryKeys, List<ForeignKeyRel> foreignKeys) {
     this(databaseName, tableName, isExternal, isTemporary, cols, partCols,
         bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape,
         collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat,
         outputFormat, location, serName, storageHandler, serdeProps,
-        tblProps, ifNotExists, skewedColNames, skewedColValues);
+        tblProps, ifNotExists, skewedColNames, skewedColValues, primaryKeys, foreignKeys);
     this.isCTAS = isCTAS;
   }

@@ -142,7 +146,8 @@ public CreateTableDesc(String tableName, boolean isExternal, boolean isTemporary,
       String storageHandler,
       Map<String, String> serdeProps,
       Map<String, String> tblProps,
-      boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues) {
+      boolean ifNotExists, List<String> skewedColNames, List<List<String>> skewedColValues,
+      List<FieldSchema> primaryKeys, List<ForeignKeyRel> foreignKeys) {
     this.tableName = tableName;
     this.isExternal = isExternal;
     this.isTemporary = isTemporary;
@@ -167,6 +172,8 @@
     this.ifNotExists = ifNotExists;
     this.skewedColNames = copyList(skewedColNames);
     this.skewedColValues = copyList(skewedColValues);
+    this.primaryKeys = new ArrayList<FieldSchema>(primaryKeys);
+    this.foreignKeys = new ArrayList<ForeignKeyRel>(foreignKeys);
   }

   private static <T> List<T> copyList(List<T> copy) {
@@ -183,6 +190,11 @@ public CreateTableDesc(String tableName, boolean isExternal,
 boolean isTemporary
     return Utilities.getFieldSchemaString(getPartCols());
   }

+  @Explain(displayName = "primary keys")
+  public List<String> getPrimaryKeysString() {
+    return Utilities.getFieldSchemaString(getPrimaryKeys());
+  }
+
   @Explain(displayName = "if not exists", displayOnlyOnTrue = true)
   public boolean getIfNotExists() {
     return ifNotExists;
@@ -221,6 +233,22 @@
   public void setPartCols(ArrayList<FieldSchema> partCols) {
     this.partCols = partCols;
   }

+  public List<FieldSchema> getPrimaryKeys() {
+    return primaryKeys;
+  }
+
+  public void setPrimaryKeys(ArrayList<FieldSchema> primaryKeys) {
+    this.primaryKeys = primaryKeys;
+  }
+
+  public List<ForeignKeyRel> getForeignKeys() {
+    return foreignKeys;
+  }
+
+  public void setForeignKeys(ArrayList<ForeignKeyRel> foreignKeys) {
+    this.foreignKeys = foreignKeys;
+  }
+
   @Explain(displayName = "bucket columns")
   public List<String> getBucketCols() {
     return bucketCols;
@@ -634,6 +662,15 @@ public Table toTable(HiveConf conf) throws HiveException {
     if (getPartCols() != null) {
       tbl.setPartCols(getPartCols());
     }
+
+    if (getPrimaryKeys() != null) {
+      tbl.setPrimaryKeys(getPrimaryKeys());
+    }
+
+    if (getForeignKeys() != null) {
+      tbl.setForeignKeys(getForeignKeys());
+    }
+
     if (getNumBuckets() != -1) {
       tbl.setNumBuckets(getNumBuckets());
     }
diff --git a/ql/src/test/queries/clientpositive/perf/query100.q b/ql/src/test/queries/clientpositive/perf/query100.q
new file mode 100644
index 0000000..edf2e6a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/perf/query100.q
@@ -0,0 +1,12 @@
+set hive.transpose.aggr.join=true;
+
+explain
+SELECT `date_dim`.`d_day_name` AS `d_day_name`,
+       `item`.`i_category` AS `i_category`
+FROM `store_sales` `store_sales`
+     INNER JOIN `item` `item`
+             ON ( `store_sales`.`ss_item_sk` = `item`.`i_item_sk` )
+     INNER JOIN `date_dim` `date_dim`
+             ON ( `store_sales`.`ss_sold_date_sk` = `date_dim`.`d_date_sk` )
+GROUP BY `d_day_name`,
+         `i_category`;
diff --git a/ql/src/test/results/clientpositive/alter1.q.out b/ql/src/test/results/clientpositive/alter1.q.out
index 99248c3..8e2fabf 100644
--- a/ql/src/test/results/clientpositive/alter1.q.out
+++ b/ql/src/test/results/clientpositive/alter1.q.out
@@ -193,21 +193,6 @@
 PREHOOK: Input: database:default
 POSTHOOK: query: SHOW TABLES
 POSTHOOK: type: SHOWTABLES
 POSTHOOK: Input: database:default
-alltypesorc
-cbo_t1
-cbo_t2
-cbo_t3
-lineitem
-part
-src
-src1
-src_cbo
-src_json
-src_sequencefile
-src_thrift
-srcbucket
-srcbucket2
-srcpart
 PREHOOK: query: -- With non-default Database
 CREATE DATABASE alter1_db
diff --git a/service-rpc/if/TCLIService.thrift b/service-rpc/if/TCLIService.thrift
index aa28b6e..efe7750 100644
--- a/service-rpc/if/TCLIService.thrift
+++ b/service-rpc/if/TCLIService.thrift
@@ -955,6 +955,52 @@ struct TGetFunctionsResp {
   2: optional TOperationHandle operationHandle
 }

+struct TGetPrimaryKeysReq {
+  // Session to run this request against
+  1: required TSessionHandle sessionHandle
+
+  // Name of the catalog.
+  2: optional TIdentifier catalogName
+
+  // Name of the schema.
+  3: optional TIdentifier schemaName
+
+  // Name of the table.
+  4: optional TIdentifier tableName
+}
+
+struct TGetPrimaryKeysResp {
+  1: required TStatus status
+  2: optional TOperationHandle operationHandle
+}
+
+struct TGetCrossReferenceReq {
+  // Session to run this request against
+  1: required TSessionHandle sessionHandle
+
+  // Name of the parent catalog.
+  2: optional TIdentifier parentCatalogName
+
+  // Name of the parent schema.
+  3: optional TIdentifier parentSchemaName
+
+  // Name of the parent table.
+  4: optional TIdentifier parentTableName
+
+  // Name of the foreign catalog.
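+  // (In JDBC terms, the "parent" side is the table whose primary key is
+  // referenced and the "foreign" side is the table holding the foreign
+  // key. A hypothetical client-side sketch of the calls these two
+  // requests are designed to back, where conn is a java.sql.Connection
+  // to HiveServer2:
+  //   DatabaseMetaData md = conn.getMetaData();
+  //   ResultSet pk = md.getPrimaryKeys(null, "default", "parent_tbl");
+  //   ResultSet fk = md.getCrossReference(null, "default", "parent_tbl",
+  //                                       null, "default", "child_tbl");
+  // Table and schema names here are illustrative only.)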
+ 5: optional TIdentifier foreignCatalogName + + // Name of the foreign schema. + 6: optional TIdentifier foreignSchemaName + + // Name of the foreign table. + 7: optional TIdentifier foreignTableName +} + +struct TGetCrossReferenceResp { + 1: required TStatus status + 2: optional TOperationHandle operationHandle +} // GetOperationStatus() // @@ -1165,6 +1211,10 @@ service TCLIService { TGetFunctionsResp GetFunctions(1:TGetFunctionsReq req); + TGetPrimaryKeysResp GetPrimaryKeys(1:TGetPrimaryKeysReq req); + + TGetCrossReferenceResp GetCrossReference(1:TGetCrossReferenceReq req); + TGetOperationStatusResp GetOperationStatus(1:TGetOperationStatusReq req); TCancelOperationResp CancelOperation(1:TCancelOperationReq req); diff --git a/service-rpc/if/TCLIService.thrift.orig b/service-rpc/if/TCLIService.thrift.orig new file mode 100644 index 0000000..aa28b6e --- /dev/null +++ b/service-rpc/if/TCLIService.thrift.orig @@ -0,0 +1,1183 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Coding Conventions for this file: +// +// Structs/Enums/Unions +// * Struct, Enum, and Union names begin with a "T", +// and use a capital letter for each new word, with no underscores. +// * All fields should be declared as either optional or required. +// +// Functions +// * Function names start with a capital letter and have a capital letter for +// each new word, with no underscores. +// * Each function should take exactly one parameter, named TFunctionNameReq, +// and should return either void or TFunctionNameResp. This convention allows +// incremental updates. +// +// Services +// * Service names begin with the letter "T", use a capital letter for each +// new word (with no underscores), and end with the word "Service". + +namespace java org.apache.hive.service.rpc.thrift +namespace cpp apache.hive.service.rpc.thrift + +// List of protocol versions. A new token should be +// added to the end of this list every time a change is made. 
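+// The effective protocol for a session is the lower of the versions the
+// client and server advertise in OpenSession; in generated Java code a
+// client might compare them as (a sketch, assuming the standard
+// thrift-generated enum helpers):
+//   TProtocolVersion negotiated = TProtocolVersion.findByValue(
+//       Math.min(clientVersion.getValue(), serverVersion.getValue()));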
+enum TProtocolVersion { + HIVE_CLI_SERVICE_PROTOCOL_V1, + + // V2 adds support for asynchronous execution + HIVE_CLI_SERVICE_PROTOCOL_V2 + + // V3 add varchar type, primitive type qualifiers + HIVE_CLI_SERVICE_PROTOCOL_V3 + + // V4 add decimal precision/scale, char type + HIVE_CLI_SERVICE_PROTOCOL_V4 + + // V5 adds error details when GetOperationStatus returns in error state + HIVE_CLI_SERVICE_PROTOCOL_V5 + + // V6 uses binary type for binary payload (was string) and uses columnar result set + HIVE_CLI_SERVICE_PROTOCOL_V6 + + // V7 adds support for delegation token based connection + HIVE_CLI_SERVICE_PROTOCOL_V7 + + // V8 adds support for interval types + HIVE_CLI_SERVICE_PROTOCOL_V8 +} + +enum TTypeId { + BOOLEAN_TYPE, + TINYINT_TYPE, + SMALLINT_TYPE, + INT_TYPE, + BIGINT_TYPE, + FLOAT_TYPE, + DOUBLE_TYPE, + STRING_TYPE, + TIMESTAMP_TYPE, + BINARY_TYPE, + ARRAY_TYPE, + MAP_TYPE, + STRUCT_TYPE, + UNION_TYPE, + USER_DEFINED_TYPE, + DECIMAL_TYPE, + NULL_TYPE, + DATE_TYPE, + VARCHAR_TYPE, + CHAR_TYPE, + INTERVAL_YEAR_MONTH_TYPE, + INTERVAL_DAY_TIME_TYPE +} + +const set PRIMITIVE_TYPES = [ + TTypeId.BOOLEAN_TYPE, + TTypeId.TINYINT_TYPE, + TTypeId.SMALLINT_TYPE, + TTypeId.INT_TYPE, + TTypeId.BIGINT_TYPE, + TTypeId.FLOAT_TYPE, + TTypeId.DOUBLE_TYPE, + TTypeId.STRING_TYPE, + TTypeId.TIMESTAMP_TYPE, + TTypeId.BINARY_TYPE, + TTypeId.DECIMAL_TYPE, + TTypeId.NULL_TYPE, + TTypeId.DATE_TYPE, + TTypeId.VARCHAR_TYPE, + TTypeId.CHAR_TYPE, + TTypeId.INTERVAL_YEAR_MONTH_TYPE, + TTypeId.INTERVAL_DAY_TIME_TYPE +] + +const set COMPLEX_TYPES = [ + TTypeId.ARRAY_TYPE + TTypeId.MAP_TYPE + TTypeId.STRUCT_TYPE + TTypeId.UNION_TYPE + TTypeId.USER_DEFINED_TYPE +] + +const set COLLECTION_TYPES = [ + TTypeId.ARRAY_TYPE + TTypeId.MAP_TYPE +] + +const map TYPE_NAMES = { + TTypeId.BOOLEAN_TYPE: "BOOLEAN", + TTypeId.TINYINT_TYPE: "TINYINT", + TTypeId.SMALLINT_TYPE: "SMALLINT", + TTypeId.INT_TYPE: "INT", + TTypeId.BIGINT_TYPE: "BIGINT", + TTypeId.FLOAT_TYPE: "FLOAT", + TTypeId.DOUBLE_TYPE: "DOUBLE", + TTypeId.STRING_TYPE: "STRING", + TTypeId.TIMESTAMP_TYPE: "TIMESTAMP", + TTypeId.BINARY_TYPE: "BINARY", + TTypeId.ARRAY_TYPE: "ARRAY", + TTypeId.MAP_TYPE: "MAP", + TTypeId.STRUCT_TYPE: "STRUCT", + TTypeId.UNION_TYPE: "UNIONTYPE", + TTypeId.DECIMAL_TYPE: "DECIMAL", + TTypeId.NULL_TYPE: "NULL" + TTypeId.DATE_TYPE: "DATE" + TTypeId.VARCHAR_TYPE: "VARCHAR" + TTypeId.CHAR_TYPE: "CHAR" + TTypeId.INTERVAL_YEAR_MONTH_TYPE: "INTERVAL_YEAR_MONTH" + TTypeId.INTERVAL_DAY_TIME_TYPE: "INTERVAL_DAY_TIME" +} + +// Thrift does not support recursively defined types or forward declarations, +// which makes it difficult to represent Hive's nested types. +// To get around these limitations TTypeDesc employs a type list that maps +// integer "pointers" to TTypeEntry objects. 
The following examples show +// how different types are represented using this scheme: +// +// "INT": +// TTypeDesc { +// types = [ +// TTypeEntry.primitive_entry { +// type = INT_TYPE +// } +// ] +// } +// +// "ARRAY": +// TTypeDesc { +// types = [ +// TTypeEntry.array_entry { +// object_type_ptr = 1 +// }, +// TTypeEntry.primitive_entry { +// type = INT_TYPE +// } +// ] +// } +// +// "MAP": +// TTypeDesc { +// types = [ +// TTypeEntry.map_entry { +// key_type_ptr = 1 +// value_type_ptr = 2 +// }, +// TTypeEntry.primitive_entry { +// type = INT_TYPE +// }, +// TTypeEntry.primitive_entry { +// type = STRING_TYPE +// } +// ] +// } + +typedef i32 TTypeEntryPtr + +// Valid TTypeQualifiers key names +const string CHARACTER_MAXIMUM_LENGTH = "characterMaximumLength" + +// Type qualifier key name for decimal +const string PRECISION = "precision" +const string SCALE = "scale" + +union TTypeQualifierValue { + 1: optional i32 i32Value + 2: optional string stringValue +} + +// Type qualifiers for primitive type. +struct TTypeQualifiers { + 1: required map qualifiers +} + +// Type entry for a primitive type. +struct TPrimitiveTypeEntry { + // The primitive type token. This must satisfy the condition + // that type is in the PRIMITIVE_TYPES set. + 1: required TTypeId type + 2: optional TTypeQualifiers typeQualifiers +} + +// Type entry for an ARRAY type. +struct TArrayTypeEntry { + 1: required TTypeEntryPtr objectTypePtr +} + +// Type entry for a MAP type. +struct TMapTypeEntry { + 1: required TTypeEntryPtr keyTypePtr + 2: required TTypeEntryPtr valueTypePtr +} + +// Type entry for a STRUCT type. +struct TStructTypeEntry { + 1: required map nameToTypePtr +} + +// Type entry for a UNIONTYPE type. +struct TUnionTypeEntry { + 1: required map nameToTypePtr +} + +struct TUserDefinedTypeEntry { + // The fully qualified name of the class implementing this type. + 1: required string typeClassName +} + +// We use a union here since Thrift does not support inheritance. +union TTypeEntry { + 1: TPrimitiveTypeEntry primitiveEntry + 2: TArrayTypeEntry arrayEntry + 3: TMapTypeEntry mapEntry + 4: TStructTypeEntry structEntry + 5: TUnionTypeEntry unionEntry + 6: TUserDefinedTypeEntry userDefinedTypeEntry +} + +// Type descriptor for columns. +struct TTypeDesc { + // The "top" type is always the first element of the list. + // If the top type is an ARRAY, MAP, STRUCT, or UNIONTYPE + // type, then subsequent elements represent nested types. + 1: required list types +} + +// A result set column descriptor. +struct TColumnDesc { + // The name of the column + 1: required string columnName + + // The type descriptor for this column + 2: required TTypeDesc typeDesc + + // The ordinal position of this column in the schema + 3: required i32 position + + 4: optional string comment +} + +// Metadata used to describe the schema (column names, types, comments) +// of result sets. +struct TTableSchema { + 1: required list columns +} + +// A Boolean column value. +struct TBoolValue { + // NULL if value is unset. + 1: optional bool value +} + +// A Byte column value. +struct TByteValue { + // NULL if value is unset. + 1: optional byte value +} + +// A signed, 16 bit column value. 
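+// (For all of these wrapper structs, SQL NULL is represented by leaving
+// the optional field unset; in the generated Java code that reads as,
+// e.g. for the TI16Value below -- a sketch assuming the standard
+// thrift-generated accessors:
+//   Short s = v.isSetValue() ? v.getValue() : null;
+// )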
+struct TI16Value { + // NULL if value is unset + 1: optional i16 value +} + +// A signed, 32 bit column value +struct TI32Value { + // NULL if value is unset + 1: optional i32 value +} + +// A signed 64 bit column value +struct TI64Value { + // NULL if value is unset + 1: optional i64 value +} + +// A floating point 64 bit column value +struct TDoubleValue { + // NULL if value is unset + 1: optional double value +} + +struct TStringValue { + // NULL if value is unset + 1: optional string value +} + +// A single column value in a result set. +// Note that Hive's type system is richer than Thrift's, +// so in some cases we have to map multiple Hive types +// to the same Thrift type. On the client-side this is +// disambiguated by looking at the Schema of the +// result set. +union TColumnValue { + 1: TBoolValue boolVal // BOOLEAN + 2: TByteValue byteVal // TINYINT + 3: TI16Value i16Val // SMALLINT + 4: TI32Value i32Val // INT + 5: TI64Value i64Val // BIGINT, TIMESTAMP + 6: TDoubleValue doubleVal // FLOAT, DOUBLE + 7: TStringValue stringVal // STRING, LIST, MAP, STRUCT, UNIONTYPE, BINARY, DECIMAL, NULL, INTERVAL_YEAR_MONTH, INTERVAL_DAY_TIME +} + +// Represents a row in a rowset. +struct TRow { + 1: required list colVals +} + +struct TBoolColumn { + 1: required list values + 2: required binary nulls +} + +struct TByteColumn { + 1: required list values + 2: required binary nulls +} + +struct TI16Column { + 1: required list values + 2: required binary nulls +} + +struct TI32Column { + 1: required list values + 2: required binary nulls +} + +struct TI64Column { + 1: required list values + 2: required binary nulls +} + +struct TDoubleColumn { + 1: required list values + 2: required binary nulls +} + +struct TStringColumn { + 1: required list values + 2: required binary nulls +} + +struct TBinaryColumn { + 1: required list values + 2: required binary nulls +} + +// Note that Hive's type system is richer than Thrift's, +// so in some cases we have to map multiple Hive types +// to the same Thrift type. On the client-side this is +// disambiguated by looking at the Schema of the +// result set. +union TColumn { + 1: TBoolColumn boolVal // BOOLEAN + 2: TByteColumn byteVal // TINYINT + 3: TI16Column i16Val // SMALLINT + 4: TI32Column i32Val // INT + 5: TI64Column i64Val // BIGINT, TIMESTAMP + 6: TDoubleColumn doubleVal // FLOAT, DOUBLE + 7: TStringColumn stringVal // STRING, LIST, MAP, STRUCT, UNIONTYPE, DECIMAL, NULL + 8: TBinaryColumn binaryVal // BINARY +} + +// Represents a rowset +struct TRowSet { + // The starting row offset of this rowset. + 1: required i64 startRowOffset + 2: required list rows + 3: optional list columns +} + +// The return status code contained in each response. +enum TStatusCode { + SUCCESS_STATUS, + SUCCESS_WITH_INFO_STATUS, + STILL_EXECUTING_STATUS, + ERROR_STATUS, + INVALID_HANDLE_STATUS +} + +// The return status of a remote request +struct TStatus { + 1: required TStatusCode statusCode + + // If status is SUCCESS_WITH_INFO, info_msgs may be populated with + // additional diagnostic information. + 2: optional list infoMessages + + // If status is ERROR, then the following fields may be set + 3: optional string sqlState // as defined in the ISO/IEF CLI specification + 4: optional i32 errorCode // internal error code + 5: optional string errorMessage +} + +// The state of an operation (i.e. a query or other +// asynchronous operation that generates a result set) +// on the server. 
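+// FINISHED_STATE, CANCELED_STATE, CLOSED_STATE, and ERROR_STATE below are
+// the terminal states a polling client normally waits for; in Java they
+// could be collected as (sketch against the generated enum):
+//   EnumSet<TOperationState> terminal = EnumSet.of(
+//       TOperationState.FINISHED_STATE, TOperationState.CANCELED_STATE,
+//       TOperationState.CLOSED_STATE, TOperationState.ERROR_STATE);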
+enum TOperationState {
+  // The operation has been initialized
+  INITIALIZED_STATE,
+
+  // The operation is running. In this state the result
+  // set is not available.
+  RUNNING_STATE,
+
+  // The operation has completed. When an operation is in
+  // this state its result set may be fetched.
+  FINISHED_STATE,
+
+  // The operation was canceled by a client
+  CANCELED_STATE,
+
+  // The operation was closed by a client
+  CLOSED_STATE,
+
+  // The operation failed due to an error
+  ERROR_STATE,
+
+  // The operation is in an unrecognized state
+  UKNOWN_STATE,
+
+  // The operation is in a pending state
+  PENDING_STATE,
+}
+
+// A string identifier. This is interpreted literally.
+typedef string TIdentifier
+
+// A search pattern.
+//
+// Valid search pattern characters:
+// '_': Any single character.
+// '%': Any sequence of zero or more characters.
+// '\': Escape character used to include special characters,
+//      e.g. '_', '%', '\'. If a '\' precedes a non-special
+//      character it has no special meaning and is interpreted
+//      literally.
+typedef string TPattern
+
+
+// A search pattern or identifier. Used as input
+// parameter for many of the catalog functions.
+typedef string TPatternOrIdentifier
+
+struct THandleIdentifier {
+  // 16 byte globally unique identifier
+  // This is the public ID of the handle and
+  // can be used for reporting.
+  1: required binary guid,
+
+  // 16 byte secret generated by the server
+  // and used to verify that the handle is not
+  // being hijacked by another user.
+  2: required binary secret,
+}
+
+// Client-side handle to persistent
+// session information on the server-side.
+struct TSessionHandle {
+  1: required THandleIdentifier sessionId
+}
+
+// The subtype of an OperationHandle.
+enum TOperationType {
+  EXECUTE_STATEMENT,
+  GET_TYPE_INFO,
+  GET_CATALOGS,
+  GET_SCHEMAS,
+  GET_TABLES,
+  GET_TABLE_TYPES,
+  GET_COLUMNS,
+  GET_FUNCTIONS,
+  UNKNOWN,
+}
+
+// Client-side reference to a task running
+// asynchronously on the server.
+struct TOperationHandle {
+  1: required THandleIdentifier operationId
+  2: required TOperationType operationType
+
+  // If hasResultSet = TRUE, then this operation
+  // generates a result set that can be fetched.
+  // Note that the result set may be empty.
+  //
+  // If hasResultSet = FALSE, then this operation
+  // does not generate a result set, and calling
+  // GetResultSetMetadata or FetchResults against
+  // this OperationHandle will generate an error.
+  3: required bool hasResultSet
+
+  // For operations that don't generate result sets,
+  // modifiedRowCount is either:
+  //
+  // 1) The number of rows that were modified by
+  //    the DML operation (e.g. number of rows inserted,
+  //    number of rows deleted, etc).
+  //
+  // 2) 0 for operations that don't modify or add rows.
+  //
+  // 3) < 0 if the operation is capable of modifying rows,
+  //    but Hive is unable to determine how many rows were
+  //    modified. For example, Hive's LOAD DATA command
+  //    doesn't generate row count information because
+  //    Hive doesn't inspect the data as it is loaded.
+  //
+  // modifiedRowCount is unset if the operation generates
+  // a result set.
+  4: optional double modifiedRowCount
+}
+
+
+// OpenSession()
+//
+// Open a session (connection) on the server against
+// which operations may be executed.
+struct TOpenSessionReq {
+  // The version of the HiveServer2 protocol that the client is using.
+  1: required TProtocolVersion client_protocol = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8
+
+  // Username and password for authentication.
+ // Depending on the authentication scheme being used, + // this information may instead be provided by a lower + // protocol layer, in which case these fields may be + // left unset. + 2: optional string username + 3: optional string password + + // Configuration overlay which is applied when the session is + // first created. + 4: optional map configuration +} + +struct TOpenSessionResp { + 1: required TStatus status + + // The protocol version that the server is using. + 2: required TProtocolVersion serverProtocolVersion = TProtocolVersion.HIVE_CLI_SERVICE_PROTOCOL_V8 + + // Session Handle + 3: optional TSessionHandle sessionHandle + + // The configuration settings for this session. + 4: optional map configuration +} + + +// CloseSession() +// +// Closes the specified session and frees any resources +// currently allocated to that session. Any open +// operations in that session will be canceled. +struct TCloseSessionReq { + 1: required TSessionHandle sessionHandle +} + +struct TCloseSessionResp { + 1: required TStatus status +} + + + +enum TGetInfoType { + CLI_MAX_DRIVER_CONNECTIONS = 0, + CLI_MAX_CONCURRENT_ACTIVITIES = 1, + CLI_DATA_SOURCE_NAME = 2, + CLI_FETCH_DIRECTION = 8, + CLI_SERVER_NAME = 13, + CLI_SEARCH_PATTERN_ESCAPE = 14, + CLI_DBMS_NAME = 17, + CLI_DBMS_VER = 18, + CLI_ACCESSIBLE_TABLES = 19, + CLI_ACCESSIBLE_PROCEDURES = 20, + CLI_CURSOR_COMMIT_BEHAVIOR = 23, + CLI_DATA_SOURCE_READ_ONLY = 25, + CLI_DEFAULT_TXN_ISOLATION = 26, + CLI_IDENTIFIER_CASE = 28, + CLI_IDENTIFIER_QUOTE_CHAR = 29, + CLI_MAX_COLUMN_NAME_LEN = 30, + CLI_MAX_CURSOR_NAME_LEN = 31, + CLI_MAX_SCHEMA_NAME_LEN = 32, + CLI_MAX_CATALOG_NAME_LEN = 34, + CLI_MAX_TABLE_NAME_LEN = 35, + CLI_SCROLL_CONCURRENCY = 43, + CLI_TXN_CAPABLE = 46, + CLI_USER_NAME = 47, + CLI_TXN_ISOLATION_OPTION = 72, + CLI_INTEGRITY = 73, + CLI_GETDATA_EXTENSIONS = 81, + CLI_NULL_COLLATION = 85, + CLI_ALTER_TABLE = 86, + CLI_ORDER_BY_COLUMNS_IN_SELECT = 90, + CLI_SPECIAL_CHARACTERS = 94, + CLI_MAX_COLUMNS_IN_GROUP_BY = 97, + CLI_MAX_COLUMNS_IN_INDEX = 98, + CLI_MAX_COLUMNS_IN_ORDER_BY = 99, + CLI_MAX_COLUMNS_IN_SELECT = 100, + CLI_MAX_COLUMNS_IN_TABLE = 101, + CLI_MAX_INDEX_SIZE = 102, + CLI_MAX_ROW_SIZE = 104, + CLI_MAX_STATEMENT_LEN = 105, + CLI_MAX_TABLES_IN_SELECT = 106, + CLI_MAX_USER_NAME_LEN = 107, + CLI_OJ_CAPABILITIES = 115, + + CLI_XOPEN_CLI_YEAR = 10000, + CLI_CURSOR_SENSITIVITY = 10001, + CLI_DESCRIBE_PARAMETER = 10002, + CLI_CATALOG_NAME = 10003, + CLI_COLLATION_SEQ = 10004, + CLI_MAX_IDENTIFIER_LEN = 10005, +} + +union TGetInfoValue { + 1: string stringValue + 2: i16 smallIntValue + 3: i32 integerBitmask + 4: i32 integerFlag + 5: i32 binaryValue + 6: i64 lenValue +} + +// GetInfo() +// +// This function is based on ODBC's CLIGetInfo() function. +// The function returns general information about the data source +// using the same keys as ODBC. +struct TGetInfoReq { + // The sesssion to run this request against + 1: required TSessionHandle sessionHandle + + 2: required TGetInfoType infoType +} + +struct TGetInfoResp { + 1: required TStatus status + + 2: required TGetInfoValue infoValue +} + + +// ExecuteStatement() +// +// Execute a statement. +// The returned OperationHandle can be used to check on the +// status of the statement, and to fetch results once the +// statement has finished executing. 
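+// A minimal async round trip from generated Java client code might look
+// like this (a sketch; "client" is a TCLIService.Iface stub and
+// "sessionHandle" comes from OpenSession):
+//   TExecuteStatementReq req = new TExecuteStatementReq(sessionHandle, "SELECT 1");
+//   req.setRunAsync(true);
+//   TOperationHandle op = client.ExecuteStatement(req).getOperationHandle();
+//   // then poll GetOperationStatus(op) until a terminal state is reached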
+struct TExecuteStatementReq { + // The session to execute the statement against + 1: required TSessionHandle sessionHandle + + // The statement to be executed (DML, DDL, SET, etc) + 2: required string statement + + // Configuration properties that are overlayed on top of the + // the existing session configuration before this statement + // is executed. These properties apply to this statement + // only and will not affect the subsequent state of the Session. + 3: optional map confOverlay + + // Execute asynchronously when runAsync is true + 4: optional bool runAsync = false +} + +struct TExecuteStatementResp { + 1: required TStatus status + 2: optional TOperationHandle operationHandle +} + +// GetTypeInfo() +// +// Get information about types supported by the HiveServer instance. +// The information is returned as a result set which can be fetched +// using the OperationHandle provided in the response. +// +// Refer to the documentation for ODBC's CLIGetTypeInfo function for +// the format of the result set. +struct TGetTypeInfoReq { + // The session to run this request against. + 1: required TSessionHandle sessionHandle +} + +struct TGetTypeInfoResp { + 1: required TStatus status + 2: optional TOperationHandle operationHandle +} + + +// GetCatalogs() +// +// Returns the list of catalogs (databases) +// Results are ordered by TABLE_CATALOG +// +// Resultset columns : +// col1 +// name: TABLE_CAT +// type: STRING +// desc: Catalog name. NULL if not applicable. +// +struct TGetCatalogsReq { + // Session to run this request against + 1: required TSessionHandle sessionHandle +} + +struct TGetCatalogsResp { + 1: required TStatus status + 2: optional TOperationHandle operationHandle +} + + +// GetSchemas() +// +// Retrieves the schema names available in this database. +// The results are ordered by TABLE_CATALOG and TABLE_SCHEM. +// col1 +// name: TABLE_SCHEM +// type: STRING +// desc: schema name +// col2 +// name: TABLE_CATALOG +// type: STRING +// desc: catalog name +struct TGetSchemasReq { + // Session to run this request against + 1: required TSessionHandle sessionHandle + + // Name of the catalog. Must not contain a search pattern. + 2: optional TIdentifier catalogName + + // schema name or pattern + 3: optional TPatternOrIdentifier schemaName +} + +struct TGetSchemasResp { + 1: required TStatus status + 2: optional TOperationHandle operationHandle +} + + +// GetTables() +// +// Returns a list of tables with catalog, schema, and table +// type information. The information is returned as a result +// set which can be fetched using the OperationHandle +// provided in the response. +// Results are ordered by TABLE_TYPE, TABLE_CAT, TABLE_SCHEM, and TABLE_NAME +// +// Result Set Columns: +// +// col1 +// name: TABLE_CAT +// type: STRING +// desc: Catalog name. NULL if not applicable. +// +// col2 +// name: TABLE_SCHEM +// type: STRING +// desc: Schema name. +// +// col3 +// name: TABLE_NAME +// type: STRING +// desc: Table name. +// +// col4 +// name: TABLE_TYPE +// type: STRING +// desc: The table type, e.g. "TABLE", "VIEW", etc. +// +// col5 +// name: REMARKS +// type: STRING +// desc: Comments about the table +// +struct TGetTablesReq { + // Session to run this request against + 1: required TSessionHandle sessionHandle + + // Name of the catalog or a search pattern. + 2: optional TPatternOrIdentifier catalogName + + // Name of the schema or a search pattern. + 3: optional TPatternOrIdentifier schemaName + + // Name of the table or a search pattern. 
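+  // e.g. "src%" matches "src", "src1", and "srcpart"; the JDBC call
+  // DatabaseMetaData.getTables(null, "default", "src%", null) is the
+  // usual client-side entry point for this request.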
+  4: optional TPatternOrIdentifier tableName
+
+  // List of table types to match
+  // e.g. "TABLE", "VIEW", "SYSTEM TABLE", "GLOBAL TEMPORARY",
+  // "LOCAL TEMPORARY", "ALIAS", "SYNONYM", etc.
+  5: optional list<string> tableTypes
+}
+
+struct TGetTablesResp {
+  1: required TStatus status
+  2: optional TOperationHandle operationHandle
+}
+
+
+// GetTableTypes()
+//
+// Returns the table types available in this database.
+// The results are ordered by table type.
+//
+// col1
+// name: TABLE_TYPE
+// type: STRING
+// desc: Table type name.
+struct TGetTableTypesReq {
+  // Session to run this request against
+  1: required TSessionHandle sessionHandle
+}
+
+struct TGetTableTypesResp {
+  1: required TStatus status
+  2: optional TOperationHandle operationHandle
+}
+
+
+// GetColumns()
+//
+// Returns a list of columns in the specified tables.
+// The information is returned as a result set which can be fetched
+// using the OperationHandle provided in the response.
+// Results are ordered by TABLE_CAT, TABLE_SCHEM, TABLE_NAME,
+// and ORDINAL_POSITION.
+//
+// Result Set Columns are the same as those for the ODBC CLIColumns
+// function.
+//
+struct TGetColumnsReq {
+  // Session to run this request against
+  1: required TSessionHandle sessionHandle
+
+  // Name of the catalog. Must not contain a search pattern.
+  2: optional TIdentifier catalogName
+
+  // Schema name or search pattern
+  3: optional TPatternOrIdentifier schemaName
+
+  // Table name or search pattern
+  4: optional TPatternOrIdentifier tableName
+
+  // Column name or search pattern
+  5: optional TPatternOrIdentifier columnName
+}
+
+struct TGetColumnsResp {
+  1: required TStatus status
+  2: optional TOperationHandle operationHandle
+}
+
+
+// GetFunctions()
+//
+// Returns a list of functions supported by the data source. The
+// behavior of this function matches
+// java.sql.DatabaseMetaData.getFunctions() both in terms of
+// inputs and outputs.
+//
+// Result Set Columns:
+//
+// col1
+// name: FUNCTION_CAT
+// type: STRING
+// desc: Function catalog (may be null)
+//
+// col2
+// name: FUNCTION_SCHEM
+// type: STRING
+// desc: Function schema (may be null)
+//
+// col3
+// name: FUNCTION_NAME
+// type: STRING
+// desc: Function name. This is the name used to invoke the function.
+//
+// col4
+// name: REMARKS
+// type: STRING
+// desc: Explanatory comment on the function.
+//
+// col5
+// name: FUNCTION_TYPE
+// type: SMALLINT
+// desc: Kind of function. One of:
+//   * functionResultUnknown - Cannot determine if a return value or a table
+//                             will be returned.
+//   * functionNoTable       - Does not return a table.
+//   * functionReturnsTable  - Returns a table.
+//
+// col6
+// name: SPECIFIC_NAME
+// type: STRING
+// desc: The name which uniquely identifies this function within its schema.
+//       In this case this is the fully qualified class name of the class
+//       that implements this function.
+//
+struct TGetFunctionsReq {
+  // Session to run this request against
+  1: required TSessionHandle sessionHandle
+
+  // A catalog name; must match the catalog name as it is stored in the
+  // database; "" retrieves those without a catalog; null means
+  // that the catalog name should not be used to narrow the search.
+  2: optional TIdentifier catalogName
+
+  // A schema name pattern; must match the schema name as it is stored
+  // in the database; "" retrieves those without a schema; null means
+  // that the schema name should not be used to narrow the search.
+  3: optional TPatternOrIdentifier schemaName
+
+  // A function name pattern; must match the function name as it is stored
+  // in the database.
+  4: required TPatternOrIdentifier functionName
+}
+
+struct TGetFunctionsResp {
+  1: required TStatus status
+  2: optional TOperationHandle operationHandle
+}
+
+
+// GetOperationStatus()
+//
+// Get the status of an operation running on the server.
+struct TGetOperationStatusReq {
+  // Session to run this request against
+  1: required TOperationHandle operationHandle
+}
+
+struct TGetOperationStatusResp {
+  1: required TStatus status
+  2: optional TOperationState operationState
+
+  // If operationState is ERROR_STATE, then the following fields may be set
+  // sqlState as defined in the ISO/IEC CLI specification
+  3: optional string sqlState
+
+  // Internal error code
+  4: optional i32 errorCode
+
+  // Error message
+  5: optional string errorMessage
+
+  // List of statuses of sub tasks
+  6: optional string taskStatus
+
+  // When was the operation started
+  7: optional i64 operationStarted
+  // When was the operation completed
+  8: optional i64 operationCompleted
+
+}
+
+
+// CancelOperation()
+//
+// Cancels processing on the specified operation handle and
+// frees any resources which were allocated.
+struct TCancelOperationReq {
+  // Operation to cancel
+  1: required TOperationHandle operationHandle
+}
+
+struct TCancelOperationResp {
+  1: required TStatus status
+}
+
+
+// CloseOperation()
+//
+// Given an operation in the FINISHED, CANCELED,
+// or ERROR states, CloseOperation() will free
+// all of the resources which were allocated on
+// the server to service the operation.
+struct TCloseOperationReq {
+  1: required TOperationHandle operationHandle
+}
+
+struct TCloseOperationResp {
+  1: required TStatus status
+}
+
+
+// GetResultSetMetadata()
+//
+// Retrieves schema information for the specified operation
+struct TGetResultSetMetadataReq {
+  // Operation for which to fetch result set schema information
+  1: required TOperationHandle operationHandle
+}
+
+struct TGetResultSetMetadataResp {
+  1: required TStatus status
+  2: optional TTableSchema schema
+}
+
+
+enum TFetchOrientation {
+  // Get the next rowset. The fetch offset is ignored.
+  FETCH_NEXT,
+
+  // Get the previous rowset. The fetch offset is ignored.
+  // NOT SUPPORTED
+  FETCH_PRIOR,
+
+  // Return the rowset at the given fetch offset relative
+  // to the current rowset.
+  // NOT SUPPORTED
+  FETCH_RELATIVE,
+
+  // Return the rowset at the specified fetch offset.
+  // NOT SUPPORTED
+  FETCH_ABSOLUTE,
+
+  // Get the first rowset in the result set.
+  FETCH_FIRST,
+
+  // Get the last rowset in the result set.
+  // NOT SUPPORTED
+  FETCH_LAST
+}
+
+// FetchResults()
+//
+// Fetch rows from the server corresponding to
+// a particular OperationHandle.
+struct TFetchResultsReq {
+  // Operation from which to fetch results.
+  1: required TOperationHandle operationHandle
+
+  // The fetch orientation. For V1 this must be either
+  // FETCH_NEXT or FETCH_FIRST. Defaults to FETCH_NEXT.
+  2: required TFetchOrientation orientation = TFetchOrientation.FETCH_NEXT
+
+  // Max number of rows that should be returned in
+  // the rowset.
+  3: required i64 maxRows
+
+  // The type of a fetch results request. 0 represents Query output. 1 represents Log
+  4: optional i16 fetchType = 0
+}
+
+struct TFetchResultsResp {
+  1: required TStatus status
+
+  // TRUE if there are more rows left to fetch from the server.
+  2: optional bool hasMoreRows
+
+  // The rowset.
This is optional so that we have the + // option in the future of adding alternate formats for + // representing result set data, e.g. delimited strings, + // binary encoded, etc. + 3: optional TRowSet results +} + +// GetDelegationToken() +// Retrieve delegation token for the current user +struct TGetDelegationTokenReq { + // session handle + 1: required TSessionHandle sessionHandle + + // userid for the proxy user + 2: required string owner + + // designated renewer userid + 3: required string renewer +} + +struct TGetDelegationTokenResp { + // status of the request + 1: required TStatus status + + // delegation token string + 2: optional string delegationToken +} + +// CancelDelegationToken() +// Cancel the given delegation token +struct TCancelDelegationTokenReq { + // session handle + 1: required TSessionHandle sessionHandle + + // delegation token to cancel + 2: required string delegationToken +} + +struct TCancelDelegationTokenResp { + // status of the request + 1: required TStatus status +} + +// RenewDelegationToken() +// Renew the given delegation token +struct TRenewDelegationTokenReq { + // session handle + 1: required TSessionHandle sessionHandle + + // delegation token to renew + 2: required string delegationToken +} + +struct TRenewDelegationTokenResp { + // status of the request + 1: required TStatus status +} + +service TCLIService { + + TOpenSessionResp OpenSession(1:TOpenSessionReq req); + + TCloseSessionResp CloseSession(1:TCloseSessionReq req); + + TGetInfoResp GetInfo(1:TGetInfoReq req); + + TExecuteStatementResp ExecuteStatement(1:TExecuteStatementReq req); + + TGetTypeInfoResp GetTypeInfo(1:TGetTypeInfoReq req); + + TGetCatalogsResp GetCatalogs(1:TGetCatalogsReq req); + + TGetSchemasResp GetSchemas(1:TGetSchemasReq req); + + TGetTablesResp GetTables(1:TGetTablesReq req); + + TGetTableTypesResp GetTableTypes(1:TGetTableTypesReq req); + + TGetColumnsResp GetColumns(1:TGetColumnsReq req); + + TGetFunctionsResp GetFunctions(1:TGetFunctionsReq req); + + TGetOperationStatusResp GetOperationStatus(1:TGetOperationStatusReq req); + + TCancelOperationResp CancelOperation(1:TCancelOperationReq req); + + TCloseOperationResp CloseOperation(1:TCloseOperationReq req); + + TGetResultSetMetadataResp GetResultSetMetadata(1:TGetResultSetMetadataReq req); + + TFetchResultsResp FetchResults(1:TFetchResultsReq req); + + TGetDelegationTokenResp GetDelegationToken(1:TGetDelegationTokenReq req); + + TCancelDelegationTokenResp CancelDelegationToken(1:TCancelDelegationTokenReq req); + + TRenewDelegationTokenResp RenewDelegationToken(1:TRenewDelegationTokenReq req); +} diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService.cpp index fc82b88..3597d44 100644 --- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService.cpp +++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService.cpp @@ -2066,6 +2066,380 @@ uint32_t TCLIService_GetFunctions_presult::read(::apache::thrift::protocol::TPro } +TCLIService_GetPrimaryKeys_args::~TCLIService_GetPrimaryKeys_args() throw() { +} + + +uint32_t TCLIService_GetPrimaryKeys_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == 
::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t TCLIService_GetPrimaryKeys_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TCLIService_GetPrimaryKeys_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +TCLIService_GetPrimaryKeys_pargs::~TCLIService_GetPrimaryKeys_pargs() throw() { +} + + +uint32_t TCLIService_GetPrimaryKeys_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TCLIService_GetPrimaryKeys_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +TCLIService_GetPrimaryKeys_result::~TCLIService_GetPrimaryKeys_result() throw() { +} + + +uint32_t TCLIService_GetPrimaryKeys_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t TCLIService_GetPrimaryKeys_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("TCLIService_GetPrimaryKeys_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +TCLIService_GetPrimaryKeys_presult::~TCLIService_GetPrimaryKeys_presult() throw() { +} + + +uint32_t TCLIService_GetPrimaryKeys_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + 
break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +TCLIService_GetCrossReference_args::~TCLIService_GetCrossReference_args() throw() { +} + + +uint32_t TCLIService_GetCrossReference_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->req.read(iprot); + this->__isset.req = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t TCLIService_GetCrossReference_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TCLIService_GetCrossReference_args"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->req.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +TCLIService_GetCrossReference_pargs::~TCLIService_GetCrossReference_pargs() throw() { +} + + +uint32_t TCLIService_GetCrossReference_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TCLIService_GetCrossReference_pargs"); + + xfer += oprot->writeFieldBegin("req", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->req)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +TCLIService_GetCrossReference_result::~TCLIService_GetCrossReference_result() throw() { +} + + +uint32_t TCLIService_GetCrossReference_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t TCLIService_GetCrossReference_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + 
+ xfer += oprot->writeStructBegin("TCLIService_GetCrossReference_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +TCLIService_GetCrossReference_presult::~TCLIService_GetCrossReference_presult() throw() { +} + + +uint32_t TCLIService_GetCrossReference_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + TCLIService_GetOperationStatus_args::~TCLIService_GetOperationStatus_args() throw() { } @@ -4065,12 +4439,128 @@ void TCLIServiceClient::recv_GetTableTypes(TGetTableTypesResp& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("GetTableTypes") != 0) { + if (fname.compare("GetTableTypes") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + TCLIService_GetTableTypes_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetTableTypes failed: unknown result"); +} + +void TCLIServiceClient::GetColumns(TGetColumnsResp& _return, const TGetColumnsReq& req) +{ + send_GetColumns(req); + recv_GetColumns(_return); +} + +void TCLIServiceClient::send_GetColumns(const TGetColumnsReq& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("GetColumns", ::apache::thrift::protocol::T_CALL, cseqid); + + TCLIService_GetColumns_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void TCLIServiceClient::recv_GetColumns(TGetColumnsResp& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("GetColumns") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + TCLIService_GetColumns_presult result; + result.success = &_return; + 
result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetColumns failed: unknown result"); +} + +void TCLIServiceClient::GetFunctions(TGetFunctionsResp& _return, const TGetFunctionsReq& req) +{ + send_GetFunctions(req); + recv_GetFunctions(_return); +} + +void TCLIServiceClient::send_GetFunctions(const TGetFunctionsReq& req) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("GetFunctions", ::apache::thrift::protocol::T_CALL, cseqid); + + TCLIService_GetFunctions_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void TCLIServiceClient::recv_GetFunctions(TGetFunctionsResp& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("GetFunctions") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - TCLIService_GetTableTypes_presult result; + TCLIService_GetFunctions_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -4080,21 +4570,21 @@ void TCLIServiceClient::recv_GetTableTypes(TGetTableTypesResp& _return) // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetTableTypes failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetFunctions failed: unknown result"); } -void TCLIServiceClient::GetColumns(TGetColumnsResp& _return, const TGetColumnsReq& req) +void TCLIServiceClient::GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req) { - send_GetColumns(req); - recv_GetColumns(_return); + send_GetPrimaryKeys(req); + recv_GetPrimaryKeys(_return); } -void TCLIServiceClient::send_GetColumns(const TGetColumnsReq& req) +void TCLIServiceClient::send_GetPrimaryKeys(const TGetPrimaryKeysReq& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("GetColumns", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("GetPrimaryKeys", ::apache::thrift::protocol::T_CALL, cseqid); - TCLIService_GetColumns_pargs args; + TCLIService_GetPrimaryKeys_pargs args; args.req = &req; args.write(oprot_); @@ -4103,7 +4593,7 @@ void TCLIServiceClient::send_GetColumns(const TGetColumnsReq& req) oprot_->getTransport()->flush(); } -void TCLIServiceClient::recv_GetColumns(TGetColumnsResp& _return) +void TCLIServiceClient::recv_GetPrimaryKeys(TGetPrimaryKeysResp& _return) { int32_t rseqid = 0; @@ -4123,12 +4613,12 @@ void TCLIServiceClient::recv_GetColumns(TGetColumnsResp& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("GetColumns") != 0) { + if (fname.compare("GetPrimaryKeys") != 0) { 
iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - TCLIService_GetColumns_presult result; + TCLIService_GetPrimaryKeys_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -4138,21 +4628,21 @@ void TCLIServiceClient::recv_GetColumns(TGetColumnsResp& _return) // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetColumns failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetPrimaryKeys failed: unknown result"); } -void TCLIServiceClient::GetFunctions(TGetFunctionsResp& _return, const TGetFunctionsReq& req) +void TCLIServiceClient::GetCrossReference(TGetCrossReferenceResp& _return, const TGetCrossReferenceReq& req) { - send_GetFunctions(req); - recv_GetFunctions(_return); + send_GetCrossReference(req); + recv_GetCrossReference(_return); } -void TCLIServiceClient::send_GetFunctions(const TGetFunctionsReq& req) +void TCLIServiceClient::send_GetCrossReference(const TGetCrossReferenceReq& req) { int32_t cseqid = 0; - oprot_->writeMessageBegin("GetFunctions", ::apache::thrift::protocol::T_CALL, cseqid); + oprot_->writeMessageBegin("GetCrossReference", ::apache::thrift::protocol::T_CALL, cseqid); - TCLIService_GetFunctions_pargs args; + TCLIService_GetCrossReference_pargs args; args.req = &req; args.write(oprot_); @@ -4161,7 +4651,7 @@ void TCLIServiceClient::send_GetFunctions(const TGetFunctionsReq& req) oprot_->getTransport()->flush(); } -void TCLIServiceClient::recv_GetFunctions(TGetFunctionsResp& _return) +void TCLIServiceClient::recv_GetCrossReference(TGetCrossReferenceResp& _return) { int32_t rseqid = 0; @@ -4181,12 +4671,12 @@ void TCLIServiceClient::recv_GetFunctions(TGetFunctionsResp& _return) iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - if (fname.compare("GetFunctions") != 0) { + if (fname.compare("GetCrossReference") != 0) { iprot_->skip(::apache::thrift::protocol::T_STRUCT); iprot_->readMessageEnd(); iprot_->getTransport()->readEnd(); } - TCLIService_GetFunctions_presult result; + TCLIService_GetCrossReference_presult result; result.success = &_return; result.read(iprot_); iprot_->readMessageEnd(); @@ -4196,7 +4686,7 @@ void TCLIServiceClient::recv_GetFunctions(TGetFunctionsResp& _return) // _return pointer has now been filled return; } - throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetFunctions failed: unknown result"); + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetCrossReference failed: unknown result"); } void TCLIServiceClient::GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req) @@ -5276,6 +5766,114 @@ void TCLIServiceProcessor::process_GetFunctions(int32_t seqid, ::apache::thrift: } } +void TCLIServiceProcessor::process_GetPrimaryKeys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("TCLIService.GetPrimaryKeys", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "TCLIService.GetPrimaryKeys"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, 
"TCLIService.GetPrimaryKeys"); + } + + TCLIService_GetPrimaryKeys_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "TCLIService.GetPrimaryKeys", bytes); + } + + TCLIService_GetPrimaryKeys_result result; + try { + iface_->GetPrimaryKeys(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "TCLIService.GetPrimaryKeys"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("GetPrimaryKeys", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "TCLIService.GetPrimaryKeys"); + } + + oprot->writeMessageBegin("GetPrimaryKeys", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "TCLIService.GetPrimaryKeys", bytes); + } +} + +void TCLIServiceProcessor::process_GetCrossReference(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("TCLIService.GetCrossReference", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "TCLIService.GetCrossReference"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "TCLIService.GetCrossReference"); + } + + TCLIService_GetCrossReference_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "TCLIService.GetCrossReference", bytes); + } + + TCLIService_GetCrossReference_result result; + try { + iface_->GetCrossReference(result.success, args.req); + result.__isset.success = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "TCLIService.GetCrossReference"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("GetCrossReference", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "TCLIService.GetCrossReference"); + } + + oprot->writeMessageBegin("GetCrossReference", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "TCLIService.GetCrossReference", bytes); + } +} + void TCLIServiceProcessor::process_GetOperationStatus(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; @@ -6639,6 +7237,174 @@ void TCLIServiceConcurrentClient::recv_GetFunctions(TGetFunctionsResp& 
_return, } // end while(true) } +void TCLIServiceConcurrentClient::GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req) +{ + int32_t seqid = send_GetPrimaryKeys(req); + recv_GetPrimaryKeys(_return, seqid); +} + +int32_t TCLIServiceConcurrentClient::send_GetPrimaryKeys(const TGetPrimaryKeysReq& req) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("GetPrimaryKeys", ::apache::thrift::protocol::T_CALL, cseqid); + + TCLIService_GetPrimaryKeys_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void TCLIServiceConcurrentClient::recv_GetPrimaryKeys(TGetPrimaryKeysResp& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("GetPrimaryKeys") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + TCLIService_GetPrimaryKeys_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetPrimaryKeys failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void TCLIServiceConcurrentClient::GetCrossReference(TGetCrossReferenceResp& _return, const TGetCrossReferenceReq& req) +{ + int32_t seqid = send_GetCrossReference(req); + recv_GetCrossReference(_return, seqid); +} + +int32_t TCLIServiceConcurrentClient::send_GetCrossReference(const TGetCrossReferenceReq& req) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("GetCrossReference", ::apache::thrift::protocol::T_CALL, cseqid); + + TCLIService_GetCrossReference_pargs args; + args.req = &req; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void 
TCLIServiceConcurrentClient::recv_GetCrossReference(TGetCrossReferenceResp& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("GetCrossReference") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + TCLIService_GetCrossReference_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "GetCrossReference failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + void TCLIServiceConcurrentClient::GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req) { int32_t seqid = send_GetOperationStatus(req); diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService.h b/service-rpc/src/gen/thrift/gen-cpp/TCLIService.h index 3407564..5fd423d 100644 --- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService.h +++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService.h @@ -32,6 +32,8 @@ class TCLIServiceIf { virtual void GetTableTypes(TGetTableTypesResp& _return, const TGetTableTypesReq& req) = 0; virtual void GetColumns(TGetColumnsResp& _return, const TGetColumnsReq& req) = 0; virtual void GetFunctions(TGetFunctionsResp& _return, const TGetFunctionsReq& req) = 0; + virtual void GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req) = 0; + virtual void GetCrossReference(TGetCrossReferenceResp& _return, const TGetCrossReferenceReq& req) = 0; virtual void GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req) = 0; virtual void CancelOperation(TCancelOperationResp& _return, const TCancelOperationReq& req) = 0; virtual void CloseOperation(TCloseOperationResp& _return, const TCloseOperationReq& req) = 0; @@ -102,6 +104,12 @@ class TCLIServiceNull : virtual public TCLIServiceIf { void GetFunctions(TGetFunctionsResp& /* _return */, const TGetFunctionsReq& /* req */) { return; } + void GetPrimaryKeys(TGetPrimaryKeysResp& /* _return */, const TGetPrimaryKeysReq& /* req */) { + return; + } + void GetCrossReference(TGetCrossReferenceResp& /* _return */, const 
TGetCrossReferenceReq& /* req */) { + return; + } void GetOperationStatus(TGetOperationStatusResp& /* _return */, const TGetOperationStatusReq& /* req */) { return; } @@ -1272,6 +1280,214 @@ class TCLIService_GetFunctions_presult { }; +typedef struct _TCLIService_GetPrimaryKeys_args__isset { + _TCLIService_GetPrimaryKeys_args__isset() : req(false) {} + bool req :1; +} _TCLIService_GetPrimaryKeys_args__isset; + +class TCLIService_GetPrimaryKeys_args { + public: + + TCLIService_GetPrimaryKeys_args(const TCLIService_GetPrimaryKeys_args&); + TCLIService_GetPrimaryKeys_args& operator=(const TCLIService_GetPrimaryKeys_args&); + TCLIService_GetPrimaryKeys_args() { + } + + virtual ~TCLIService_GetPrimaryKeys_args() throw(); + TGetPrimaryKeysReq req; + + _TCLIService_GetPrimaryKeys_args__isset __isset; + + void __set_req(const TGetPrimaryKeysReq& val); + + bool operator == (const TCLIService_GetPrimaryKeys_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const TCLIService_GetPrimaryKeys_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TCLIService_GetPrimaryKeys_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class TCLIService_GetPrimaryKeys_pargs { + public: + + + virtual ~TCLIService_GetPrimaryKeys_pargs() throw(); + const TGetPrimaryKeysReq* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _TCLIService_GetPrimaryKeys_result__isset { + _TCLIService_GetPrimaryKeys_result__isset() : success(false) {} + bool success :1; +} _TCLIService_GetPrimaryKeys_result__isset; + +class TCLIService_GetPrimaryKeys_result { + public: + + TCLIService_GetPrimaryKeys_result(const TCLIService_GetPrimaryKeys_result&); + TCLIService_GetPrimaryKeys_result& operator=(const TCLIService_GetPrimaryKeys_result&); + TCLIService_GetPrimaryKeys_result() { + } + + virtual ~TCLIService_GetPrimaryKeys_result() throw(); + TGetPrimaryKeysResp success; + + _TCLIService_GetPrimaryKeys_result__isset __isset; + + void __set_success(const TGetPrimaryKeysResp& val); + + bool operator == (const TCLIService_GetPrimaryKeys_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const TCLIService_GetPrimaryKeys_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TCLIService_GetPrimaryKeys_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _TCLIService_GetPrimaryKeys_presult__isset { + _TCLIService_GetPrimaryKeys_presult__isset() : success(false) {} + bool success :1; +} _TCLIService_GetPrimaryKeys_presult__isset; + +class TCLIService_GetPrimaryKeys_presult { + public: + + + virtual ~TCLIService_GetPrimaryKeys_presult() throw(); + TGetPrimaryKeysResp* success; + + _TCLIService_GetPrimaryKeys_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _TCLIService_GetCrossReference_args__isset { + _TCLIService_GetCrossReference_args__isset() : req(false) {} + bool req :1; +} _TCLIService_GetCrossReference_args__isset; + +class TCLIService_GetCrossReference_args { + public: + + TCLIService_GetCrossReference_args(const TCLIService_GetCrossReference_args&); + TCLIService_GetCrossReference_args& operator=(const TCLIService_GetCrossReference_args&); + 
TCLIService_GetCrossReference_args() { + } + + virtual ~TCLIService_GetCrossReference_args() throw(); + TGetCrossReferenceReq req; + + _TCLIService_GetCrossReference_args__isset __isset; + + void __set_req(const TGetCrossReferenceReq& val); + + bool operator == (const TCLIService_GetCrossReference_args & rhs) const + { + if (!(req == rhs.req)) + return false; + return true; + } + bool operator != (const TCLIService_GetCrossReference_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TCLIService_GetCrossReference_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class TCLIService_GetCrossReference_pargs { + public: + + + virtual ~TCLIService_GetCrossReference_pargs() throw(); + const TGetCrossReferenceReq* req; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _TCLIService_GetCrossReference_result__isset { + _TCLIService_GetCrossReference_result__isset() : success(false) {} + bool success :1; +} _TCLIService_GetCrossReference_result__isset; + +class TCLIService_GetCrossReference_result { + public: + + TCLIService_GetCrossReference_result(const TCLIService_GetCrossReference_result&); + TCLIService_GetCrossReference_result& operator=(const TCLIService_GetCrossReference_result&); + TCLIService_GetCrossReference_result() { + } + + virtual ~TCLIService_GetCrossReference_result() throw(); + TGetCrossReferenceResp success; + + _TCLIService_GetCrossReference_result__isset __isset; + + void __set_success(const TGetCrossReferenceResp& val); + + bool operator == (const TCLIService_GetCrossReference_result & rhs) const + { + if (!(success == rhs.success)) + return false; + return true; + } + bool operator != (const TCLIService_GetCrossReference_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TCLIService_GetCrossReference_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _TCLIService_GetCrossReference_presult__isset { + _TCLIService_GetCrossReference_presult__isset() : success(false) {} + bool success :1; +} _TCLIService_GetCrossReference_presult__isset; + +class TCLIService_GetCrossReference_presult { + public: + + + virtual ~TCLIService_GetCrossReference_presult() throw(); + TGetCrossReferenceResp* success; + + _TCLIService_GetCrossReference_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct _TCLIService_GetOperationStatus_args__isset { _TCLIService_GetOperationStatus_args__isset() : req(false) {} bool req :1; @@ -2162,6 +2378,12 @@ class TCLIServiceClient : virtual public TCLIServiceIf { void GetFunctions(TGetFunctionsResp& _return, const TGetFunctionsReq& req); void send_GetFunctions(const TGetFunctionsReq& req); void recv_GetFunctions(TGetFunctionsResp& _return); + void GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req); + void send_GetPrimaryKeys(const TGetPrimaryKeysReq& req); + void recv_GetPrimaryKeys(TGetPrimaryKeysResp& _return); + void GetCrossReference(TGetCrossReferenceResp& _return, const TGetCrossReferenceReq& req); + void send_GetCrossReference(const TGetCrossReferenceReq& req); + void recv_GetCrossReference(TGetCrossReferenceResp& _return); void GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req); void send_GetOperationStatus(const 
TGetOperationStatusReq& req); void recv_GetOperationStatus(TGetOperationStatusResp& _return); @@ -2212,6 +2434,8 @@ class TCLIServiceProcessor : public ::apache::thrift::TDispatchProcessor { void process_GetTableTypes(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_GetColumns(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_GetFunctions(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_GetPrimaryKeys(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_GetCrossReference(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_GetOperationStatus(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_CancelOperation(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_CloseOperation(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -2234,6 +2458,8 @@ class TCLIServiceProcessor : public ::apache::thrift::TDispatchProcessor { processMap_["GetTableTypes"] = &TCLIServiceProcessor::process_GetTableTypes; processMap_["GetColumns"] = &TCLIServiceProcessor::process_GetColumns; processMap_["GetFunctions"] = &TCLIServiceProcessor::process_GetFunctions; + processMap_["GetPrimaryKeys"] = &TCLIServiceProcessor::process_GetPrimaryKeys; + processMap_["GetCrossReference"] = &TCLIServiceProcessor::process_GetCrossReference; processMap_["GetOperationStatus"] = &TCLIServiceProcessor::process_GetOperationStatus; processMap_["CancelOperation"] = &TCLIServiceProcessor::process_CancelOperation; processMap_["CloseOperation"] = &TCLIServiceProcessor::process_CloseOperation; @@ -2380,6 +2606,26 @@ class TCLIServiceMultiface : virtual public TCLIServiceIf { return; } + void GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->GetPrimaryKeys(_return, req); + } + ifaces_[i]->GetPrimaryKeys(_return, req); + return; + } + + void GetCrossReference(TGetCrossReferenceResp& _return, const TGetCrossReferenceReq& req) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->GetCrossReference(_return, req); + } + ifaces_[i]->GetCrossReference(_return, req); + return; + } + void GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req) { size_t sz = ifaces_.size(); size_t i = 0; @@ -2523,6 +2769,12 @@ class TCLIServiceConcurrentClient : virtual public TCLIServiceIf { void GetFunctions(TGetFunctionsResp& _return, const TGetFunctionsReq& req); int32_t send_GetFunctions(const TGetFunctionsReq& req); void recv_GetFunctions(TGetFunctionsResp& _return, const int32_t seqid); + void GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req); + int32_t send_GetPrimaryKeys(const TGetPrimaryKeysReq& req); + void recv_GetPrimaryKeys(TGetPrimaryKeysResp& _return, const int32_t seqid); + void GetCrossReference(TGetCrossReferenceResp& _return, const 
TGetCrossReferenceReq& req); + int32_t send_GetCrossReference(const TGetCrossReferenceReq& req); + void recv_GetCrossReference(TGetCrossReferenceResp& _return, const int32_t seqid); void GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req); int32_t send_GetOperationStatus(const TGetOperationStatusReq& req); void recv_GetOperationStatus(TGetOperationStatusResp& _return, const int32_t seqid); diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_server.skeleton.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_server.skeleton.cpp index 66ed6a7..5d7caf9 100644 --- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_server.skeleton.cpp +++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_server.skeleton.cpp @@ -77,6 +77,16 @@ class TCLIServiceHandler : virtual public TCLIServiceIf { printf("GetFunctions\n"); } + void GetPrimaryKeys(TGetPrimaryKeysResp& _return, const TGetPrimaryKeysReq& req) { + // Your implementation goes here + printf("GetPrimaryKeys\n"); + } + + void GetCrossReference(TGetCrossReferenceResp& _return, const TGetCrossReferenceReq& req) { + // Your implementation goes here + printf("GetCrossReference\n"); + } + void GetOperationStatus(TGetOperationStatusResp& _return, const TGetOperationStatusReq& req) { // Your implementation goes here printf("GetOperationStatus\n"); diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp index 3a27a60..afbc7ed 100644 --- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp +++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.cpp @@ -7498,6 +7498,604 @@ void TGetFunctionsResp::printTo(std::ostream& out) const { } +TGetPrimaryKeysReq::~TGetPrimaryKeysReq() throw() { +} + + +void TGetPrimaryKeysReq::__set_sessionHandle(const TSessionHandle& val) { + this->sessionHandle = val; +} + +void TGetPrimaryKeysReq::__set_catalogName(const TIdentifier& val) { + this->catalogName = val; +__isset.catalogName = true; +} + +void TGetPrimaryKeysReq::__set_schemaName(const TIdentifier& val) { + this->schemaName = val; +__isset.schemaName = true; +} + +void TGetPrimaryKeysReq::__set_tableName(const TIdentifier& val) { + this->tableName = val; +__isset.tableName = true; +} + +uint32_t TGetPrimaryKeysReq::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_sessionHandle = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->sessionHandle.read(iprot); + isset_sessionHandle = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->catalogName); + this->__isset.catalogName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->schemaName); + this->__isset.schemaName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->tableName); + this->__isset.tableName = 
true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_sessionHandle) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TGetPrimaryKeysReq::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TGetPrimaryKeysReq"); + + xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->sessionHandle.write(oprot); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.catalogName) { + xfer += oprot->writeFieldBegin("catalogName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->catalogName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.schemaName) { + xfer += oprot->writeFieldBegin("schemaName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->schemaName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.tableName) { + xfer += oprot->writeFieldBegin("tableName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->tableName); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TGetPrimaryKeysReq &a, TGetPrimaryKeysReq &b) { + using ::std::swap; + swap(a.sessionHandle, b.sessionHandle); + swap(a.catalogName, b.catalogName); + swap(a.schemaName, b.schemaName); + swap(a.tableName, b.tableName); + swap(a.__isset, b.__isset); +} + +TGetPrimaryKeysReq::TGetPrimaryKeysReq(const TGetPrimaryKeysReq& other260) { + sessionHandle = other260.sessionHandle; + catalogName = other260.catalogName; + schemaName = other260.schemaName; + tableName = other260.tableName; + __isset = other260.__isset; +} +TGetPrimaryKeysReq& TGetPrimaryKeysReq::operator=(const TGetPrimaryKeysReq& other261) { + sessionHandle = other261.sessionHandle; + catalogName = other261.catalogName; + schemaName = other261.schemaName; + tableName = other261.tableName; + __isset = other261.__isset; + return *this; +} +void TGetPrimaryKeysReq::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "TGetPrimaryKeysReq("; + out << "sessionHandle=" << to_string(sessionHandle); + out << ", " << "catalogName="; (__isset.catalogName ? (out << to_string(catalogName)) : (out << "<null>")); + out << ", " << "schemaName="; (__isset.schemaName ? (out << to_string(schemaName)) : (out << "<null>")); + out << ", " << "tableName="; (__isset.tableName ? 
(out << to_string(tableName)) : (out << "<null>")); + out << ")"; +} + + +TGetPrimaryKeysResp::~TGetPrimaryKeysResp() throw() { +} + + +void TGetPrimaryKeysResp::__set_status(const TStatus& val) { + this->status = val; +} + +void TGetPrimaryKeysResp::__set_operationHandle(const TOperationHandle& val) { + this->operationHandle = val; +__isset.operationHandle = true; +} + +uint32_t TGetPrimaryKeysResp::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_status = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->status.read(iprot); + isset_status = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->operationHandle.read(iprot); + this->__isset.operationHandle = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_status) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TGetPrimaryKeysResp::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TGetPrimaryKeysResp"); + + xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->status.write(oprot); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.operationHandle) { + xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->operationHandle.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TGetPrimaryKeysResp &a, TGetPrimaryKeysResp &b) { + using ::std::swap; + swap(a.status, b.status); + swap(a.operationHandle, b.operationHandle); + swap(a.__isset, b.__isset); +} + +TGetPrimaryKeysResp::TGetPrimaryKeysResp(const TGetPrimaryKeysResp& other262) { + status = other262.status; + operationHandle = other262.operationHandle; + __isset = other262.__isset; +} +TGetPrimaryKeysResp& TGetPrimaryKeysResp::operator=(const TGetPrimaryKeysResp& other263) { + status = other263.status; + operationHandle = other263.operationHandle; + __isset = other263.__isset; + return *this; +} +void TGetPrimaryKeysResp::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "TGetPrimaryKeysResp("; + out << "status=" << to_string(status); + out << ", " << "operationHandle="; (__isset.operationHandle ? 
(out << to_string(operationHandle)) : (out << "<null>")); + out << ")"; +} + + +TGetCrossReferenceReq::~TGetCrossReferenceReq() throw() { +} + + +void TGetCrossReferenceReq::__set_sessionHandle(const TSessionHandle& val) { + this->sessionHandle = val; +} + +void TGetCrossReferenceReq::__set_parentCatalogName(const TIdentifier& val) { + this->parentCatalogName = val; +__isset.parentCatalogName = true; +} + +void TGetCrossReferenceReq::__set_parentSchemaName(const TIdentifier& val) { + this->parentSchemaName = val; +__isset.parentSchemaName = true; +} + +void TGetCrossReferenceReq::__set_parentTableName(const TIdentifier& val) { + this->parentTableName = val; +__isset.parentTableName = true; +} + +void TGetCrossReferenceReq::__set_foreignCatalogName(const TIdentifier& val) { + this->foreignCatalogName = val; +__isset.foreignCatalogName = true; +} + +void TGetCrossReferenceReq::__set_foreignSchemaName(const TIdentifier& val) { + this->foreignSchemaName = val; +__isset.foreignSchemaName = true; +} + +void TGetCrossReferenceReq::__set_foreignTableName(const TIdentifier& val) { + this->foreignTableName = val; +__isset.foreignTableName = true; +} + +uint32_t TGetCrossReferenceReq::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_sessionHandle = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->sessionHandle.read(iprot); + isset_sessionHandle = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parentCatalogName); + this->__isset.parentCatalogName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parentSchemaName); + this->__isset.parentSchemaName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->parentTableName); + this->__isset.parentTableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->foreignCatalogName); + this->__isset.foreignCatalogName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->foreignSchemaName); + this->__isset.foreignSchemaName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 7: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->foreignTableName); + this->__isset.foreignTableName = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_sessionHandle) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TGetCrossReferenceReq::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + 
apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TGetCrossReferenceReq"); + + xfer += oprot->writeFieldBegin("sessionHandle", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->sessionHandle.write(oprot); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.parentCatalogName) { + xfer += oprot->writeFieldBegin("parentCatalogName", ::apache::thrift::protocol::T_STRING, 2); + xfer += oprot->writeString(this->parentCatalogName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.parentSchemaName) { + xfer += oprot->writeFieldBegin("parentSchemaName", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->parentSchemaName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.parentTableName) { + xfer += oprot->writeFieldBegin("parentTableName", ::apache::thrift::protocol::T_STRING, 4); + xfer += oprot->writeString(this->parentTableName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.foreignCatalogName) { + xfer += oprot->writeFieldBegin("foreignCatalogName", ::apache::thrift::protocol::T_STRING, 5); + xfer += oprot->writeString(this->foreignCatalogName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.foreignSchemaName) { + xfer += oprot->writeFieldBegin("foreignSchemaName", ::apache::thrift::protocol::T_STRING, 6); + xfer += oprot->writeString(this->foreignSchemaName); + xfer += oprot->writeFieldEnd(); + } + if (this->__isset.foreignTableName) { + xfer += oprot->writeFieldBegin("foreignTableName", ::apache::thrift::protocol::T_STRING, 7); + xfer += oprot->writeString(this->foreignTableName); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TGetCrossReferenceReq &a, TGetCrossReferenceReq &b) { + using ::std::swap; + swap(a.sessionHandle, b.sessionHandle); + swap(a.parentCatalogName, b.parentCatalogName); + swap(a.parentSchemaName, b.parentSchemaName); + swap(a.parentTableName, b.parentTableName); + swap(a.foreignCatalogName, b.foreignCatalogName); + swap(a.foreignSchemaName, b.foreignSchemaName); + swap(a.foreignTableName, b.foreignTableName); + swap(a.__isset, b.__isset); +} + +TGetCrossReferenceReq::TGetCrossReferenceReq(const TGetCrossReferenceReq& other264) { + sessionHandle = other264.sessionHandle; + parentCatalogName = other264.parentCatalogName; + parentSchemaName = other264.parentSchemaName; + parentTableName = other264.parentTableName; + foreignCatalogName = other264.foreignCatalogName; + foreignSchemaName = other264.foreignSchemaName; + foreignTableName = other264.foreignTableName; + __isset = other264.__isset; +} +TGetCrossReferenceReq& TGetCrossReferenceReq::operator=(const TGetCrossReferenceReq& other265) { + sessionHandle = other265.sessionHandle; + parentCatalogName = other265.parentCatalogName; + parentSchemaName = other265.parentSchemaName; + parentTableName = other265.parentTableName; + foreignCatalogName = other265.foreignCatalogName; + foreignSchemaName = other265.foreignSchemaName; + foreignTableName = other265.foreignTableName; + __isset = other265.__isset; + return *this; +} +void TGetCrossReferenceReq::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "TGetCrossReferenceReq("; + out << "sessionHandle=" << to_string(sessionHandle); + out << ", " << "parentCatalogName="; (__isset.parentCatalogName ? (out << to_string(parentCatalogName)) : (out << "<null>")); + out << ", " << "parentSchemaName="; (__isset.parentSchemaName ? 
(out << to_string(parentSchemaName)) : (out << "<null>")); + out << ", " << "parentTableName="; (__isset.parentTableName ? (out << to_string(parentTableName)) : (out << "<null>")); + out << ", " << "foreignCatalogName="; (__isset.foreignCatalogName ? (out << to_string(foreignCatalogName)) : (out << "<null>")); + out << ", " << "foreignSchemaName="; (__isset.foreignSchemaName ? (out << to_string(foreignSchemaName)) : (out << "<null>")); + out << ", " << "foreignTableName="; (__isset.foreignTableName ? (out << to_string(foreignTableName)) : (out << "<null>")); + out << ")"; +} + + +TGetCrossReferenceResp::~TGetCrossReferenceResp() throw() { +} + + +void TGetCrossReferenceResp::__set_status(const TStatus& val) { + this->status = val; +} + +void TGetCrossReferenceResp::__set_operationHandle(const TOperationHandle& val) { + this->operationHandle = val; +__isset.operationHandle = true; +} + +uint32_t TGetCrossReferenceResp::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + bool isset_status = false; + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->status.read(iprot); + isset_status = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->operationHandle.read(iprot); + this->__isset.operationHandle = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_status) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t TGetCrossReferenceResp::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("TGetCrossReferenceResp"); + + xfer += oprot->writeFieldBegin("status", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->status.write(oprot); + xfer += oprot->writeFieldEnd(); + + if (this->__isset.operationHandle) { + xfer += oprot->writeFieldBegin("operationHandle", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->operationHandle.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(TGetCrossReferenceResp &a, TGetCrossReferenceResp &b) { + using ::std::swap; + swap(a.status, b.status); + swap(a.operationHandle, b.operationHandle); + swap(a.__isset, b.__isset); +} + +TGetCrossReferenceResp::TGetCrossReferenceResp(const TGetCrossReferenceResp& other266) { + status = other266.status; + operationHandle = other266.operationHandle; + __isset = other266.__isset; +} +TGetCrossReferenceResp& TGetCrossReferenceResp::operator=(const TGetCrossReferenceResp& other267) { + status = other267.status; + operationHandle = other267.operationHandle; + __isset = other267.__isset; + return *this; +} +void TGetCrossReferenceResp::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "TGetCrossReferenceResp("; + out << "status=" << to_string(status); + out << ", " << 
"operationHandle="; (__isset.operationHandle ? (out << to_string(operationHandle)) : (out << "<null>")); + out << ")"; +} + + TGetOperationStatusReq::~TGetOperationStatusReq() throw() { } @@ -7569,11 +8167,11 @@ void swap(TGetOperationStatusReq &a, TGetOperationStatusReq &b) { swap(a.operationHandle, b.operationHandle); } -TGetOperationStatusReq::TGetOperationStatusReq(const TGetOperationStatusReq& other260) { - operationHandle = other260.operationHandle; +TGetOperationStatusReq::TGetOperationStatusReq(const TGetOperationStatusReq& other268) { + operationHandle = other268.operationHandle; } -TGetOperationStatusReq& TGetOperationStatusReq::operator=(const TGetOperationStatusReq& other261) { - operationHandle = other261.operationHandle; +TGetOperationStatusReq& TGetOperationStatusReq::operator=(const TGetOperationStatusReq& other269) { + operationHandle = other269.operationHandle; return *this; } void TGetOperationStatusReq::printTo(std::ostream& out) const { @@ -7659,9 +8257,9 @@ uint32_t TGetOperationStatusResp::read(::apache::thrift::protocol::TProtocol* ip break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast262; - xfer += iprot->readI32(ecast262); - this->operationState = (TOperationState::type)ecast262; + int32_t ecast270; + xfer += iprot->readI32(ecast270); + this->operationState = (TOperationState::type)ecast270; this->__isset.operationState = true; } else { xfer += iprot->skip(ftype); @@ -7791,27 +8389,27 @@ void swap(TGetOperationStatusResp &a, TGetOperationStatusResp &b) { swap(a.__isset, b.__isset); } -TGetOperationStatusResp::TGetOperationStatusResp(const TGetOperationStatusResp& other263) { - status = other263.status; - operationState = other263.operationState; - sqlState = other263.sqlState; - errorCode = other263.errorCode; - errorMessage = other263.errorMessage; - taskStatus = other263.taskStatus; - operationStarted = other263.operationStarted; - operationCompleted = other263.operationCompleted; - __isset = other263.__isset; -} -TGetOperationStatusResp& TGetOperationStatusResp::operator=(const TGetOperationStatusResp& other264) { - status = other264.status; - operationState = other264.operationState; - sqlState = other264.sqlState; - errorCode = other264.errorCode; - errorMessage = other264.errorMessage; - taskStatus = other264.taskStatus; - operationStarted = other264.operationStarted; - operationCompleted = other264.operationCompleted; - __isset = other264.__isset; +TGetOperationStatusResp::TGetOperationStatusResp(const TGetOperationStatusResp& other271) { + status = other271.status; + operationState = other271.operationState; + sqlState = other271.sqlState; + errorCode = other271.errorCode; + errorMessage = other271.errorMessage; + taskStatus = other271.taskStatus; + operationStarted = other271.operationStarted; + operationCompleted = other271.operationCompleted; + __isset = other271.__isset; +} +TGetOperationStatusResp& TGetOperationStatusResp::operator=(const TGetOperationStatusResp& other272) { + status = other272.status; + operationState = other272.operationState; + sqlState = other272.sqlState; + errorCode = other272.errorCode; + errorMessage = other272.errorMessage; + taskStatus = other272.taskStatus; + operationStarted = other272.operationStarted; + operationCompleted = other272.operationCompleted; + __isset = other272.__isset; return *this; } void TGetOperationStatusResp::printTo(std::ostream& out) const { @@ -7900,11 +8498,11 @@ void swap(TCancelOperationReq &a, TCancelOperationReq &b) { swap(a.operationHandle, b.operationHandle); }
-TCancelOperationReq::TCancelOperationReq(const TCancelOperationReq& other265) { - operationHandle = other265.operationHandle; +TCancelOperationReq::TCancelOperationReq(const TCancelOperationReq& other273) { + operationHandle = other273.operationHandle; } -TCancelOperationReq& TCancelOperationReq::operator=(const TCancelOperationReq& other266) { - operationHandle = other266.operationHandle; +TCancelOperationReq& TCancelOperationReq::operator=(const TCancelOperationReq& other274) { + operationHandle = other274.operationHandle; return *this; } void TCancelOperationReq::printTo(std::ostream& out) const { @@ -7986,11 +8584,11 @@ void swap(TCancelOperationResp &a, TCancelOperationResp &b) { swap(a.status, b.status); } -TCancelOperationResp::TCancelOperationResp(const TCancelOperationResp& other267) { - status = other267.status; +TCancelOperationResp::TCancelOperationResp(const TCancelOperationResp& other275) { + status = other275.status; } -TCancelOperationResp& TCancelOperationResp::operator=(const TCancelOperationResp& other268) { - status = other268.status; +TCancelOperationResp& TCancelOperationResp::operator=(const TCancelOperationResp& other276) { + status = other276.status; return *this; } void TCancelOperationResp::printTo(std::ostream& out) const { @@ -8072,11 +8670,11 @@ void swap(TCloseOperationReq &a, TCloseOperationReq &b) { swap(a.operationHandle, b.operationHandle); } -TCloseOperationReq::TCloseOperationReq(const TCloseOperationReq& other269) { - operationHandle = other269.operationHandle; +TCloseOperationReq::TCloseOperationReq(const TCloseOperationReq& other277) { + operationHandle = other277.operationHandle; } -TCloseOperationReq& TCloseOperationReq::operator=(const TCloseOperationReq& other270) { - operationHandle = other270.operationHandle; +TCloseOperationReq& TCloseOperationReq::operator=(const TCloseOperationReq& other278) { + operationHandle = other278.operationHandle; return *this; } void TCloseOperationReq::printTo(std::ostream& out) const { @@ -8158,11 +8756,11 @@ void swap(TCloseOperationResp &a, TCloseOperationResp &b) { swap(a.status, b.status); } -TCloseOperationResp::TCloseOperationResp(const TCloseOperationResp& other271) { - status = other271.status; +TCloseOperationResp::TCloseOperationResp(const TCloseOperationResp& other279) { + status = other279.status; } -TCloseOperationResp& TCloseOperationResp::operator=(const TCloseOperationResp& other272) { - status = other272.status; +TCloseOperationResp& TCloseOperationResp::operator=(const TCloseOperationResp& other280) { + status = other280.status; return *this; } void TCloseOperationResp::printTo(std::ostream& out) const { @@ -8244,11 +8842,11 @@ void swap(TGetResultSetMetadataReq &a, TGetResultSetMetadataReq &b) { swap(a.operationHandle, b.operationHandle); } -TGetResultSetMetadataReq::TGetResultSetMetadataReq(const TGetResultSetMetadataReq& other273) { - operationHandle = other273.operationHandle; +TGetResultSetMetadataReq::TGetResultSetMetadataReq(const TGetResultSetMetadataReq& other281) { + operationHandle = other281.operationHandle; } -TGetResultSetMetadataReq& TGetResultSetMetadataReq::operator=(const TGetResultSetMetadataReq& other274) { - operationHandle = other274.operationHandle; +TGetResultSetMetadataReq& TGetResultSetMetadataReq::operator=(const TGetResultSetMetadataReq& other282) { + operationHandle = other282.operationHandle; return *this; } void TGetResultSetMetadataReq::printTo(std::ostream& out) const { @@ -8350,15 +8948,15 @@ void swap(TGetResultSetMetadataResp &a, TGetResultSetMetadataResp &b) 
{ swap(a.__isset, b.__isset); } -TGetResultSetMetadataResp::TGetResultSetMetadataResp(const TGetResultSetMetadataResp& other275) { - status = other275.status; - schema = other275.schema; - __isset = other275.__isset; +TGetResultSetMetadataResp::TGetResultSetMetadataResp(const TGetResultSetMetadataResp& other283) { + status = other283.status; + schema = other283.schema; + __isset = other283.__isset; } -TGetResultSetMetadataResp& TGetResultSetMetadataResp::operator=(const TGetResultSetMetadataResp& other276) { - status = other276.status; - schema = other276.schema; - __isset = other276.__isset; +TGetResultSetMetadataResp& TGetResultSetMetadataResp::operator=(const TGetResultSetMetadataResp& other284) { + status = other284.status; + schema = other284.schema; + __isset = other284.__isset; return *this; } void TGetResultSetMetadataResp::printTo(std::ostream& out) const { @@ -8425,9 +9023,9 @@ uint32_t TFetchResultsReq::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast277; - xfer += iprot->readI32(ecast277); - this->orientation = (TFetchOrientation::type)ecast277; + int32_t ecast285; + xfer += iprot->readI32(ecast285); + this->orientation = (TFetchOrientation::type)ecast285; isset_orientation = true; } else { xfer += iprot->skip(ftype); @@ -8503,19 +9101,19 @@ void swap(TFetchResultsReq &a, TFetchResultsReq &b) { swap(a.__isset, b.__isset); } -TFetchResultsReq::TFetchResultsReq(const TFetchResultsReq& other278) { - operationHandle = other278.operationHandle; - orientation = other278.orientation; - maxRows = other278.maxRows; - fetchType = other278.fetchType; - __isset = other278.__isset; -} -TFetchResultsReq& TFetchResultsReq::operator=(const TFetchResultsReq& other279) { - operationHandle = other279.operationHandle; - orientation = other279.orientation; - maxRows = other279.maxRows; - fetchType = other279.fetchType; - __isset = other279.__isset; +TFetchResultsReq::TFetchResultsReq(const TFetchResultsReq& other286) { + operationHandle = other286.operationHandle; + orientation = other286.orientation; + maxRows = other286.maxRows; + fetchType = other286.fetchType; + __isset = other286.__isset; +} +TFetchResultsReq& TFetchResultsReq::operator=(const TFetchResultsReq& other287) { + operationHandle = other287.operationHandle; + orientation = other287.orientation; + maxRows = other287.maxRows; + fetchType = other287.fetchType; + __isset = other287.__isset; return *this; } void TFetchResultsReq::printTo(std::ostream& out) const { @@ -8639,17 +9237,17 @@ void swap(TFetchResultsResp &a, TFetchResultsResp &b) { swap(a.__isset, b.__isset); } -TFetchResultsResp::TFetchResultsResp(const TFetchResultsResp& other280) { - status = other280.status; - hasMoreRows = other280.hasMoreRows; - results = other280.results; - __isset = other280.__isset; -} -TFetchResultsResp& TFetchResultsResp::operator=(const TFetchResultsResp& other281) { - status = other281.status; - hasMoreRows = other281.hasMoreRows; - results = other281.results; - __isset = other281.__isset; +TFetchResultsResp::TFetchResultsResp(const TFetchResultsResp& other288) { + status = other288.status; + hasMoreRows = other288.hasMoreRows; + results = other288.results; + __isset = other288.__isset; +} +TFetchResultsResp& TFetchResultsResp::operator=(const TFetchResultsResp& other289) { + status = other289.status; + hasMoreRows = other289.hasMoreRows; + results = other289.results; + __isset = other289.__isset; return *this; } void TFetchResultsResp::printTo(std::ostream& out) 
const { @@ -8773,15 +9371,15 @@ void swap(TGetDelegationTokenReq &a, TGetDelegationTokenReq &b) { swap(a.renewer, b.renewer); } -TGetDelegationTokenReq::TGetDelegationTokenReq(const TGetDelegationTokenReq& other282) { - sessionHandle = other282.sessionHandle; - owner = other282.owner; - renewer = other282.renewer; +TGetDelegationTokenReq::TGetDelegationTokenReq(const TGetDelegationTokenReq& other290) { + sessionHandle = other290.sessionHandle; + owner = other290.owner; + renewer = other290.renewer; } -TGetDelegationTokenReq& TGetDelegationTokenReq::operator=(const TGetDelegationTokenReq& other283) { - sessionHandle = other283.sessionHandle; - owner = other283.owner; - renewer = other283.renewer; +TGetDelegationTokenReq& TGetDelegationTokenReq::operator=(const TGetDelegationTokenReq& other291) { + sessionHandle = other291.sessionHandle; + owner = other291.owner; + renewer = other291.renewer; return *this; } void TGetDelegationTokenReq::printTo(std::ostream& out) const { @@ -8885,15 +9483,15 @@ void swap(TGetDelegationTokenResp &a, TGetDelegationTokenResp &b) { swap(a.__isset, b.__isset); } -TGetDelegationTokenResp::TGetDelegationTokenResp(const TGetDelegationTokenResp& other284) { - status = other284.status; - delegationToken = other284.delegationToken; - __isset = other284.__isset; +TGetDelegationTokenResp::TGetDelegationTokenResp(const TGetDelegationTokenResp& other292) { + status = other292.status; + delegationToken = other292.delegationToken; + __isset = other292.__isset; } -TGetDelegationTokenResp& TGetDelegationTokenResp::operator=(const TGetDelegationTokenResp& other285) { - status = other285.status; - delegationToken = other285.delegationToken; - __isset = other285.__isset; +TGetDelegationTokenResp& TGetDelegationTokenResp::operator=(const TGetDelegationTokenResp& other293) { + status = other293.status; + delegationToken = other293.delegationToken; + __isset = other293.__isset; return *this; } void TGetDelegationTokenResp::printTo(std::ostream& out) const { @@ -8996,13 +9594,13 @@ void swap(TCancelDelegationTokenReq &a, TCancelDelegationTokenReq &b) { swap(a.delegationToken, b.delegationToken); } -TCancelDelegationTokenReq::TCancelDelegationTokenReq(const TCancelDelegationTokenReq& other286) { - sessionHandle = other286.sessionHandle; - delegationToken = other286.delegationToken; +TCancelDelegationTokenReq::TCancelDelegationTokenReq(const TCancelDelegationTokenReq& other294) { + sessionHandle = other294.sessionHandle; + delegationToken = other294.delegationToken; } -TCancelDelegationTokenReq& TCancelDelegationTokenReq::operator=(const TCancelDelegationTokenReq& other287) { - sessionHandle = other287.sessionHandle; - delegationToken = other287.delegationToken; +TCancelDelegationTokenReq& TCancelDelegationTokenReq::operator=(const TCancelDelegationTokenReq& other295) { + sessionHandle = other295.sessionHandle; + delegationToken = other295.delegationToken; return *this; } void TCancelDelegationTokenReq::printTo(std::ostream& out) const { @@ -9085,11 +9683,11 @@ void swap(TCancelDelegationTokenResp &a, TCancelDelegationTokenResp &b) { swap(a.status, b.status); } -TCancelDelegationTokenResp::TCancelDelegationTokenResp(const TCancelDelegationTokenResp& other288) { - status = other288.status; +TCancelDelegationTokenResp::TCancelDelegationTokenResp(const TCancelDelegationTokenResp& other296) { + status = other296.status; } -TCancelDelegationTokenResp& TCancelDelegationTokenResp::operator=(const TCancelDelegationTokenResp& other289) { - status = other289.status; +TCancelDelegationTokenResp& 
TCancelDelegationTokenResp::operator=(const TCancelDelegationTokenResp& other297) { + status = other297.status; return *this; } void TCancelDelegationTokenResp::printTo(std::ostream& out) const { @@ -9191,13 +9789,13 @@ void swap(TRenewDelegationTokenReq &a, TRenewDelegationTokenReq &b) { swap(a.delegationToken, b.delegationToken); } -TRenewDelegationTokenReq::TRenewDelegationTokenReq(const TRenewDelegationTokenReq& other290) { - sessionHandle = other290.sessionHandle; - delegationToken = other290.delegationToken; +TRenewDelegationTokenReq::TRenewDelegationTokenReq(const TRenewDelegationTokenReq& other298) { + sessionHandle = other298.sessionHandle; + delegationToken = other298.delegationToken; } -TRenewDelegationTokenReq& TRenewDelegationTokenReq::operator=(const TRenewDelegationTokenReq& other291) { - sessionHandle = other291.sessionHandle; - delegationToken = other291.delegationToken; +TRenewDelegationTokenReq& TRenewDelegationTokenReq::operator=(const TRenewDelegationTokenReq& other299) { + sessionHandle = other299.sessionHandle; + delegationToken = other299.delegationToken; return *this; } void TRenewDelegationTokenReq::printTo(std::ostream& out) const { @@ -9280,11 +9878,11 @@ void swap(TRenewDelegationTokenResp &a, TRenewDelegationTokenResp &b) { swap(a.status, b.status); } -TRenewDelegationTokenResp::TRenewDelegationTokenResp(const TRenewDelegationTokenResp& other292) { - status = other292.status; +TRenewDelegationTokenResp::TRenewDelegationTokenResp(const TRenewDelegationTokenResp& other300) { + status = other300.status; } -TRenewDelegationTokenResp& TRenewDelegationTokenResp::operator=(const TRenewDelegationTokenResp& other293) { - status = other293.status; +TRenewDelegationTokenResp& TRenewDelegationTokenResp::operator=(const TRenewDelegationTokenResp& other301) { + status = other301.status; return *this; } void TRenewDelegationTokenResp::printTo(std::ostream& out) const { diff --git a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h index 7f1d9dd..fcc5e3e 100644 --- a/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h +++ b/service-rpc/src/gen/thrift/gen-cpp/TCLIService_types.h @@ -297,6 +297,14 @@ class TGetFunctionsReq; class TGetFunctionsResp; +class TGetPrimaryKeysReq; + +class TGetPrimaryKeysResp; + +class TGetCrossReferenceReq; + +class TGetCrossReferenceResp; + class TGetOperationStatusReq; class TGetOperationStatusResp; @@ -3367,6 +3375,274 @@ inline std::ostream& operator<<(std::ostream& out, const TGetFunctionsResp& obj) return out; } +typedef struct _TGetPrimaryKeysReq__isset { + _TGetPrimaryKeysReq__isset() : catalogName(false), schemaName(false), tableName(false) {} + bool catalogName :1; + bool schemaName :1; + bool tableName :1; +} _TGetPrimaryKeysReq__isset; + +class TGetPrimaryKeysReq { + public: + + TGetPrimaryKeysReq(const TGetPrimaryKeysReq&); + TGetPrimaryKeysReq& operator=(const TGetPrimaryKeysReq&); + TGetPrimaryKeysReq() : catalogName(), schemaName(), tableName() { + } + + virtual ~TGetPrimaryKeysReq() throw(); + TSessionHandle sessionHandle; + TIdentifier catalogName; + TIdentifier schemaName; + TIdentifier tableName; + + _TGetPrimaryKeysReq__isset __isset; + + void __set_sessionHandle(const TSessionHandle& val); + + void __set_catalogName(const TIdentifier& val); + + void __set_schemaName(const TIdentifier& val); + + void __set_tableName(const TIdentifier& val); + + bool operator == (const TGetPrimaryKeysReq & rhs) const + { + if (!(sessionHandle == rhs.sessionHandle)) + return false; + 
if (__isset.catalogName != rhs.__isset.catalogName) + return false; + else if (__isset.catalogName && !(catalogName == rhs.catalogName)) + return false; + if (__isset.schemaName != rhs.__isset.schemaName) + return false; + else if (__isset.schemaName && !(schemaName == rhs.schemaName)) + return false; + if (__isset.tableName != rhs.__isset.tableName) + return false; + else if (__isset.tableName && !(tableName == rhs.tableName)) + return false; + return true; + } + bool operator != (const TGetPrimaryKeysReq &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TGetPrimaryKeysReq & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(TGetPrimaryKeysReq &a, TGetPrimaryKeysReq &b); + +inline std::ostream& operator<<(std::ostream& out, const TGetPrimaryKeysReq& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _TGetPrimaryKeysResp__isset { + _TGetPrimaryKeysResp__isset() : operationHandle(false) {} + bool operationHandle :1; +} _TGetPrimaryKeysResp__isset; + +class TGetPrimaryKeysResp { + public: + + TGetPrimaryKeysResp(const TGetPrimaryKeysResp&); + TGetPrimaryKeysResp& operator=(const TGetPrimaryKeysResp&); + TGetPrimaryKeysResp() { + } + + virtual ~TGetPrimaryKeysResp() throw(); + TStatus status; + TOperationHandle operationHandle; + + _TGetPrimaryKeysResp__isset __isset; + + void __set_status(const TStatus& val); + + void __set_operationHandle(const TOperationHandle& val); + + bool operator == (const TGetPrimaryKeysResp & rhs) const + { + if (!(status == rhs.status)) + return false; + if (__isset.operationHandle != rhs.__isset.operationHandle) + return false; + else if (__isset.operationHandle && !(operationHandle == rhs.operationHandle)) + return false; + return true; + } + bool operator != (const TGetPrimaryKeysResp &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TGetPrimaryKeysResp & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(TGetPrimaryKeysResp &a, TGetPrimaryKeysResp &b); + +inline std::ostream& operator<<(std::ostream& out, const TGetPrimaryKeysResp& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _TGetCrossReferenceReq__isset { + _TGetCrossReferenceReq__isset() : parentCatalogName(false), parentSchemaName(false), parentTableName(false), foreignCatalogName(false), foreignSchemaName(false), foreignTableName(false) {} + bool parentCatalogName :1; + bool parentSchemaName :1; + bool parentTableName :1; + bool foreignCatalogName :1; + bool foreignSchemaName :1; + bool foreignTableName :1; +} _TGetCrossReferenceReq__isset; + +class TGetCrossReferenceReq { + public: + + TGetCrossReferenceReq(const TGetCrossReferenceReq&); + TGetCrossReferenceReq& operator=(const TGetCrossReferenceReq&); + TGetCrossReferenceReq() : parentCatalogName(), parentSchemaName(), parentTableName(), foreignCatalogName(), foreignSchemaName(), foreignTableName() { + } + + virtual ~TGetCrossReferenceReq() throw(); + TSessionHandle sessionHandle; + TIdentifier parentCatalogName; + TIdentifier parentSchemaName; + TIdentifier parentTableName; + TIdentifier foreignCatalogName; + TIdentifier foreignSchemaName; + TIdentifier foreignTableName; + + _TGetCrossReferenceReq__isset __isset; + + void __set_sessionHandle(const TSessionHandle& 
val); + + void __set_parentCatalogName(const TIdentifier& val); + + void __set_parentSchemaName(const TIdentifier& val); + + void __set_parentTableName(const TIdentifier& val); + + void __set_foreignCatalogName(const TIdentifier& val); + + void __set_foreignSchemaName(const TIdentifier& val); + + void __set_foreignTableName(const TIdentifier& val); + + bool operator == (const TGetCrossReferenceReq & rhs) const + { + if (!(sessionHandle == rhs.sessionHandle)) + return false; + if (__isset.parentCatalogName != rhs.__isset.parentCatalogName) + return false; + else if (__isset.parentCatalogName && !(parentCatalogName == rhs.parentCatalogName)) + return false; + if (__isset.parentSchemaName != rhs.__isset.parentSchemaName) + return false; + else if (__isset.parentSchemaName && !(parentSchemaName == rhs.parentSchemaName)) + return false; + if (__isset.parentTableName != rhs.__isset.parentTableName) + return false; + else if (__isset.parentTableName && !(parentTableName == rhs.parentTableName)) + return false; + if (__isset.foreignCatalogName != rhs.__isset.foreignCatalogName) + return false; + else if (__isset.foreignCatalogName && !(foreignCatalogName == rhs.foreignCatalogName)) + return false; + if (__isset.foreignSchemaName != rhs.__isset.foreignSchemaName) + return false; + else if (__isset.foreignSchemaName && !(foreignSchemaName == rhs.foreignSchemaName)) + return false; + if (__isset.foreignTableName != rhs.__isset.foreignTableName) + return false; + else if (__isset.foreignTableName && !(foreignTableName == rhs.foreignTableName)) + return false; + return true; + } + bool operator != (const TGetCrossReferenceReq &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TGetCrossReferenceReq & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(TGetCrossReferenceReq &a, TGetCrossReferenceReq &b); + +inline std::ostream& operator<<(std::ostream& out, const TGetCrossReferenceReq& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _TGetCrossReferenceResp__isset { + _TGetCrossReferenceResp__isset() : operationHandle(false) {} + bool operationHandle :1; +} _TGetCrossReferenceResp__isset; + +class TGetCrossReferenceResp { + public: + + TGetCrossReferenceResp(const TGetCrossReferenceResp&); + TGetCrossReferenceResp& operator=(const TGetCrossReferenceResp&); + TGetCrossReferenceResp() { + } + + virtual ~TGetCrossReferenceResp() throw(); + TStatus status; + TOperationHandle operationHandle; + + _TGetCrossReferenceResp__isset __isset; + + void __set_status(const TStatus& val); + + void __set_operationHandle(const TOperationHandle& val); + + bool operator == (const TGetCrossReferenceResp & rhs) const + { + if (!(status == rhs.status)) + return false; + if (__isset.operationHandle != rhs.__isset.operationHandle) + return false; + else if (__isset.operationHandle && !(operationHandle == rhs.operationHandle)) + return false; + return true; + } + bool operator != (const TGetCrossReferenceResp &rhs) const { + return !(*this == rhs); + } + + bool operator < (const TGetCrossReferenceResp & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(TGetCrossReferenceResp &a, TGetCrossReferenceResp &b); + +inline std::ostream& operator<<(std::ostream& out, const 
TGetCrossReferenceResp& obj) +{ + obj.printTo(out); + return out; +} + class TGetOperationStatusReq { public: diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java index c684f89..6dba051 100644 --- a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java +++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TCLIService.java @@ -61,6 +61,10 @@ public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws org.apache.thrift.TException; + public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException; + + public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException; + public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException; public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws org.apache.thrift.TException; @@ -103,6 +107,10 @@ public void GetFunctions(TGetFunctionsReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void GetPrimaryKeys(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void GetCrossReference(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void CancelOperation(TCancelOperationReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -394,6 +402,52 @@ public TGetFunctionsResp recv_GetFunctions() throws org.apache.thrift.TException throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetFunctions failed: unknown result"); } + public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException + { + send_GetPrimaryKeys(req); + return recv_GetPrimaryKeys(); + } + + public void send_GetPrimaryKeys(TGetPrimaryKeysReq req) throws org.apache.thrift.TException + { + GetPrimaryKeys_args args = new GetPrimaryKeys_args(); + args.setReq(req); + sendBase("GetPrimaryKeys", args); + } + + public TGetPrimaryKeysResp recv_GetPrimaryKeys() throws org.apache.thrift.TException + { + GetPrimaryKeys_result result = new GetPrimaryKeys_result(); + receiveBase(result, "GetPrimaryKeys"); + if (result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetPrimaryKeys failed: unknown result"); + } + + public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException + { + send_GetCrossReference(req); + return recv_GetCrossReference(); + } + + public void send_GetCrossReference(TGetCrossReferenceReq req) throws org.apache.thrift.TException + { + GetCrossReference_args args = new GetCrossReference_args(); + args.setReq(req); + sendBase("GetCrossReference", args); + } + + public TGetCrossReferenceResp recv_GetCrossReference() throws org.apache.thrift.TException + { + GetCrossReference_result result = new GetCrossReference_result(); + receiveBase(result, "GetCrossReference"); + if 
(result.isSetSuccess()) { + return result.success; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "GetCrossReference failed: unknown result"); + } + public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws org.apache.thrift.TException { send_GetOperationStatus(req); @@ -948,6 +1002,70 @@ public TGetFunctionsResp getResult() throws org.apache.thrift.TException { } } + public void GetPrimaryKeys(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + GetPrimaryKeys_call method_call = new GetPrimaryKeys_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class GetPrimaryKeys_call extends org.apache.thrift.async.TAsyncMethodCall { + private TGetPrimaryKeysReq req; + public GetPrimaryKeys_call(TGetPrimaryKeysReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetPrimaryKeys", org.apache.thrift.protocol.TMessageType.CALL, 0)); + GetPrimaryKeys_args args = new GetPrimaryKeys_args(); + args.setReq(req); + args.write(prot); + prot.writeMessageEnd(); + } + + public TGetPrimaryKeysResp getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_GetPrimaryKeys(); + } + } + + public void GetCrossReference(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + GetCrossReference_call method_call = new GetCrossReference_call(req, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class GetCrossReference_call extends org.apache.thrift.async.TAsyncMethodCall { + private TGetCrossReferenceReq req; + public GetCrossReference_call(TGetCrossReferenceReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.req = req; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("GetCrossReference", org.apache.thrift.protocol.TMessageType.CALL, 0)); + GetCrossReference_args args = new GetCrossReference_args(); + args.setReq(req); + args.write(prot); + 
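+      // args were serialized above; close the CALL envelope opened by
+      // writeMessageBegin so the async client can transmit the framed request.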
prot.writeMessageEnd(); + } + + public TGetCrossReferenceResp getResult() throws org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_GetCrossReference(); + } + } + public void GetOperationStatus(TGetOperationStatusReq req, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); GetOperationStatus_call method_call = new GetOperationStatus_call(req, resultHandler, this, ___protocolFactory, ___transport); @@ -1228,6 +1346,8 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public GetPrimaryKeys() { + super("GetPrimaryKeys"); + } + + public GetPrimaryKeys_args getEmptyArgsInstance() { + return new GetPrimaryKeys_args(); + } + + protected boolean isOneway() { + return false; + } + + public GetPrimaryKeys_result getResult(I iface, GetPrimaryKeys_args args) throws org.apache.thrift.TException { + GetPrimaryKeys_result result = new GetPrimaryKeys_result(); + result.success = iface.GetPrimaryKeys(args.req); + return result; + } + } + + public static class GetCrossReference extends org.apache.thrift.ProcessFunction { + public GetCrossReference() { + super("GetCrossReference"); + } + + public GetCrossReference_args getEmptyArgsInstance() { + return new GetCrossReference_args(); + } + + protected boolean isOneway() { + return false; + } + + public GetCrossReference_result getResult(I iface, GetCrossReference_args args) throws org.apache.thrift.TException { + GetCrossReference_result result = new GetCrossReference_result(); + result.success = iface.GetCrossReference(args.req); + return result; + } + } + public static class GetOperationStatus extends org.apache.thrift.ProcessFunction { public GetOperationStatus() { super("GetOperationStatus"); @@ -1643,6 +1803,8 @@ protected AsyncProcessor(I iface, Map extends org.apache.thrift.AsyncProcessFunction { + public GetPrimaryKeys() { + super("GetPrimaryKeys"); + } + + public GetPrimaryKeys_args getEmptyArgsInstance() { + return new GetPrimaryKeys_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(TGetPrimaryKeysResp o) { + GetPrimaryKeys_result result = new GetPrimaryKeys_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + GetPrimaryKeys_result result = new GetPrimaryKeys_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + 
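+        // the EXCEPTION reply also failed to serialize; close the frame buffer
+        // rather than leave the client a half-written response.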
fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, GetPrimaryKeys_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.GetPrimaryKeys(args.req,resultHandler); + } + } + + public static class GetCrossReference extends org.apache.thrift.AsyncProcessFunction { + public GetCrossReference() { + super("GetCrossReference"); + } + + public GetCrossReference_args getEmptyArgsInstance() { + return new GetCrossReference_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(TGetCrossReferenceResp o) { + GetCrossReference_result result = new GetCrossReference_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + GetCrossReference_result result = new GetCrossReference_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, GetCrossReference_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.GetCrossReference(args.req,resultHandler); + } + } + public static class GetOperationStatus extends org.apache.thrift.AsyncProcessFunction { public GetOperationStatus() { super("GetOperationStatus"); @@ -10611,6 +10875,1458 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetFunctions_result } + public static class GetPrimaryKeys_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrimaryKeys_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetPrimaryKeys_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetPrimaryKeys_argsTupleSchemeFactory()); + } + + private TGetPrimaryKeysReq req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetPrimaryKeysReq.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPrimaryKeys_args.class, metaDataMap); + } + + public GetPrimaryKeys_args() { + } + + public GetPrimaryKeys_args( + TGetPrimaryKeysReq req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. 
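+   * The nested TGetPrimaryKeysReq, when set, is duplicated via its own copy
+   * constructor rather than shared by reference.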
+ */ + public GetPrimaryKeys_args(GetPrimaryKeys_args other) { + if (other.isSetReq()) { + this.req = new TGetPrimaryKeysReq(other.req); + } + } + + public GetPrimaryKeys_args deepCopy() { + return new GetPrimaryKeys_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public TGetPrimaryKeysReq getReq() { + return this.req; + } + + public void setReq(TGetPrimaryKeysReq req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((TGetPrimaryKeysReq)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetPrimaryKeys_args) + return this.equals((GetPrimaryKeys_args)that); + return false; + } + + public boolean equals(GetPrimaryKeys_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override + public int compareTo(GetPrimaryKeys_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetPrimaryKeys_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // 
check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetPrimaryKeys_argsStandardSchemeFactory implements SchemeFactory { + public GetPrimaryKeys_argsStandardScheme getScheme() { + return new GetPrimaryKeys_argsStandardScheme(); + } + } + + private static class GetPrimaryKeys_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new TGetPrimaryKeysReq(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetPrimaryKeys_argsTupleSchemeFactory implements SchemeFactory { + public GetPrimaryKeys_argsTupleScheme getScheme() { + return new GetPrimaryKeys_argsTupleScheme(); + } + } + + private static class GetPrimaryKeys_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new TGetPrimaryKeysReq(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + public static class GetPrimaryKeys_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetPrimaryKeys_result"); + + private static final org.apache.thrift.protocol.TField 
SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetPrimaryKeys_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetPrimaryKeys_resultTupleSchemeFactory()); + } + + private TGetPrimaryKeysResp success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetPrimaryKeysResp.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetPrimaryKeys_result.class, metaDataMap); + } + + public GetPrimaryKeys_result() { + } + + public GetPrimaryKeys_result( + TGetPrimaryKeysResp success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetPrimaryKeys_result(GetPrimaryKeys_result other) { + if (other.isSetSuccess()) { + this.success = new TGetPrimaryKeysResp(other.success); + } + } + + public GetPrimaryKeys_result deepCopy() { + return new GetPrimaryKeys_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public TGetPrimaryKeysResp getSuccess() { + return this.success; + } + + public void setSuccess(TGetPrimaryKeysResp success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TGetPrimaryKeysResp)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetPrimaryKeys_result) + return this.equals((GetPrimaryKeys_result)that); + return false; + } + + public boolean equals(GetPrimaryKeys_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(GetPrimaryKeys_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetPrimaryKeys_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); 
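+      // an unset success struct prints as the literal string "null"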
+ } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetPrimaryKeys_resultStandardSchemeFactory implements SchemeFactory { + public GetPrimaryKeys_resultStandardScheme getScheme() { + return new GetPrimaryKeys_resultStandardScheme(); + } + } + + private static class GetPrimaryKeys_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TGetPrimaryKeysResp(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetPrimaryKeys_resultTupleSchemeFactory implements SchemeFactory { + public GetPrimaryKeys_resultTupleScheme getScheme() { + return new GetPrimaryKeys_resultTupleScheme(); + } + } + + private static class GetPrimaryKeys_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetPrimaryKeys_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new TGetPrimaryKeysResp(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + } + } + + } + + public static class 
GetCrossReference_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetCrossReference_args"); + + private static final org.apache.thrift.protocol.TField REQ_FIELD_DESC = new org.apache.thrift.protocol.TField("req", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetCrossReference_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetCrossReference_argsTupleSchemeFactory()); + } + + private TGetCrossReferenceReq req; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQ((short)1, "req"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQ + return REQ; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQ, new org.apache.thrift.meta_data.FieldMetaData("req", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCrossReferenceReq.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCrossReference_args.class, metaDataMap); + } + + public GetCrossReference_args() { + } + + public GetCrossReference_args( + TGetCrossReferenceReq req) + { + this(); + this.req = req; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetCrossReference_args(GetCrossReference_args other) { + if (other.isSetReq()) { + this.req = new TGetCrossReferenceReq(other.req); + } + } + + public GetCrossReference_args deepCopy() { + return new GetCrossReference_args(this); + } + + @Override + public void clear() { + this.req = null; + } + + public TGetCrossReferenceReq getReq() { + return this.req; + } + + public void setReq(TGetCrossReferenceReq req) { + this.req = req; + } + + public void unsetReq() { + this.req = null; + } + + /** Returns true if field req is set (has been assigned a value) and false otherwise */ + public boolean isSetReq() { + return this.req != null; + } + + public void setReqIsSet(boolean value) { + if (!value) { + this.req = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQ: + if (value == null) { + unsetReq(); + } else { + setReq((TGetCrossReferenceReq)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQ: + return getReq(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQ: + return isSetReq(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetCrossReference_args) + return this.equals((GetCrossReference_args)that); + return false; + } + + public boolean equals(GetCrossReference_args that) { + if (that == null) + return false; + + boolean this_present_req = true && this.isSetReq(); + boolean that_present_req = true && that.isSetReq(); + if (this_present_req || that_present_req) { + if (!(this_present_req && that_present_req)) + return false; + if (!this.req.equals(that.req)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_req = true && (isSetReq()); + list.add(present_req); + if (present_req) + list.add(req); + + return list.hashCode(); + } + + @Override + public int compareTo(GetCrossReference_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetReq()).compareTo(other.isSetReq()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReq()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.req, other.req); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetCrossReference_args("); + boolean first = true; + + sb.append("req:"); + if (this.req == null) { + sb.append("null"); + } else { + sb.append(this.req); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + 
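+      // no field of GetCrossReference_args is marked required, so validation
+      // only recurses into the nested request struct when one is present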
// check for required fields + // check for sub-struct validity + if (req != null) { + req.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetCrossReference_argsStandardSchemeFactory implements SchemeFactory { + public GetCrossReference_argsStandardScheme getScheme() { + return new GetCrossReference_argsStandardScheme(); + } + } + + private static class GetCrossReference_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetCrossReference_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQ + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.req = new TGetCrossReferenceReq(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetCrossReference_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.req != null) { + oprot.writeFieldBegin(REQ_FIELD_DESC); + struct.req.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetCrossReference_argsTupleSchemeFactory implements SchemeFactory { + public GetCrossReference_argsTupleScheme getScheme() { + return new GetCrossReference_argsTupleScheme(); + } + } + + private static class GetCrossReference_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetReq()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetReq()) { + struct.req.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.req = new TGetCrossReferenceReq(); + struct.req.read(iprot); + struct.setReqIsSet(true); + } + } + } + + } + + public static class GetCrossReference_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new 
org.apache.thrift.protocol.TStruct("GetCrossReference_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new GetCrossReference_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new GetCrossReference_resultTupleSchemeFactory()); + } + + private TGetCrossReferenceResp success; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TGetCrossReferenceResp.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(GetCrossReference_result.class, metaDataMap); + } + + public GetCrossReference_result() { + } + + public GetCrossReference_result( + TGetCrossReferenceResp success) + { + this(); + this.success = success; + } + + /** + * Performs a deep copy on other. 
+ */ + public GetCrossReference_result(GetCrossReference_result other) { + if (other.isSetSuccess()) { + this.success = new TGetCrossReferenceResp(other.success); + } + } + + public GetCrossReference_result deepCopy() { + return new GetCrossReference_result(this); + } + + @Override + public void clear() { + this.success = null; + } + + public TGetCrossReferenceResp getSuccess() { + return this.success; + } + + public void setSuccess(TGetCrossReferenceResp success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TGetCrossReferenceResp)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof GetCrossReference_result) + return this.equals((GetCrossReference_result)that); + return false; + } + + public boolean equals(GetCrossReference_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + return list.hashCode(); + } + + @Override + public int compareTo(GetCrossReference_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("GetCrossReference_result("); + boolean first = true; + + sb.append("success:"); + if 
(this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class GetCrossReference_resultStandardSchemeFactory implements SchemeFactory { + public GetCrossReference_resultStandardScheme getScheme() { + return new GetCrossReference_resultStandardScheme(); + } + } + + private static class GetCrossReference_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, GetCrossReference_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new TGetCrossReferenceResp(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, GetCrossReference_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class GetCrossReference_resultTupleSchemeFactory implements SchemeFactory { + public GetCrossReference_resultTupleScheme getScheme() { + return new GetCrossReference_resultTupleScheme(); + } + } + + private static class GetCrossReference_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, GetCrossReference_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.success = new TGetCrossReferenceResp(); + struct.success.read(iprot); + 
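+          // bit 0 of the optionals BitSet signalled a serialized success
+          // struct; record that it has now been decoded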
struct.setSuccessIsSet(true); + } + } + } + + } + public static class GetOperationStatus_args implements org.apache.thrift.TBase<GetOperationStatus_args, GetOperationStatus_args._Fields>, java.io.Serializable, Cloneable, Comparable<GetOperationStatus_args> { private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("GetOperationStatus_args"); diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java new file mode 100644 index 0000000..9729570 --- /dev/null +++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceReq.java @@ -0,0 +1,1034 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hive.service.rpc.thrift; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class TGetCrossReferenceReq implements org.apache.thrift.TBase<TGetCrossReferenceReq, TGetCrossReferenceReq._Fields>, java.io.Serializable, Cloneable, Comparable<TGetCrossReferenceReq> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCrossReferenceReq"); + + private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField PARENT_CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentCatalogName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField PARENT_SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentSchemaName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField PARENT_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("parentTableName", org.apache.thrift.protocol.TType.STRING, (short)4); + private static final org.apache.thrift.protocol.TField FOREIGN_CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignCatalogName", org.apache.thrift.protocol.TType.STRING, (short)5); + private static final org.apache.thrift.protocol.TField FOREIGN_SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignSchemaName", org.apache.thrift.protocol.TType.STRING, (short)6); + private static final org.apache.thrift.protocol.TField FOREIGN_TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("foreignTableName", org.apache.thrift.protocol.TType.STRING, (short)7); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TGetCrossReferenceReqStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TGetCrossReferenceReqTupleSchemeFactory()); + } + + private TSessionHandle sessionHandle; // required + private String parentCatalogName; // optional + private String parentSchemaName; // optional + private String parentTableName; // optional + private String foreignCatalogName; // optional + private String foreignSchemaName; // optional + private String foreignTableName; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SESSION_HANDLE((short)1, "sessionHandle"), + PARENT_CATALOG_NAME((short)2, "parentCatalogName"), + PARENT_SCHEMA_NAME((short)3, "parentSchemaName"), + PARENT_TABLE_NAME((short)4, "parentTableName"), + FOREIGN_CATALOG_NAME((short)5, "foreignCatalogName"), + FOREIGN_SCHEMA_NAME((short)6, "foreignSchemaName"), + FOREIGN_TABLE_NAME((short)7, "foreignTableName"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if it's not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // SESSION_HANDLE + return SESSION_HANDLE; + case 2: // PARENT_CATALOG_NAME + return PARENT_CATALOG_NAME; + case 3: // PARENT_SCHEMA_NAME + return PARENT_SCHEMA_NAME; + case 4: // PARENT_TABLE_NAME + return PARENT_TABLE_NAME; + case 5: // FOREIGN_CATALOG_NAME + return FOREIGN_CATALOG_NAME; + case 6: // FOREIGN_SCHEMA_NAME + return FOREIGN_SCHEMA_NAME; + case 7: // FOREIGN_TABLE_NAME + return FOREIGN_TABLE_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if it's not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.PARENT_CATALOG_NAME,_Fields.PARENT_SCHEMA_NAME,_Fields.PARENT_TABLE_NAME,_Fields.FOREIGN_CATALOG_NAME,_Fields.FOREIGN_SCHEMA_NAME,_Fields.FOREIGN_TABLE_NAME}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); + tmpMap.put(_Fields.PARENT_CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentCatalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.PARENT_SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentSchemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.PARENT_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("parentTableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.FOREIGN_CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignCatalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.FOREIGN_SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignSchemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.FOREIGN_TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("foreignTableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCrossReferenceReq.class, metaDataMap); + } + + public TGetCrossReferenceReq() { + } + + public TGetCrossReferenceReq( + TSessionHandle sessionHandle) + { + this(); + this.sessionHandle = sessionHandle; + } + + /** + * Performs a deep copy on other. 
+ */ + public TGetCrossReferenceReq(TGetCrossReferenceReq other) { + if (other.isSetSessionHandle()) { + this.sessionHandle = new TSessionHandle(other.sessionHandle); + } + if (other.isSetParentCatalogName()) { + this.parentCatalogName = other.parentCatalogName; + } + if (other.isSetParentSchemaName()) { + this.parentSchemaName = other.parentSchemaName; + } + if (other.isSetParentTableName()) { + this.parentTableName = other.parentTableName; + } + if (other.isSetForeignCatalogName()) { + this.foreignCatalogName = other.foreignCatalogName; + } + if (other.isSetForeignSchemaName()) { + this.foreignSchemaName = other.foreignSchemaName; + } + if (other.isSetForeignTableName()) { + this.foreignTableName = other.foreignTableName; + } + } + + public TGetCrossReferenceReq deepCopy() { + return new TGetCrossReferenceReq(this); + } + + @Override + public void clear() { + this.sessionHandle = null; + this.parentCatalogName = null; + this.parentSchemaName = null; + this.parentTableName = null; + this.foreignCatalogName = null; + this.foreignSchemaName = null; + this.foreignTableName = null; + } + + public TSessionHandle getSessionHandle() { + return this.sessionHandle; + } + + public void setSessionHandle(TSessionHandle sessionHandle) { + this.sessionHandle = sessionHandle; + } + + public void unsetSessionHandle() { + this.sessionHandle = null; + } + + /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ + public boolean isSetSessionHandle() { + return this.sessionHandle != null; + } + + public void setSessionHandleIsSet(boolean value) { + if (!value) { + this.sessionHandle = null; + } + } + + public String getParentCatalogName() { + return this.parentCatalogName; + } + + public void setParentCatalogName(String parentCatalogName) { + this.parentCatalogName = parentCatalogName; + } + + public void unsetParentCatalogName() { + this.parentCatalogName = null; + } + + /** Returns true if field parentCatalogName is set (has been assigned a value) and false otherwise */ + public boolean isSetParentCatalogName() { + return this.parentCatalogName != null; + } + + public void setParentCatalogNameIsSet(boolean value) { + if (!value) { + this.parentCatalogName = null; + } + } + + public String getParentSchemaName() { + return this.parentSchemaName; + } + + public void setParentSchemaName(String parentSchemaName) { + this.parentSchemaName = parentSchemaName; + } + + public void unsetParentSchemaName() { + this.parentSchemaName = null; + } + + /** Returns true if field parentSchemaName is set (has been assigned a value) and false otherwise */ + public boolean isSetParentSchemaName() { + return this.parentSchemaName != null; + } + + public void setParentSchemaNameIsSet(boolean value) { + if (!value) { + this.parentSchemaName = null; + } + } + + public String getParentTableName() { + return this.parentTableName; + } + + public void setParentTableName(String parentTableName) { + this.parentTableName = parentTableName; + } + + public void unsetParentTableName() { + this.parentTableName = null; + } + + /** Returns true if field parentTableName is set (has been assigned a value) and false otherwise */ + public boolean isSetParentTableName() { + return this.parentTableName != null; + } + + public void setParentTableNameIsSet(boolean value) { + if (!value) { + this.parentTableName = null; + } + } + + public String getForeignCatalogName() { + return this.foreignCatalogName; + } + + public void setForeignCatalogName(String foreignCatalogName) { + this.foreignCatalogName = 
foreignCatalogName; + } + + public void unsetForeignCatalogName() { + this.foreignCatalogName = null; + } + + /** Returns true if field foreignCatalogName is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignCatalogName() { + return this.foreignCatalogName != null; + } + + public void setForeignCatalogNameIsSet(boolean value) { + if (!value) { + this.foreignCatalogName = null; + } + } + + public String getForeignSchemaName() { + return this.foreignSchemaName; + } + + public void setForeignSchemaName(String foreignSchemaName) { + this.foreignSchemaName = foreignSchemaName; + } + + public void unsetForeignSchemaName() { + this.foreignSchemaName = null; + } + + /** Returns true if field foreignSchemaName is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignSchemaName() { + return this.foreignSchemaName != null; + } + + public void setForeignSchemaNameIsSet(boolean value) { + if (!value) { + this.foreignSchemaName = null; + } + } + + public String getForeignTableName() { + return this.foreignTableName; + } + + public void setForeignTableName(String foreignTableName) { + this.foreignTableName = foreignTableName; + } + + public void unsetForeignTableName() { + this.foreignTableName = null; + } + + /** Returns true if field foreignTableName is set (has been assigned a value) and false otherwise */ + public boolean isSetForeignTableName() { + return this.foreignTableName != null; + } + + public void setForeignTableNameIsSet(boolean value) { + if (!value) { + this.foreignTableName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SESSION_HANDLE: + if (value == null) { + unsetSessionHandle(); + } else { + setSessionHandle((TSessionHandle)value); + } + break; + + case PARENT_CATALOG_NAME: + if (value == null) { + unsetParentCatalogName(); + } else { + setParentCatalogName((String)value); + } + break; + + case PARENT_SCHEMA_NAME: + if (value == null) { + unsetParentSchemaName(); + } else { + setParentSchemaName((String)value); + } + break; + + case PARENT_TABLE_NAME: + if (value == null) { + unsetParentTableName(); + } else { + setParentTableName((String)value); + } + break; + + case FOREIGN_CATALOG_NAME: + if (value == null) { + unsetForeignCatalogName(); + } else { + setForeignCatalogName((String)value); + } + break; + + case FOREIGN_SCHEMA_NAME: + if (value == null) { + unsetForeignSchemaName(); + } else { + setForeignSchemaName((String)value); + } + break; + + case FOREIGN_TABLE_NAME: + if (value == null) { + unsetForeignTableName(); + } else { + setForeignTableName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SESSION_HANDLE: + return getSessionHandle(); + + case PARENT_CATALOG_NAME: + return getParentCatalogName(); + + case PARENT_SCHEMA_NAME: + return getParentSchemaName(); + + case PARENT_TABLE_NAME: + return getParentTableName(); + + case FOREIGN_CATALOG_NAME: + return getForeignCatalogName(); + + case FOREIGN_SCHEMA_NAME: + return getForeignSchemaName(); + + case FOREIGN_TABLE_NAME: + return getForeignTableName(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SESSION_HANDLE: + return isSetSessionHandle(); + case PARENT_CATALOG_NAME: + return 
isSetParentCatalogName(); + case PARENT_SCHEMA_NAME: + return isSetParentSchemaName(); + case PARENT_TABLE_NAME: + return isSetParentTableName(); + case FOREIGN_CATALOG_NAME: + return isSetForeignCatalogName(); + case FOREIGN_SCHEMA_NAME: + return isSetForeignSchemaName(); + case FOREIGN_TABLE_NAME: + return isSetForeignTableName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TGetCrossReferenceReq) + return this.equals((TGetCrossReferenceReq)that); + return false; + } + + public boolean equals(TGetCrossReferenceReq that) { + if (that == null) + return false; + + boolean this_present_sessionHandle = true && this.isSetSessionHandle(); + boolean that_present_sessionHandle = true && that.isSetSessionHandle(); + if (this_present_sessionHandle || that_present_sessionHandle) { + if (!(this_present_sessionHandle && that_present_sessionHandle)) + return false; + if (!this.sessionHandle.equals(that.sessionHandle)) + return false; + } + + boolean this_present_parentCatalogName = true && this.isSetParentCatalogName(); + boolean that_present_parentCatalogName = true && that.isSetParentCatalogName(); + if (this_present_parentCatalogName || that_present_parentCatalogName) { + if (!(this_present_parentCatalogName && that_present_parentCatalogName)) + return false; + if (!this.parentCatalogName.equals(that.parentCatalogName)) + return false; + } + + boolean this_present_parentSchemaName = true && this.isSetParentSchemaName(); + boolean that_present_parentSchemaName = true && that.isSetParentSchemaName(); + if (this_present_parentSchemaName || that_present_parentSchemaName) { + if (!(this_present_parentSchemaName && that_present_parentSchemaName)) + return false; + if (!this.parentSchemaName.equals(that.parentSchemaName)) + return false; + } + + boolean this_present_parentTableName = true && this.isSetParentTableName(); + boolean that_present_parentTableName = true && that.isSetParentTableName(); + if (this_present_parentTableName || that_present_parentTableName) { + if (!(this_present_parentTableName && that_present_parentTableName)) + return false; + if (!this.parentTableName.equals(that.parentTableName)) + return false; + } + + boolean this_present_foreignCatalogName = true && this.isSetForeignCatalogName(); + boolean that_present_foreignCatalogName = true && that.isSetForeignCatalogName(); + if (this_present_foreignCatalogName || that_present_foreignCatalogName) { + if (!(this_present_foreignCatalogName && that_present_foreignCatalogName)) + return false; + if (!this.foreignCatalogName.equals(that.foreignCatalogName)) + return false; + } + + boolean this_present_foreignSchemaName = true && this.isSetForeignSchemaName(); + boolean that_present_foreignSchemaName = true && that.isSetForeignSchemaName(); + if (this_present_foreignSchemaName || that_present_foreignSchemaName) { + if (!(this_present_foreignSchemaName && that_present_foreignSchemaName)) + return false; + if (!this.foreignSchemaName.equals(that.foreignSchemaName)) + return false; + } + + boolean this_present_foreignTableName = true && this.isSetForeignTableName(); + boolean that_present_foreignTableName = true && that.isSetForeignTableName(); + if (this_present_foreignTableName || that_present_foreignTableName) { + if (!(this_present_foreignTableName && that_present_foreignTableName)) + return false; + if (!this.foreignTableName.equals(that.foreignTableName)) + return false; + } + + return true; + } + + @Override + public int 
hashCode() { + List<Object> list = new ArrayList<Object>(); + + boolean present_sessionHandle = true && (isSetSessionHandle()); + list.add(present_sessionHandle); + if (present_sessionHandle) + list.add(sessionHandle); + + boolean present_parentCatalogName = true && (isSetParentCatalogName()); + list.add(present_parentCatalogName); + if (present_parentCatalogName) + list.add(parentCatalogName); + + boolean present_parentSchemaName = true && (isSetParentSchemaName()); + list.add(present_parentSchemaName); + if (present_parentSchemaName) + list.add(parentSchemaName); + + boolean present_parentTableName = true && (isSetParentTableName()); + list.add(present_parentTableName); + if (present_parentTableName) + list.add(parentTableName); + + boolean present_foreignCatalogName = true && (isSetForeignCatalogName()); + list.add(present_foreignCatalogName); + if (present_foreignCatalogName) + list.add(foreignCatalogName); + + boolean present_foreignSchemaName = true && (isSetForeignSchemaName()); + list.add(present_foreignSchemaName); + if (present_foreignSchemaName) + list.add(foreignSchemaName); + + boolean present_foreignTableName = true && (isSetForeignTableName()); + list.add(present_foreignTableName); + if (present_foreignTableName) + list.add(foreignTableName); + + return list.hashCode(); + } + + @Override + public int compareTo(TGetCrossReferenceReq other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSessionHandle()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentCatalogName()).compareTo(other.isSetParentCatalogName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentCatalogName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentCatalogName, other.parentCatalogName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentSchemaName()).compareTo(other.isSetParentSchemaName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentSchemaName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentSchemaName, other.parentSchemaName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetParentTableName()).compareTo(other.isSetParentTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetParentTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.parentTableName, other.parentTableName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeignCatalogName()).compareTo(other.isSetForeignCatalogName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignCatalogName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignCatalogName, other.foreignCatalogName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeignSchemaName()).compareTo(other.isSetForeignSchemaName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignSchemaName()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.foreignSchemaName, other.foreignSchemaName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetForeignTableName()).compareTo(other.isSetForeignTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetForeignTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.foreignTableName, other.foreignTableName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TGetCrossReferenceReq("); + boolean first = true; + + sb.append("sessionHandle:"); + if (this.sessionHandle == null) { + sb.append("null"); + } else { + sb.append(this.sessionHandle); + } + first = false; + if (isSetParentCatalogName()) { + if (!first) sb.append(", "); + sb.append("parentCatalogName:"); + if (this.parentCatalogName == null) { + sb.append("null"); + } else { + sb.append(this.parentCatalogName); + } + first = false; + } + if (isSetParentSchemaName()) { + if (!first) sb.append(", "); + sb.append("parentSchemaName:"); + if (this.parentSchemaName == null) { + sb.append("null"); + } else { + sb.append(this.parentSchemaName); + } + first = false; + } + if (isSetParentTableName()) { + if (!first) sb.append(", "); + sb.append("parentTableName:"); + if (this.parentTableName == null) { + sb.append("null"); + } else { + sb.append(this.parentTableName); + } + first = false; + } + if (isSetForeignCatalogName()) { + if (!first) sb.append(", "); + sb.append("foreignCatalogName:"); + if (this.foreignCatalogName == null) { + sb.append("null"); + } else { + sb.append(this.foreignCatalogName); + } + first = false; + } + if (isSetForeignSchemaName()) { + if (!first) sb.append(", "); + sb.append("foreignSchemaName:"); + if (this.foreignSchemaName == null) { + sb.append("null"); + } else { + sb.append(this.foreignSchemaName); + } + first = false; + } + if (isSetForeignTableName()) { + if (!first) sb.append(", "); + sb.append("foreignTableName:"); + if (this.foreignTableName == null) { + sb.append("null"); + } else { + sb.append(this.foreignTableName); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetSessionHandle()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + if (sessionHandle != null) { + sessionHandle.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TGetCrossReferenceReqStandardSchemeFactory implements SchemeFactory { + public TGetCrossReferenceReqStandardScheme getScheme() { + return new TGetCrossReferenceReqStandardScheme(); + } + } + + private static class TGetCrossReferenceReqStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // SESSION_HANDLE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.sessionHandle = new TSessionHandle(); + struct.sessionHandle.read(iprot); + struct.setSessionHandleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // PARENT_CATALOG_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.parentCatalogName = iprot.readString(); + struct.setParentCatalogNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // PARENT_SCHEMA_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.parentSchemaName = iprot.readString(); + struct.setParentSchemaNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // PARENT_TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.parentTableName = iprot.readString(); + struct.setParentTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // FOREIGN_CATALOG_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.foreignCatalogName = iprot.readString(); + struct.setForeignCatalogNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // FOREIGN_SCHEMA_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.foreignSchemaName = iprot.readString(); + struct.setForeignSchemaNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 7: // FOREIGN_TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.foreignTableName = iprot.readString(); + struct.setForeignTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, 
schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.sessionHandle != null) { + oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); + struct.sessionHandle.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.parentCatalogName != null) { + if (struct.isSetParentCatalogName()) { + oprot.writeFieldBegin(PARENT_CATALOG_NAME_FIELD_DESC); + oprot.writeString(struct.parentCatalogName); + oprot.writeFieldEnd(); + } + } + if (struct.parentSchemaName != null) { + if (struct.isSetParentSchemaName()) { + oprot.writeFieldBegin(PARENT_SCHEMA_NAME_FIELD_DESC); + oprot.writeString(struct.parentSchemaName); + oprot.writeFieldEnd(); + } + } + if (struct.parentTableName != null) { + if (struct.isSetParentTableName()) { + oprot.writeFieldBegin(PARENT_TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.parentTableName); + oprot.writeFieldEnd(); + } + } + if (struct.foreignCatalogName != null) { + if (struct.isSetForeignCatalogName()) { + oprot.writeFieldBegin(FOREIGN_CATALOG_NAME_FIELD_DESC); + oprot.writeString(struct.foreignCatalogName); + oprot.writeFieldEnd(); + } + } + if (struct.foreignSchemaName != null) { + if (struct.isSetForeignSchemaName()) { + oprot.writeFieldBegin(FOREIGN_SCHEMA_NAME_FIELD_DESC); + oprot.writeString(struct.foreignSchemaName); + oprot.writeFieldEnd(); + } + } + if (struct.foreignTableName != null) { + if (struct.isSetForeignTableName()) { + oprot.writeFieldBegin(FOREIGN_TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.foreignTableName); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TGetCrossReferenceReqTupleSchemeFactory implements SchemeFactory { + public TGetCrossReferenceReqTupleScheme getScheme() { + return new TGetCrossReferenceReqTupleScheme(); + } + } + + private static class TGetCrossReferenceReqTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.sessionHandle.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetParentCatalogName()) { + optionals.set(0); + } + if (struct.isSetParentSchemaName()) { + optionals.set(1); + } + if (struct.isSetParentTableName()) { + optionals.set(2); + } + if (struct.isSetForeignCatalogName()) { + optionals.set(3); + } + if (struct.isSetForeignSchemaName()) { + optionals.set(4); + } + if (struct.isSetForeignTableName()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); + if (struct.isSetParentCatalogName()) { + oprot.writeString(struct.parentCatalogName); + } + if (struct.isSetParentSchemaName()) { + oprot.writeString(struct.parentSchemaName); + } + if (struct.isSetParentTableName()) { + oprot.writeString(struct.parentTableName); + } + if (struct.isSetForeignCatalogName()) { + oprot.writeString(struct.foreignCatalogName); + } + if (struct.isSetForeignSchemaName()) { + oprot.writeString(struct.foreignSchemaName); + } + if (struct.isSetForeignTableName()) { + oprot.writeString(struct.foreignTableName); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceReq struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + 
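// Tuple encoding for this request: the required sessionHandle is always read first, + // then the 6-bit set read below flags which of the six optional identifiers follow. + 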
struct.sessionHandle = new TSessionHandle(); + struct.sessionHandle.read(iprot); + struct.setSessionHandleIsSet(true); + BitSet incoming = iprot.readBitSet(6); + if (incoming.get(0)) { + struct.parentCatalogName = iprot.readString(); + struct.setParentCatalogNameIsSet(true); + } + if (incoming.get(1)) { + struct.parentSchemaName = iprot.readString(); + struct.setParentSchemaNameIsSet(true); + } + if (incoming.get(2)) { + struct.parentTableName = iprot.readString(); + struct.setParentTableNameIsSet(true); + } + if (incoming.get(3)) { + struct.foreignCatalogName = iprot.readString(); + struct.setForeignCatalogNameIsSet(true); + } + if (incoming.get(4)) { + struct.foreignSchemaName = iprot.readString(); + struct.setForeignSchemaNameIsSet(true); + } + if (incoming.get(5)) { + struct.foreignTableName = iprot.readString(); + struct.setForeignTableNameIsSet(true); + } + } + } + +} + diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceResp.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceResp.java new file mode 100644 index 0000000..1bfe6d1 --- /dev/null +++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetCrossReferenceResp.java @@ -0,0 +1,509 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hive.service.rpc.thrift; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class TGetCrossReferenceResp implements org.apache.thrift.TBase<TGetCrossReferenceResp, TGetCrossReferenceResp._Fields>, java.io.Serializable, Cloneable, Comparable<TGetCrossReferenceResp> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetCrossReferenceResp"); + + private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TGetCrossReferenceRespStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TGetCrossReferenceRespTupleSchemeFactory()); + } + + private TStatus status; // required + private TOperationHandle operationHandle; // optional + + /** The set of fields this struct 
contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + STATUS((short)1, "status"), + OPERATION_HANDLE((short)2, "operationHandle"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if it's not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // STATUS + return STATUS; + case 2: // OPERATION_HANDLE + return OPERATION_HANDLE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if it's not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); + tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetCrossReferenceResp.class, metaDataMap); + } + + public TGetCrossReferenceResp() { + } + + public TGetCrossReferenceResp( + TStatus status) + { + this(); + this.status = status; + } + + /** + * Performs a deep copy on other. + */ + public TGetCrossReferenceResp(TGetCrossReferenceResp other) { + if (other.isSetStatus()) { + this.status = new TStatus(other.status); + } + if (other.isSetOperationHandle()) { + this.operationHandle = new TOperationHandle(other.operationHandle); + } + } + + public TGetCrossReferenceResp deepCopy() { + return new TGetCrossReferenceResp(this); + } + + @Override + public void clear() { + this.status = null; + this.operationHandle = null; + } + + public TStatus getStatus() { + return this.status; + } + + public void setStatus(TStatus status) { + this.status = status; + } + + public void unsetStatus() { + this.status = null; + } + + /** Returns true if field status is set (has been assigned a value) and false otherwise */ + public boolean isSetStatus() { + return this.status != null; + } + + public void setStatusIsSet(boolean value) { + if (!value) { + this.status = null; + } + } + + public TOperationHandle getOperationHandle() { + return this.operationHandle; + } + + public void setOperationHandle(TOperationHandle operationHandle) { + this.operationHandle = operationHandle; + } + + public void unsetOperationHandle() { + this.operationHandle = null; + } + + /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ + public boolean isSetOperationHandle() { + return this.operationHandle != null; + } + + public void setOperationHandleIsSet(boolean value) { + if (!value) { + this.operationHandle = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STATUS: + if (value == null) { + unsetStatus(); + } else { + setStatus((TStatus)value); + } + break; + + case OPERATION_HANDLE: + if (value == null) { + unsetOperationHandle(); + } else { + setOperationHandle((TOperationHandle)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STATUS: + return getStatus(); + + case OPERATION_HANDLE: + return getOperationHandle(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STATUS: + return isSetStatus(); + case OPERATION_HANDLE: + return isSetOperationHandle(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TGetCrossReferenceResp) + return this.equals((TGetCrossReferenceResp)that); + return false; + } + + public boolean equals(TGetCrossReferenceResp that) { + if (that == null) + return false; + + boolean this_present_status = true && this.isSetStatus(); + boolean that_present_status = true && that.isSetStatus(); + if (this_present_status || that_present_status) { + if (!(this_present_status && that_present_status)) + return false; + if (!this.status.equals(that.status)) + return false; + } + + boolean this_present_operationHandle = true && this.isSetOperationHandle(); + boolean that_present_operationHandle = true && that.isSetOperationHandle(); + if (this_present_operationHandle || that_present_operationHandle) { + if (!(this_present_operationHandle && that_present_operationHandle)) + return false; + if (!this.operationHandle.equals(that.operationHandle)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List<Object> list = new ArrayList<Object>(); + + boolean present_status = true && (isSetStatus()); 
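+ // Generated hashCode pattern: for each field, a presence flag and, when the field is + // set, its value are appended to the list; List.hashCode() combines them.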
+ list.add(present_status); + if (present_status) + list.add(status); + + boolean present_operationHandle = true && (isSetOperationHandle()); + list.add(present_operationHandle); + if (present_operationHandle) + list.add(operationHandle); + + return list.hashCode(); + } + + @Override + public int compareTo(TGetCrossReferenceResp other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStatus()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOperationHandle()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TGetCrossReferenceResp("); + boolean first = true; + + sb.append("status:"); + if (this.status == null) { + sb.append("null"); + } else { + sb.append(this.status); + } + first = false; + if (isSetOperationHandle()) { + if (!first) sb.append(", "); + sb.append("operationHandle:"); + if (this.operationHandle == null) { + sb.append("null"); + } else { + sb.append(this.operationHandle); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetStatus()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + if (status != null) { + status.validate(); + } + if (operationHandle != null) { + operationHandle.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TGetCrossReferenceRespStandardSchemeFactory implements SchemeFactory { + public TGetCrossReferenceRespStandardScheme getScheme() { + return new TGetCrossReferenceRespStandardScheme(); + } + } + + private static class TGetCrossReferenceRespStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATUS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.status = new TStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // OPERATION_HANDLE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.operationHandle = new TOperationHandle(); + struct.operationHandle.read(iprot); + struct.setOperationHandleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.status != null) { + oprot.writeFieldBegin(STATUS_FIELD_DESC); + struct.status.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.operationHandle != null) { + if (struct.isSetOperationHandle()) { + oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); + struct.operationHandle.write(oprot); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TGetCrossReferenceRespTupleSchemeFactory implements SchemeFactory { + public TGetCrossReferenceRespTupleScheme getScheme() { + return new TGetCrossReferenceRespTupleScheme(); + } + } + + private static class TGetCrossReferenceRespTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.status.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetOperationHandle()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetOperationHandle()) { + 
struct.operationHandle.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TGetCrossReferenceResp struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.status = new TStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.operationHandle = new TOperationHandle(); + struct.operationHandle.read(iprot); + struct.setOperationHandleIsSet(true); + } + } + } + +} + diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysReq.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysReq.java new file mode 100644 index 0000000..1bec9b5 --- /dev/null +++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysReq.java @@ -0,0 +1,716 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hive.service.rpc.thrift; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class TGetPrimaryKeysReq implements org.apache.thrift.TBase<TGetPrimaryKeysReq, TGetPrimaryKeysReq._Fields>, java.io.Serializable, Cloneable, Comparable<TGetPrimaryKeysReq> { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetPrimaryKeysReq"); + + private static final org.apache.thrift.protocol.TField SESSION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("sessionHandle", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField CATALOG_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("catalogName", org.apache.thrift.protocol.TType.STRING, (short)2); + private static final org.apache.thrift.protocol.TField SCHEMA_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("schemaName", org.apache.thrift.protocol.TType.STRING, (short)3); + private static final org.apache.thrift.protocol.TField TABLE_NAME_FIELD_DESC = new org.apache.thrift.protocol.TField("tableName", org.apache.thrift.protocol.TType.STRING, (short)4); + + private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TGetPrimaryKeysReqStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TGetPrimaryKeysReqTupleSchemeFactory()); + } + + private TSessionHandle sessionHandle; // required + private String catalogName; // optional + private String schemaName; // optional + private String tableName; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SESSION_HANDLE((short)1, "sessionHandle"), + CATALOG_NAME((short)2, "catalogName"), + SCHEMA_NAME((short)3, "schemaName"), + TABLE_NAME((short)4, "tableName"); + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if it's not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // SESSION_HANDLE + return SESSION_HANDLE; + case 2: // CATALOG_NAME + return CATALOG_NAME; + case 3: // SCHEMA_NAME + return SCHEMA_NAME; + case 4: // TABLE_NAME + return TABLE_NAME; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if it's not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.CATALOG_NAME,_Fields.SCHEMA_NAME,_Fields.TABLE_NAME}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SESSION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("sessionHandle", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TSessionHandle.class))); + tmpMap.put(_Fields.CATALOG_NAME, new org.apache.thrift.meta_data.FieldMetaData("catalogName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.SCHEMA_NAME, new org.apache.thrift.meta_data.FieldMetaData("schemaName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + tmpMap.put(_Fields.TABLE_NAME, new org.apache.thrift.meta_data.FieldMetaData("tableName", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING , "TIdentifier"))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetPrimaryKeysReq.class, metaDataMap); + } + + public TGetPrimaryKeysReq() { + } + + public TGetPrimaryKeysReq( + TSessionHandle sessionHandle) + { + this(); + this.sessionHandle = sessionHandle; + } + + /** + * Performs a deep copy on other. 
+ */ + public TGetPrimaryKeysReq(TGetPrimaryKeysReq other) { + if (other.isSetSessionHandle()) { + this.sessionHandle = new TSessionHandle(other.sessionHandle); + } + if (other.isSetCatalogName()) { + this.catalogName = other.catalogName; + } + if (other.isSetSchemaName()) { + this.schemaName = other.schemaName; + } + if (other.isSetTableName()) { + this.tableName = other.tableName; + } + } + + public TGetPrimaryKeysReq deepCopy() { + return new TGetPrimaryKeysReq(this); + } + + @Override + public void clear() { + this.sessionHandle = null; + this.catalogName = null; + this.schemaName = null; + this.tableName = null; + } + + public TSessionHandle getSessionHandle() { + return this.sessionHandle; + } + + public void setSessionHandle(TSessionHandle sessionHandle) { + this.sessionHandle = sessionHandle; + } + + public void unsetSessionHandle() { + this.sessionHandle = null; + } + + /** Returns true if field sessionHandle is set (has been assigned a value) and false otherwise */ + public boolean isSetSessionHandle() { + return this.sessionHandle != null; + } + + public void setSessionHandleIsSet(boolean value) { + if (!value) { + this.sessionHandle = null; + } + } + + public String getCatalogName() { + return this.catalogName; + } + + public void setCatalogName(String catalogName) { + this.catalogName = catalogName; + } + + public void unsetCatalogName() { + this.catalogName = null; + } + + /** Returns true if field catalogName is set (has been assigned a value) and false otherwise */ + public boolean isSetCatalogName() { + return this.catalogName != null; + } + + public void setCatalogNameIsSet(boolean value) { + if (!value) { + this.catalogName = null; + } + } + + public String getSchemaName() { + return this.schemaName; + } + + public void setSchemaName(String schemaName) { + this.schemaName = schemaName; + } + + public void unsetSchemaName() { + this.schemaName = null; + } + + /** Returns true if field schemaName is set (has been assigned a value) and false otherwise */ + public boolean isSetSchemaName() { + return this.schemaName != null; + } + + public void setSchemaNameIsSet(boolean value) { + if (!value) { + this.schemaName = null; + } + } + + public String getTableName() { + return this.tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public void unsetTableName() { + this.tableName = null; + } + + /** Returns true if field tableName is set (has been assigned a value) and false otherwise */ + public boolean isSetTableName() { + return this.tableName != null; + } + + public void setTableNameIsSet(boolean value) { + if (!value) { + this.tableName = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SESSION_HANDLE: + if (value == null) { + unsetSessionHandle(); + } else { + setSessionHandle((TSessionHandle)value); + } + break; + + case CATALOG_NAME: + if (value == null) { + unsetCatalogName(); + } else { + setCatalogName((String)value); + } + break; + + case SCHEMA_NAME: + if (value == null) { + unsetSchemaName(); + } else { + setSchemaName((String)value); + } + break; + + case TABLE_NAME: + if (value == null) { + unsetTableName(); + } else { + setTableName((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SESSION_HANDLE: + return getSessionHandle(); + + case CATALOG_NAME: + return getCatalogName(); + + case SCHEMA_NAME: + return getSchemaName(); + + case TABLE_NAME: + return getTableName(); + + } + throw new 
IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SESSION_HANDLE: + return isSetSessionHandle(); + case CATALOG_NAME: + return isSetCatalogName(); + case SCHEMA_NAME: + return isSetSchemaName(); + case TABLE_NAME: + return isSetTableName(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TGetPrimaryKeysReq) + return this.equals((TGetPrimaryKeysReq)that); + return false; + } + + public boolean equals(TGetPrimaryKeysReq that) { + if (that == null) + return false; + + boolean this_present_sessionHandle = true && this.isSetSessionHandle(); + boolean that_present_sessionHandle = true && that.isSetSessionHandle(); + if (this_present_sessionHandle || that_present_sessionHandle) { + if (!(this_present_sessionHandle && that_present_sessionHandle)) + return false; + if (!this.sessionHandle.equals(that.sessionHandle)) + return false; + } + + boolean this_present_catalogName = true && this.isSetCatalogName(); + boolean that_present_catalogName = true && that.isSetCatalogName(); + if (this_present_catalogName || that_present_catalogName) { + if (!(this_present_catalogName && that_present_catalogName)) + return false; + if (!this.catalogName.equals(that.catalogName)) + return false; + } + + boolean this_present_schemaName = true && this.isSetSchemaName(); + boolean that_present_schemaName = true && that.isSetSchemaName(); + if (this_present_schemaName || that_present_schemaName) { + if (!(this_present_schemaName && that_present_schemaName)) + return false; + if (!this.schemaName.equals(that.schemaName)) + return false; + } + + boolean this_present_tableName = true && this.isSetTableName(); + boolean that_present_tableName = true && that.isSetTableName(); + if (this_present_tableName || that_present_tableName) { + if (!(this_present_tableName && that_present_tableName)) + return false; + if (!this.tableName.equals(that.tableName)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_sessionHandle = true && (isSetSessionHandle()); + list.add(present_sessionHandle); + if (present_sessionHandle) + list.add(sessionHandle); + + boolean present_catalogName = true && (isSetCatalogName()); + list.add(present_catalogName); + if (present_catalogName) + list.add(catalogName); + + boolean present_schemaName = true && (isSetSchemaName()); + list.add(present_schemaName); + if (present_schemaName) + list.add(schemaName); + + boolean present_tableName = true && (isSetTableName()); + list.add(present_tableName); + if (present_tableName) + list.add(tableName); + + return list.hashCode(); + } + + @Override + public int compareTo(TGetPrimaryKeysReq other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSessionHandle()).compareTo(other.isSetSessionHandle()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSessionHandle()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.sessionHandle, other.sessionHandle); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = 
Boolean.valueOf(isSetCatalogName()).compareTo(other.isSetCatalogName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetCatalogName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.catalogName, other.catalogName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetSchemaName()).compareTo(other.isSetSchemaName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSchemaName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.schemaName, other.schemaName); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetTableName()).compareTo(other.isSetTableName()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTableName()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.tableName, other.tableName); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TGetPrimaryKeysReq("); + boolean first = true; + + sb.append("sessionHandle:"); + if (this.sessionHandle == null) { + sb.append("null"); + } else { + sb.append(this.sessionHandle); + } + first = false; + if (isSetCatalogName()) { + if (!first) sb.append(", "); + sb.append("catalogName:"); + if (this.catalogName == null) { + sb.append("null"); + } else { + sb.append(this.catalogName); + } + first = false; + } + if (isSetSchemaName()) { + if (!first) sb.append(", "); + sb.append("schemaName:"); + if (this.schemaName == null) { + sb.append("null"); + } else { + sb.append(this.schemaName); + } + first = false; + } + if (isSetTableName()) { + if (!first) sb.append(", "); + sb.append("tableName:"); + if (this.tableName == null) { + sb.append("null"); + } else { + sb.append(this.tableName); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetSessionHandle()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'sessionHandle' is unset! 
Struct:" + toString()); + } + + // check for sub-struct validity + if (sessionHandle != null) { + sessionHandle.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TGetPrimaryKeysReqStandardSchemeFactory implements SchemeFactory { + public TGetPrimaryKeysReqStandardScheme getScheme() { + return new TGetPrimaryKeysReqStandardScheme(); + } + } + + private static class TGetPrimaryKeysReqStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // SESSION_HANDLE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.sessionHandle = new TSessionHandle(); + struct.sessionHandle.read(iprot); + struct.setSessionHandleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // CATALOG_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.catalogName = iprot.readString(); + struct.setCatalogNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // SCHEMA_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.schemaName = iprot.readString(); + struct.setSchemaNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // TABLE_NAME + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.sessionHandle != null) { + oprot.writeFieldBegin(SESSION_HANDLE_FIELD_DESC); + struct.sessionHandle.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.catalogName != null) { + if (struct.isSetCatalogName()) { + oprot.writeFieldBegin(CATALOG_NAME_FIELD_DESC); + oprot.writeString(struct.catalogName); + oprot.writeFieldEnd(); + } + } + if (struct.schemaName != null) { + if (struct.isSetSchemaName()) { + oprot.writeFieldBegin(SCHEMA_NAME_FIELD_DESC); + oprot.writeString(struct.schemaName); + oprot.writeFieldEnd(); + } + } + if (struct.tableName != null) { + if (struct.isSetTableName()) { + 
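+          // Optional fields are only serialized when set: the generated writer
+          // guards each with both a null check and isSet...() (equivalent for
+          // object-typed fields, but the compiler emits both uniformly).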
oprot.writeFieldBegin(TABLE_NAME_FIELD_DESC); + oprot.writeString(struct.tableName); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TGetPrimaryKeysReqTupleSchemeFactory implements SchemeFactory { + public TGetPrimaryKeysReqTupleScheme getScheme() { + return new TGetPrimaryKeysReqTupleScheme(); + } + } + + private static class TGetPrimaryKeysReqTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.sessionHandle.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetCatalogName()) { + optionals.set(0); + } + if (struct.isSetSchemaName()) { + optionals.set(1); + } + if (struct.isSetTableName()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetCatalogName()) { + oprot.writeString(struct.catalogName); + } + if (struct.isSetSchemaName()) { + oprot.writeString(struct.schemaName); + } + if (struct.isSetTableName()) { + oprot.writeString(struct.tableName); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysReq struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.sessionHandle = new TSessionHandle(); + struct.sessionHandle.read(iprot); + struct.setSessionHandleIsSet(true); + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.catalogName = iprot.readString(); + struct.setCatalogNameIsSet(true); + } + if (incoming.get(1)) { + struct.schemaName = iprot.readString(); + struct.setSchemaNameIsSet(true); + } + if (incoming.get(2)) { + struct.tableName = iprot.readString(); + struct.setTableNameIsSet(true); + } + } + } + +} + diff --git a/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java new file mode 100644 index 0000000..72d9507 --- /dev/null +++ b/service-rpc/src/gen/thrift/gen-javabean/org/apache/hive/service/rpc/thrift/TGetPrimaryKeysResp.java @@ -0,0 +1,509 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hive.service.rpc.thrift; + +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import org.apache.thrift.async.AsyncMethodCallback; +import org.apache.thrift.server.AbstractNonblockingServer.*; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import javax.annotation.Generated; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"}) +@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)") +public class TGetPrimaryKeysResp implements org.apache.thrift.TBase, 
java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("TGetPrimaryKeysResp"); + + private static final org.apache.thrift.protocol.TField STATUS_FIELD_DESC = new org.apache.thrift.protocol.TField("status", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField OPERATION_HANDLE_FIELD_DESC = new org.apache.thrift.protocol.TField("operationHandle", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new TGetPrimaryKeysRespStandardSchemeFactory()); + schemes.put(TupleScheme.class, new TGetPrimaryKeysRespTupleSchemeFactory()); + } + + private TStatus status; // required + private TOperationHandle operationHandle; // optional + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + STATUS((short)1, "status"), + OPERATION_HANDLE((short)2, "operationHandle"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // STATUS + return STATUS; + case 2: // OPERATION_HANDLE + return OPERATION_HANDLE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
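+     * For example, findByName("status") returns STATUS; an unrecognized
+     * name yields null rather than throwing.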
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final _Fields optionals[] = {_Fields.OPERATION_HANDLE}; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.STATUS, new org.apache.thrift.meta_data.FieldMetaData("status", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TStatus.class))); + tmpMap.put(_Fields.OPERATION_HANDLE, new org.apache.thrift.meta_data.FieldMetaData("operationHandle", org.apache.thrift.TFieldRequirementType.OPTIONAL, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TOperationHandle.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(TGetPrimaryKeysResp.class, metaDataMap); + } + + public TGetPrimaryKeysResp() { + } + + public TGetPrimaryKeysResp( + TStatus status) + { + this(); + this.status = status; + } + + /** + * Performs a deep copy on other. + */ + public TGetPrimaryKeysResp(TGetPrimaryKeysResp other) { + if (other.isSetStatus()) { + this.status = new TStatus(other.status); + } + if (other.isSetOperationHandle()) { + this.operationHandle = new TOperationHandle(other.operationHandle); + } + } + + public TGetPrimaryKeysResp deepCopy() { + return new TGetPrimaryKeysResp(this); + } + + @Override + public void clear() { + this.status = null; + this.operationHandle = null; + } + + public TStatus getStatus() { + return this.status; + } + + public void setStatus(TStatus status) { + this.status = status; + } + + public void unsetStatus() { + this.status = null; + } + + /** Returns true if field status is set (has been assigned a value) and false otherwise */ + public boolean isSetStatus() { + return this.status != null; + } + + public void setStatusIsSet(boolean value) { + if (!value) { + this.status = null; + } + } + + public TOperationHandle getOperationHandle() { + return this.operationHandle; + } + + public void setOperationHandle(TOperationHandle operationHandle) { + this.operationHandle = operationHandle; + } + + public void unsetOperationHandle() { + this.operationHandle = null; + } + + /** Returns true if field operationHandle is set (has been assigned a value) and false otherwise */ + public boolean isSetOperationHandle() { + return this.operationHandle != null; + } + + public void setOperationHandleIsSet(boolean value) { + if (!value) { + this.operationHandle = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case STATUS: + if (value == null) { + unsetStatus(); + } else { + setStatus((TStatus)value); + } + break; + + case OPERATION_HANDLE: + if (value == null) { + unsetOperationHandle(); + } else { + setOperationHandle((TOperationHandle)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case STATUS: + return getStatus(); + + case OPERATION_HANDLE: + return 
getOperationHandle(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case STATUS: + return isSetStatus(); + case OPERATION_HANDLE: + return isSetOperationHandle(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof TGetPrimaryKeysResp) + return this.equals((TGetPrimaryKeysResp)that); + return false; + } + + public boolean equals(TGetPrimaryKeysResp that) { + if (that == null) + return false; + + boolean this_present_status = true && this.isSetStatus(); + boolean that_present_status = true && that.isSetStatus(); + if (this_present_status || that_present_status) { + if (!(this_present_status && that_present_status)) + return false; + if (!this.status.equals(that.status)) + return false; + } + + boolean this_present_operationHandle = true && this.isSetOperationHandle(); + boolean that_present_operationHandle = true && that.isSetOperationHandle(); + if (this_present_operationHandle || that_present_operationHandle) { + if (!(this_present_operationHandle && that_present_operationHandle)) + return false; + if (!this.operationHandle.equals(that.operationHandle)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_status = true && (isSetStatus()); + list.add(present_status); + if (present_status) + list.add(status); + + boolean present_operationHandle = true && (isSetOperationHandle()); + list.add(present_operationHandle); + if (present_operationHandle) + list.add(operationHandle); + + return list.hashCode(); + } + + @Override + public int compareTo(TGetPrimaryKeysResp other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetStatus()).compareTo(other.isSetStatus()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetStatus()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.status, other.status); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetOperationHandle()).compareTo(other.isSetOperationHandle()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetOperationHandle()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.operationHandle, other.operationHandle); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("TGetPrimaryKeysResp("); + boolean first = true; + + sb.append("status:"); + if (this.status == null) { + sb.append("null"); + } else { + sb.append(this.status); + } + first = false; + if (isSetOperationHandle()) { + if (!first) sb.append(", "); + sb.append("operationHandle:"); + 
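+      // operationHandle is optional, so toString() prints it only when set;
+      // the required status field above is always included.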
if (this.operationHandle == null) { + sb.append("null"); + } else { + sb.append(this.operationHandle); + } + first = false; + } + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetStatus()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'status' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + if (status != null) { + status.validate(); + } + if (operationHandle != null) { + operationHandle.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class TGetPrimaryKeysRespStandardSchemeFactory implements SchemeFactory { + public TGetPrimaryKeysRespStandardScheme getScheme() { + return new TGetPrimaryKeysRespStandardScheme(); + } + } + + private static class TGetPrimaryKeysRespStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // STATUS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.status = new TStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // OPERATION_HANDLE + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.operationHandle = new TOperationHandle(); + struct.operationHandle.read(iprot); + struct.setOperationHandleIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.status != null) { + oprot.writeFieldBegin(STATUS_FIELD_DESC); + struct.status.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.operationHandle != null) { + if (struct.isSetOperationHandle()) { + oprot.writeFieldBegin(OPERATION_HANDLE_FIELD_DESC); + struct.operationHandle.write(oprot); + oprot.writeFieldEnd(); + } + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class TGetPrimaryKeysRespTupleSchemeFactory implements SchemeFactory { + public TGetPrimaryKeysRespTupleScheme getScheme() { + return new TGetPrimaryKeysRespTupleScheme(); + } + } + + private static class TGetPrimaryKeysRespTupleScheme extends TupleScheme { + + @Override + public void 
write(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + struct.status.write(oprot); + BitSet optionals = new BitSet(); + if (struct.isSetOperationHandle()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetOperationHandle()) { + struct.operationHandle.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, TGetPrimaryKeysResp struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + struct.status = new TStatus(); + struct.status.read(iprot); + struct.setStatusIsSet(true); + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.operationHandle = new TOperationHandle(); + struct.operationHandle.read(iprot); + struct.setOperationHandleIsSet(true); + } + } + } + +} + diff --git a/service-rpc/src/gen/thrift/gen-php/TCLIService.php b/service-rpc/src/gen/thrift/gen-php/TCLIService.php index eba62f1..d283145 100644 --- a/service-rpc/src/gen/thrift/gen-php/TCLIService.php +++ b/service-rpc/src/gen/thrift/gen-php/TCLIService.php @@ -72,6 +72,16 @@ interface TCLIServiceIf { */ public function GetFunctions(\TGetFunctionsReq $req); /** + * @param \TGetPrimaryKeysReq $req + * @return \TGetPrimaryKeysResp + */ + public function GetPrimaryKeys(\TGetPrimaryKeysReq $req); + /** + * @param \TGetCrossReferenceReq $req + * @return \TGetCrossReferenceResp + */ + public function GetCrossReference(\TGetCrossReferenceReq $req); + /** * @param \TGetOperationStatusReq $req * @return \TGetOperationStatusResp */ @@ -685,6 +695,108 @@ class TCLIServiceClient implements \TCLIServiceIf { throw new \Exception("GetFunctions failed: unknown result"); } + public function GetPrimaryKeys(\TGetPrimaryKeysReq $req) + { + $this->send_GetPrimaryKeys($req); + return $this->recv_GetPrimaryKeys(); + } + + public function send_GetPrimaryKeys(\TGetPrimaryKeysReq $req) + { + $args = new \TCLIService_GetPrimaryKeys_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'GetPrimaryKeys', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('GetPrimaryKeys', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_GetPrimaryKeys() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\TCLIService_GetPrimaryKeys_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \TCLIService_GetPrimaryKeys_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("GetPrimaryKeys failed: unknown result"); + } + + public function GetCrossReference(\TGetCrossReferenceReq $req) + { + $this->send_GetCrossReference($req); + return 
$this->recv_GetCrossReference(); + } + + public function send_GetCrossReference(\TGetCrossReferenceReq $req) + { + $args = new \TCLIService_GetCrossReference_args(); + $args->req = $req; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'GetCrossReference', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('GetCrossReference', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_GetCrossReference() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\TCLIService_GetCrossReference_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \TCLIService_GetCrossReference_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + throw new \Exception("GetCrossReference failed: unknown result"); + } + public function GetOperationStatus(\TGetOperationStatusReq $req) { $this->send_GetOperationStatus($req); @@ -2857,6 +2969,326 @@ class TCLIService_GetFunctions_result { } +class TCLIService_GetPrimaryKeys_args { + static $_TSPEC; + + /** + * @var \TGetPrimaryKeysReq + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\TGetPrimaryKeysReq', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'TCLIService_GetPrimaryKeys_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \TGetPrimaryKeysReq(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TCLIService_GetPrimaryKeys_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TCLIService_GetPrimaryKeys_result { + static $_TSPEC; + + /** + * @var \TGetPrimaryKeysResp + */ + public $success = null; + + public function __construct($vals=null) { + if 
(!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\TGetPrimaryKeysResp', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'TCLIService_GetPrimaryKeys_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \TGetPrimaryKeysResp(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TCLIService_GetPrimaryKeys_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TCLIService_GetCrossReference_args { + static $_TSPEC; + + /** + * @var \TGetCrossReferenceReq + */ + public $req = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'req', + 'type' => TType::STRUCT, + 'class' => '\TGetCrossReferenceReq', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['req'])) { + $this->req = $vals['req']; + } + } + } + + public function getName() { + return 'TCLIService_GetCrossReference_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->req = new \TGetCrossReferenceReq(); + $xfer += $this->req->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TCLIService_GetCrossReference_args'); + if ($this->req !== null) { + if (!is_object($this->req)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('req', TType::STRUCT, 1); + $xfer += $this->req->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TCLIService_GetCrossReference_result { + static $_TSPEC; + + /** + * @var \TGetCrossReferenceResp + */ + public $success = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\TGetCrossReferenceResp', + ), + ); + } + if (is_array($vals)) { 
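+    // Like the other generated wrappers, the constructor accepts an optional
+    // associative array so callers can prefill fields by name.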
+ if (isset($vals['success'])) { + $this->success = $vals['success']; + } + } + } + + public function getName() { + return 'TCLIService_GetCrossReference_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \TGetCrossReferenceResp(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TCLIService_GetCrossReference_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class TCLIService_GetOperationStatus_args { static $_TSPEC; diff --git a/service-rpc/src/gen/thrift/gen-php/Types.php b/service-rpc/src/gen/thrift/gen-php/Types.php index b7df50a..faea15d 100644 --- a/service-rpc/src/gen/thrift/gen-php/Types.php +++ b/service-rpc/src/gen/thrift/gen-php/Types.php @@ -7313,6 +7313,589 @@ class TGetFunctionsResp { } +class TGetPrimaryKeysReq { + static $_TSPEC; + + /** + * @var \TSessionHandle + */ + public $sessionHandle = null; + /** + * @var string + */ + public $catalogName = null; + /** + * @var string + */ + public $schemaName = null; + /** + * @var string + */ + public $tableName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'sessionHandle', + 'type' => TType::STRUCT, + 'class' => '\TSessionHandle', + ), + 2 => array( + 'var' => 'catalogName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'schemaName', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'tableName', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['sessionHandle'])) { + $this->sessionHandle = $vals['sessionHandle']; + } + if (isset($vals['catalogName'])) { + $this->catalogName = $vals['catalogName']; + } + if (isset($vals['schemaName'])) { + $this->schemaName = $vals['schemaName']; + } + if (isset($vals['tableName'])) { + $this->tableName = $vals['tableName']; + } + } + } + + public function getName() { + return 'TGetPrimaryKeysReq'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->sessionHandle = new \TSessionHandle(); + $xfer += $this->sessionHandle->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->catalogName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->schemaName); + 
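+            // A field whose wire type does not match the spec is skipped in the
+            // else branch below, keeping readers tolerant of schema changes.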
} else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->tableName); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TGetPrimaryKeysReq'); + if ($this->sessionHandle !== null) { + if (!is_object($this->sessionHandle)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('sessionHandle', TType::STRUCT, 1); + $xfer += $this->sessionHandle->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->catalogName !== null) { + $xfer += $output->writeFieldBegin('catalogName', TType::STRING, 2); + $xfer += $output->writeString($this->catalogName); + $xfer += $output->writeFieldEnd(); + } + if ($this->schemaName !== null) { + $xfer += $output->writeFieldBegin('schemaName', TType::STRING, 3); + $xfer += $output->writeString($this->schemaName); + $xfer += $output->writeFieldEnd(); + } + if ($this->tableName !== null) { + $xfer += $output->writeFieldBegin('tableName', TType::STRING, 4); + $xfer += $output->writeString($this->tableName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TGetPrimaryKeysResp { + static $_TSPEC; + + /** + * @var \TStatus + */ + public $status = null; + /** + * @var \TOperationHandle + */ + public $operationHandle = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'status', + 'type' => TType::STRUCT, + 'class' => '\TStatus', + ), + 2 => array( + 'var' => 'operationHandle', + 'type' => TType::STRUCT, + 'class' => '\TOperationHandle', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['status'])) { + $this->status = $vals['status']; + } + if (isset($vals['operationHandle'])) { + $this->operationHandle = $vals['operationHandle']; + } + } + } + + public function getName() { + return 'TGetPrimaryKeysResp'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->status = new \TStatus(); + $xfer += $this->status->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->operationHandle = new \TOperationHandle(); + $xfer += $this->operationHandle->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TGetPrimaryKeysResp'); + if ($this->status !== null) { + if (!is_object($this->status)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('status', TType::STRUCT, 1); + $xfer += $this->status->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->operationHandle !== null) { + if 
(!is_object($this->operationHandle)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('operationHandle', TType::STRUCT, 2); + $xfer += $this->operationHandle->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TGetCrossReferenceReq { + static $_TSPEC; + + /** + * @var \TSessionHandle + */ + public $sessionHandle = null; + /** + * @var string + */ + public $parentCatalogName = null; + /** + * @var string + */ + public $parentSchemaName = null; + /** + * @var string + */ + public $parentTableName = null; + /** + * @var string + */ + public $foreignCatalogName = null; + /** + * @var string + */ + public $foreignSchemaName = null; + /** + * @var string + */ + public $foreignTableName = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'sessionHandle', + 'type' => TType::STRUCT, + 'class' => '\TSessionHandle', + ), + 2 => array( + 'var' => 'parentCatalogName', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'parentSchemaName', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'parentTableName', + 'type' => TType::STRING, + ), + 5 => array( + 'var' => 'foreignCatalogName', + 'type' => TType::STRING, + ), + 6 => array( + 'var' => 'foreignSchemaName', + 'type' => TType::STRING, + ), + 7 => array( + 'var' => 'foreignTableName', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['sessionHandle'])) { + $this->sessionHandle = $vals['sessionHandle']; + } + if (isset($vals['parentCatalogName'])) { + $this->parentCatalogName = $vals['parentCatalogName']; + } + if (isset($vals['parentSchemaName'])) { + $this->parentSchemaName = $vals['parentSchemaName']; + } + if (isset($vals['parentTableName'])) { + $this->parentTableName = $vals['parentTableName']; + } + if (isset($vals['foreignCatalogName'])) { + $this->foreignCatalogName = $vals['foreignCatalogName']; + } + if (isset($vals['foreignSchemaName'])) { + $this->foreignSchemaName = $vals['foreignSchemaName']; + } + if (isset($vals['foreignTableName'])) { + $this->foreignTableName = $vals['foreignTableName']; + } + } + } + + public function getName() { + return 'TGetCrossReferenceReq'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->sessionHandle = new \TSessionHandle(); + $xfer += $this->sessionHandle->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parentCatalogName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parentSchemaName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->parentTableName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->foreignCatalogName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::STRING) { + $xfer += 
$input->readString($this->foreignSchemaName); + } else { + $xfer += $input->skip($ftype); + } + break; + case 7: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->foreignTableName); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TGetCrossReferenceReq'); + if ($this->sessionHandle !== null) { + if (!is_object($this->sessionHandle)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('sessionHandle', TType::STRUCT, 1); + $xfer += $this->sessionHandle->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->parentCatalogName !== null) { + $xfer += $output->writeFieldBegin('parentCatalogName', TType::STRING, 2); + $xfer += $output->writeString($this->parentCatalogName); + $xfer += $output->writeFieldEnd(); + } + if ($this->parentSchemaName !== null) { + $xfer += $output->writeFieldBegin('parentSchemaName', TType::STRING, 3); + $xfer += $output->writeString($this->parentSchemaName); + $xfer += $output->writeFieldEnd(); + } + if ($this->parentTableName !== null) { + $xfer += $output->writeFieldBegin('parentTableName', TType::STRING, 4); + $xfer += $output->writeString($this->parentTableName); + $xfer += $output->writeFieldEnd(); + } + if ($this->foreignCatalogName !== null) { + $xfer += $output->writeFieldBegin('foreignCatalogName', TType::STRING, 5); + $xfer += $output->writeString($this->foreignCatalogName); + $xfer += $output->writeFieldEnd(); + } + if ($this->foreignSchemaName !== null) { + $xfer += $output->writeFieldBegin('foreignSchemaName', TType::STRING, 6); + $xfer += $output->writeString($this->foreignSchemaName); + $xfer += $output->writeFieldEnd(); + } + if ($this->foreignTableName !== null) { + $xfer += $output->writeFieldBegin('foreignTableName', TType::STRING, 7); + $xfer += $output->writeString($this->foreignTableName); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class TGetCrossReferenceResp { + static $_TSPEC; + + /** + * @var \TStatus + */ + public $status = null; + /** + * @var \TOperationHandle + */ + public $operationHandle = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'status', + 'type' => TType::STRUCT, + 'class' => '\TStatus', + ), + 2 => array( + 'var' => 'operationHandle', + 'type' => TType::STRUCT, + 'class' => '\TOperationHandle', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['status'])) { + $this->status = $vals['status']; + } + if (isset($vals['operationHandle'])) { + $this->operationHandle = $vals['operationHandle']; + } + } + } + + public function getName() { + return 'TGetCrossReferenceResp'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->status = new \TStatus(); + $xfer += $this->status->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->operationHandle 
= new \TOperationHandle(); + $xfer += $this->operationHandle->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('TGetCrossReferenceResp'); + if ($this->status !== null) { + if (!is_object($this->status)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('status', TType::STRUCT, 1); + $xfer += $this->status->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->operationHandle !== null) { + if (!is_object($this->operationHandle)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('operationHandle', TType::STRUCT, 2); + $xfer += $this->operationHandle->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class TGetOperationStatusReq { static $_TSPEC; diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote b/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote index 56f5c5d..62b87a3 100755 --- a/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote +++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService-remote @@ -35,6 +35,8 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print(' TGetTableTypesResp GetTableTypes(TGetTableTypesReq req)') print(' TGetColumnsResp GetColumns(TGetColumnsReq req)') print(' TGetFunctionsResp GetFunctions(TGetFunctionsReq req)') + print(' TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req)') + print(' TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req)') print(' TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req)') print(' TCancelOperationResp CancelOperation(TCancelOperationReq req)') print(' TCloseOperationResp CloseOperation(TCloseOperationReq req)') @@ -165,6 +167,18 @@ elif cmd == 'GetFunctions': sys.exit(1) pp.pprint(client.GetFunctions(eval(args[0]),)) +elif cmd == 'GetPrimaryKeys': + if len(args) != 1: + print('GetPrimaryKeys requires 1 args') + sys.exit(1) + pp.pprint(client.GetPrimaryKeys(eval(args[0]),)) + +elif cmd == 'GetCrossReference': + if len(args) != 1: + print('GetCrossReference requires 1 args') + sys.exit(1) + pp.pprint(client.GetCrossReference(eval(args[0]),)) + elif cmd == 'GetOperationStatus': if len(args) != 1: print('GetOperationStatus requires 1 args') diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService.py b/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService.py index ad2d71d..35f2ae4 100644 --- a/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService.py +++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/TCLIService.py @@ -96,6 +96,20 @@ def GetFunctions(self, req): """ pass + def GetPrimaryKeys(self, req): + """ + Parameters: + - req + """ + pass + + def GetCrossReference(self, req): + """ + Parameters: + - req + """ + pass + def GetOperationStatus(self, req): """ Parameters: @@ -501,6 +515,68 @@ def recv_GetFunctions(self): return result.success raise TApplicationException(TApplicationException.MISSING_RESULT, "GetFunctions failed: unknown result") + def GetPrimaryKeys(self, req): + """ + Parameters: + - req + """ + self.send_GetPrimaryKeys(req) + return 
self.recv_GetPrimaryKeys() + + def send_GetPrimaryKeys(self, req): + self._oprot.writeMessageBegin('GetPrimaryKeys', TMessageType.CALL, self._seqid) + args = GetPrimaryKeys_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_GetPrimaryKeys(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = GetPrimaryKeys_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "GetPrimaryKeys failed: unknown result") + + def GetCrossReference(self, req): + """ + Parameters: + - req + """ + self.send_GetCrossReference(req) + return self.recv_GetCrossReference() + + def send_GetCrossReference(self, req): + self._oprot.writeMessageBegin('GetCrossReference', TMessageType.CALL, self._seqid) + args = GetCrossReference_args() + args.req = req + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_GetCrossReference(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = GetCrossReference_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + raise TApplicationException(TApplicationException.MISSING_RESULT, "GetCrossReference failed: unknown result") + def GetOperationStatus(self, req): """ Parameters: @@ -765,6 +841,8 @@ def __init__(self, handler): self._processMap["GetTableTypes"] = Processor.process_GetTableTypes self._processMap["GetColumns"] = Processor.process_GetColumns self._processMap["GetFunctions"] = Processor.process_GetFunctions + self._processMap["GetPrimaryKeys"] = Processor.process_GetPrimaryKeys + self._processMap["GetCrossReference"] = Processor.process_GetCrossReference self._processMap["GetOperationStatus"] = Processor.process_GetOperationStatus self._processMap["CancelOperation"] = Processor.process_CancelOperation self._processMap["CloseOperation"] = Processor.process_CloseOperation @@ -998,6 +1076,44 @@ def process_GetFunctions(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_GetPrimaryKeys(self, seqid, iprot, oprot): + args = GetPrimaryKeys_args() + args.read(iprot) + iprot.readMessageEnd() + result = GetPrimaryKeys_result() + try: + result.success = self._handler.GetPrimaryKeys(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("GetPrimaryKeys", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_GetCrossReference(self, seqid, iprot, oprot): + args = GetCrossReference_args() + args.read(iprot) + iprot.readMessageEnd() + result = GetCrossReference_result() + try: + result.success = self._handler.GetCrossReference(args.req) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = 
TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("GetCrossReference", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_GetOperationStatus(self, seqid, iprot, oprot): args = GetOperationStatus_args() args.read(iprot) @@ -2594,6 +2710,268 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class GetPrimaryKeys_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (TGetPrimaryKeysReq, TGetPrimaryKeysReq.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = TGetPrimaryKeysReq() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetPrimaryKeys_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetPrimaryKeys_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (TGetPrimaryKeysResp, TGetPrimaryKeysResp.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = TGetPrimaryKeysResp() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetPrimaryKeys_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + 
oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetCrossReference_args: + """ + Attributes: + - req + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'req', (TGetCrossReferenceReq, TGetCrossReferenceReq.thrift_spec), None, ), # 1 + ) + + def __init__(self, req=None,): + self.req = req + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.req = TGetCrossReferenceReq() + self.req.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('GetCrossReference_args') + if self.req is not None: + oprot.writeFieldBegin('req', TType.STRUCT, 1) + self.req.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.req) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class GetCrossReference_result: + """ + Attributes: + - success + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (TGetCrossReferenceResp, TGetCrossReferenceResp.thrift_spec), None, ), # 0 + ) + + def __init__(self, success=None,): + self.success = success + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = TGetCrossReferenceResp() + self.success.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + 
oprot.writeStructBegin('GetCrossReference_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class GetOperationStatus_args: """ Attributes: diff --git a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py index c691781..0e45041 100644 --- a/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py +++ b/service-rpc/src/gen/thrift/gen-py/TCLIService/ttypes.py @@ -5559,6 +5559,423 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class TGetPrimaryKeysReq: + """ + Attributes: + - sessionHandle + - catalogName + - schemaName + - tableName + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'sessionHandle', (TSessionHandle, TSessionHandle.thrift_spec), None, ), # 1 + (2, TType.STRING, 'catalogName', None, None, ), # 2 + (3, TType.STRING, 'schemaName', None, None, ), # 3 + (4, TType.STRING, 'tableName', None, None, ), # 4 + ) + + def __init__(self, sessionHandle=None, catalogName=None, schemaName=None, tableName=None,): + self.sessionHandle = sessionHandle + self.catalogName = catalogName + self.schemaName = schemaName + self.tableName = tableName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.sessionHandle = TSessionHandle() + self.sessionHandle.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.catalogName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.schemaName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.tableName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TGetPrimaryKeysReq') + if self.sessionHandle is not None: + oprot.writeFieldBegin('sessionHandle', TType.STRUCT, 1) + self.sessionHandle.write(oprot) + oprot.writeFieldEnd() + if self.catalogName is not None: + oprot.writeFieldBegin('catalogName', TType.STRING, 2) + oprot.writeString(self.catalogName) + oprot.writeFieldEnd() + if self.schemaName is not None: + oprot.writeFieldBegin('schemaName', TType.STRING, 3) + oprot.writeString(self.schemaName) + oprot.writeFieldEnd() + if self.tableName is not None: + 
oprot.writeFieldBegin('tableName', TType.STRING, 4) + oprot.writeString(self.tableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.sessionHandle is None: + raise TProtocol.TProtocolException(message='Required field sessionHandle is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.sessionHandle) + value = (value * 31) ^ hash(self.catalogName) + value = (value * 31) ^ hash(self.schemaName) + value = (value * 31) ^ hash(self.tableName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TGetPrimaryKeysResp: + """ + Attributes: + - status + - operationHandle + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'status', (TStatus, TStatus.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'operationHandle', (TOperationHandle, TOperationHandle.thrift_spec), None, ), # 2 + ) + + def __init__(self, status=None, operationHandle=None,): + self.status = status + self.operationHandle = operationHandle + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.status = TStatus() + self.status.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.operationHandle = TOperationHandle() + self.operationHandle.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TGetPrimaryKeysResp') + if self.status is not None: + oprot.writeFieldBegin('status', TType.STRUCT, 1) + self.status.write(oprot) + oprot.writeFieldEnd() + if self.operationHandle is not None: + oprot.writeFieldBegin('operationHandle', TType.STRUCT, 2) + self.operationHandle.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.status is None: + raise TProtocol.TProtocolException(message='Required field status is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.status) + value = (value * 31) ^ hash(self.operationHandle) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TGetCrossReferenceReq: + """ + Attributes: + - sessionHandle + - parentCatalogName + - parentSchemaName + - parentTableName + - foreignCatalogName + - foreignSchemaName + - foreignTableName + """ + + 
thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'sessionHandle', (TSessionHandle, TSessionHandle.thrift_spec), None, ), # 1 + (2, TType.STRING, 'parentCatalogName', None, None, ), # 2 + (3, TType.STRING, 'parentSchemaName', None, None, ), # 3 + (4, TType.STRING, 'parentTableName', None, None, ), # 4 + (5, TType.STRING, 'foreignCatalogName', None, None, ), # 5 + (6, TType.STRING, 'foreignSchemaName', None, None, ), # 6 + (7, TType.STRING, 'foreignTableName', None, None, ), # 7 + ) + + def __init__(self, sessionHandle=None, parentCatalogName=None, parentSchemaName=None, parentTableName=None, foreignCatalogName=None, foreignSchemaName=None, foreignTableName=None,): + self.sessionHandle = sessionHandle + self.parentCatalogName = parentCatalogName + self.parentSchemaName = parentSchemaName + self.parentTableName = parentTableName + self.foreignCatalogName = foreignCatalogName + self.foreignSchemaName = foreignSchemaName + self.foreignTableName = foreignTableName + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.sessionHandle = TSessionHandle() + self.sessionHandle.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRING: + self.parentCatalogName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.parentSchemaName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRING: + self.parentTableName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.STRING: + self.foreignCatalogName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.STRING: + self.foreignSchemaName = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 7: + if ftype == TType.STRING: + self.foreignTableName = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TGetCrossReferenceReq') + if self.sessionHandle is not None: + oprot.writeFieldBegin('sessionHandle', TType.STRUCT, 1) + self.sessionHandle.write(oprot) + oprot.writeFieldEnd() + if self.parentCatalogName is not None: + oprot.writeFieldBegin('parentCatalogName', TType.STRING, 2) + oprot.writeString(self.parentCatalogName) + oprot.writeFieldEnd() + if self.parentSchemaName is not None: + oprot.writeFieldBegin('parentSchemaName', TType.STRING, 3) + oprot.writeString(self.parentSchemaName) + oprot.writeFieldEnd() + if self.parentTableName is not None: + oprot.writeFieldBegin('parentTableName', TType.STRING, 4) + oprot.writeString(self.parentTableName) + oprot.writeFieldEnd() + if self.foreignCatalogName is not None: + oprot.writeFieldBegin('foreignCatalogName', TType.STRING, 5) + oprot.writeString(self.foreignCatalogName) + oprot.writeFieldEnd() + if self.foreignSchemaName is not None: + oprot.writeFieldBegin('foreignSchemaName', 
TType.STRING, 6) + oprot.writeString(self.foreignSchemaName) + oprot.writeFieldEnd() + if self.foreignTableName is not None: + oprot.writeFieldBegin('foreignTableName', TType.STRING, 7) + oprot.writeString(self.foreignTableName) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.sessionHandle is None: + raise TProtocol.TProtocolException(message='Required field sessionHandle is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.sessionHandle) + value = (value * 31) ^ hash(self.parentCatalogName) + value = (value * 31) ^ hash(self.parentSchemaName) + value = (value * 31) ^ hash(self.parentTableName) + value = (value * 31) ^ hash(self.foreignCatalogName) + value = (value * 31) ^ hash(self.foreignSchemaName) + value = (value * 31) ^ hash(self.foreignTableName) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class TGetCrossReferenceResp: + """ + Attributes: + - status + - operationHandle + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'status', (TStatus, TStatus.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'operationHandle', (TOperationHandle, TOperationHandle.thrift_spec), None, ), # 2 + ) + + def __init__(self, status=None, operationHandle=None,): + self.status = status + self.operationHandle = operationHandle + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.status = TStatus() + self.status.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.operationHandle = TOperationHandle() + self.operationHandle.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('TGetCrossReferenceResp') + if self.status is not None: + oprot.writeFieldBegin('status', TType.STRUCT, 1) + self.status.write(oprot) + oprot.writeFieldEnd() + if self.operationHandle is not None: + oprot.writeFieldBegin('operationHandle', TType.STRUCT, 2) + self.operationHandle.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.status is None: + raise TProtocol.TProtocolException(message='Required field status is unset!') + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.status) + value = (value * 31) ^ hash(self.operationHandle) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return 
isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class TGetOperationStatusReq: """ Attributes: diff --git a/service-rpc/src/gen/thrift/gen-py/__init__.py b/service-rpc/src/gen/thrift/gen-py/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service.rb b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service.rb index 7d7f7a7..a50fe25 100644 --- a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service.rb +++ b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service.rb @@ -176,6 +176,36 @@ module TCLIService raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'GetFunctions failed: unknown result') end + def GetPrimaryKeys(req) + send_GetPrimaryKeys(req) + return recv_GetPrimaryKeys() + end + + def send_GetPrimaryKeys(req) + send_message('GetPrimaryKeys', GetPrimaryKeys_args, :req => req) + end + + def recv_GetPrimaryKeys() + result = receive_message(GetPrimaryKeys_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'GetPrimaryKeys failed: unknown result') + end + + def GetCrossReference(req) + send_GetCrossReference(req) + return recv_GetCrossReference() + end + + def send_GetCrossReference(req) + send_message('GetCrossReference', GetCrossReference_args, :req => req) + end + + def recv_GetCrossReference() + result = receive_message(GetCrossReference_result) + return result.success unless result.success.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'GetCrossReference failed: unknown result') + end + def GetOperationStatus(req) send_GetOperationStatus(req) return recv_GetOperationStatus() @@ -378,6 +408,20 @@ module TCLIService write_result(result, oprot, 'GetFunctions', seqid) end + def process_GetPrimaryKeys(seqid, iprot, oprot) + args = read_args(iprot, GetPrimaryKeys_args) + result = GetPrimaryKeys_result.new() + result.success = @handler.GetPrimaryKeys(args.req) + write_result(result, oprot, 'GetPrimaryKeys', seqid) + end + + def process_GetCrossReference(seqid, iprot, oprot) + args = read_args(iprot, GetCrossReference_args) + result = GetCrossReference_result.new() + result.success = @handler.GetCrossReference(args.req) + write_result(result, oprot, 'GetCrossReference', seqid) + end + def process_GetOperationStatus(seqid, iprot, oprot) args = read_args(iprot, GetOperationStatus_args) result = GetOperationStatus_result.new() @@ -790,6 +834,70 @@ module TCLIService ::Thrift::Struct.generate_accessors self end + class GetPrimaryKeys_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::TGetPrimaryKeysReq} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetPrimaryKeys_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::TGetPrimaryKeysResp} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetCrossReference_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQ = 1 + + FIELDS = { + REQ => {:type => ::Thrift::Types::STRUCT, :name => 'req', :class => ::TGetCrossReferenceReq} + } + + def struct_fields; FIELDS; end + + def validate 
+ end + + ::Thrift::Struct.generate_accessors self + end + + class GetCrossReference_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::TGetCrossReferenceResp} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class GetOperationStatus_args include ::Thrift::Struct, ::Thrift::Struct_Union REQ = 1 diff --git a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb index 07ed97c..14bc69d 100644 --- a/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb +++ b/service-rpc/src/gen/thrift/gen-rb/t_c_l_i_service_types.rb @@ -1447,6 +1447,96 @@ class TGetFunctionsResp ::Thrift::Struct.generate_accessors self end +class TGetPrimaryKeysReq + include ::Thrift::Struct, ::Thrift::Struct_Union + SESSIONHANDLE = 1 + CATALOGNAME = 2 + SCHEMANAME = 3 + TABLENAME = 4 + + FIELDS = { + SESSIONHANDLE => {:type => ::Thrift::Types::STRUCT, :name => 'sessionHandle', :class => ::TSessionHandle}, + CATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'catalogName', :optional => true}, + SCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'schemaName', :optional => true}, + TABLENAME => {:type => ::Thrift::Types::STRING, :name => 'tableName', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field sessionHandle is unset!') unless @sessionHandle + end + + ::Thrift::Struct.generate_accessors self +end + +class TGetPrimaryKeysResp + include ::Thrift::Struct, ::Thrift::Struct_Union + STATUS = 1 + OPERATIONHANDLE = 2 + + FIELDS = { + STATUS => {:type => ::Thrift::Types::STRUCT, :name => 'status', :class => ::TStatus}, + OPERATIONHANDLE => {:type => ::Thrift::Types::STRUCT, :name => 'operationHandle', :class => ::TOperationHandle, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field status is unset!') unless @status + end + + ::Thrift::Struct.generate_accessors self +end + +class TGetCrossReferenceReq + include ::Thrift::Struct, ::Thrift::Struct_Union + SESSIONHANDLE = 1 + PARENTCATALOGNAME = 2 + PARENTSCHEMANAME = 3 + PARENTTABLENAME = 4 + FOREIGNCATALOGNAME = 5 + FOREIGNSCHEMANAME = 6 + FOREIGNTABLENAME = 7 + + FIELDS = { + SESSIONHANDLE => {:type => ::Thrift::Types::STRUCT, :name => 'sessionHandle', :class => ::TSessionHandle}, + PARENTCATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'parentCatalogName', :optional => true}, + PARENTSCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'parentSchemaName', :optional => true}, + PARENTTABLENAME => {:type => ::Thrift::Types::STRING, :name => 'parentTableName', :optional => true}, + FOREIGNCATALOGNAME => {:type => ::Thrift::Types::STRING, :name => 'foreignCatalogName', :optional => true}, + FOREIGNSCHEMANAME => {:type => ::Thrift::Types::STRING, :name => 'foreignSchemaName', :optional => true}, + FOREIGNTABLENAME => {:type => ::Thrift::Types::STRING, :name => 'foreignTableName', :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field sessionHandle is unset!') unless @sessionHandle + end + + ::Thrift::Struct.generate_accessors self +end + +class TGetCrossReferenceResp 
+ include ::Thrift::Struct, ::Thrift::Struct_Union + STATUS = 1 + OPERATIONHANDLE = 2 + + FIELDS = { + STATUS => {:type => ::Thrift::Types::STRUCT, :name => 'status', :class => ::TStatus}, + OPERATIONHANDLE => {:type => ::Thrift::Types::STRUCT, :name => 'operationHandle', :class => ::TOperationHandle, :optional => true} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field status is unset!') unless @status + end + + ::Thrift::Struct.generate_accessors self +end + class TGetOperationStatusReq include ::Thrift::Struct, ::Thrift::Struct_Union OPERATIONHANDLE = 1 diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.cpp b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp new file mode 100644 index 0000000..a5448f0 --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/ThriftHive.cpp @@ -0,0 +1,3544 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#include "ThriftHive.h" + +namespace Apache { namespace Hadoop { namespace Hive { + + +ThriftHive_execute_args::~ThriftHive_execute_args() throw() { +} + + +uint32_t ThriftHive_execute_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->query); + this->__isset.query = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_execute_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_execute_args"); + + xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->query); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_execute_pargs::~ThriftHive_execute_pargs() throw() { +} + + +uint32_t ThriftHive_execute_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_execute_pargs"); + + xfer += oprot->writeFieldBegin("query", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString((*(this->query))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_execute_result::~ThriftHive_execute_result() throw() { +} + + +uint32_t ThriftHive_execute_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using 
::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_execute_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_execute_result"); + + if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_execute_presult::~ThriftHive_execute_presult() throw() { +} + + +uint32_t ThriftHive_execute_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_fetchOne_args::~ThriftHive_fetchOne_args() throw() { +} + + +uint32_t ThriftHive_fetchOne_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_fetchOne_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_fetchOne_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchOne_pargs::~ThriftHive_fetchOne_pargs() throw() { +} + + +uint32_t ThriftHive_fetchOne_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_fetchOne_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchOne_result::~ThriftHive_fetchOne_result() throw() { +} + + +uint32_t ThriftHive_fetchOne_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + 
apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->success); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_fetchOne_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_fetchOne_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRING, 0); + xfer += oprot->writeString(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchOne_presult::~ThriftHive_fetchOne_presult() throw() { +} + + +uint32_t ThriftHive_fetchOne_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString((*(this->success))); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_fetchN_args::~ThriftHive_fetchN_args() throw() { +} + + +uint32_t ThriftHive_fetchN_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->numRows); + this->__isset.numRows = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += 
iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_fetchN_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_fetchN_args"); + + xfer += oprot->writeFieldBegin("numRows", ::apache::thrift::protocol::T_I32, 1); + xfer += oprot->writeI32(this->numRows); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchN_pargs::~ThriftHive_fetchN_pargs() throw() { +} + + +uint32_t ThriftHive_fetchN_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_fetchN_pargs"); + + xfer += oprot->writeFieldBegin("numRows", ::apache::thrift::protocol::T_I32, 1); + xfer += oprot->writeI32((*(this->numRows))); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchN_result::~ThriftHive_fetchN_result() throw() { +} + + +uint32_t ThriftHive_fetchN_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->success.clear(); + uint32_t _size5; + ::apache::thrift::protocol::TType _etype8; + xfer += iprot->readListBegin(_etype8, _size5); + this->success.resize(_size5); + uint32_t _i9; + for (_i9 = 0; _i9 < _size5; ++_i9) + { + xfer += iprot->readString(this->success[_i9]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_fetchN_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_fetchN_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string> ::const_iterator _iter10; + for (_iter10 = this->success.begin(); _iter10 != this->success.end(); ++_iter10) + { + xfer += oprot->writeString((*_iter10)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + 
+ThriftHive_fetchN_presult::~ThriftHive_fetchN_presult() throw() { +} + + +uint32_t ThriftHive_fetchN_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size11; + ::apache::thrift::protocol::TType _etype14; + xfer += iprot->readListBegin(_etype14, _size11); + (*(this->success)).resize(_size11); + uint32_t _i15; + for (_i15 = 0; _i15 < _size11; ++_i15) + { + xfer += iprot->readString((*(this->success))[_i15]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_fetchAll_args::~ThriftHive_fetchAll_args() throw() { +} + + +uint32_t ThriftHive_fetchAll_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_fetchAll_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_fetchAll_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchAll_pargs::~ThriftHive_fetchAll_pargs() throw() { +} + + +uint32_t ThriftHive_fetchAll_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_fetchAll_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchAll_result::~ThriftHive_fetchAll_result() throw() { +} + + +uint32_t ThriftHive_fetchAll_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + 
this->success.clear(); + uint32_t _size16; + ::apache::thrift::protocol::TType _etype19; + xfer += iprot->readListBegin(_etype19, _size16); + this->success.resize(_size16); + uint32_t _i20; + for (_i20 = 0; _i20 < _size16; ++_i20) + { + xfer += iprot->readString(this->success[_i20]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_fetchAll_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_fetchAll_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast<uint32_t>(this->success.size())); + std::vector<std::string> ::const_iterator _iter21; + for (_iter21 = this->success.begin(); _iter21 != this->success.end(); ++_iter21) + { + xfer += oprot->writeString((*_iter21)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_fetchAll_presult::~ThriftHive_fetchAll_presult() throw() { +} + + +uint32_t ThriftHive_fetchAll_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size22; + ::apache::thrift::protocol::TType _etype25; + xfer += iprot->readListBegin(_etype25, _size22); + (*(this->success)).resize(_size22); + uint32_t _i26; + for (_i26 = 0; _i26 < _size22; ++_i26) + { + xfer += iprot->readString((*(this->success))[_i26]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_getSchema_args::~ThriftHive_getSchema_args() throw() { +} + + +uint32_t ThriftHive_getSchema_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + 
{ + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getSchema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getSchema_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getSchema_pargs::~ThriftHive_getSchema_pargs() throw() { +} + + +uint32_t ThriftHive_getSchema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getSchema_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getSchema_result::~ThriftHive_getSchema_result() throw() { +} + + +uint32_t ThriftHive_getSchema_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getSchema_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_getSchema_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getSchema_presult::~ThriftHive_getSchema_presult() throw() { +} + + +uint32_t ThriftHive_getSchema_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += 
iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_getThriftSchema_args::~ThriftHive_getThriftSchema_args() throw() { +} + + +uint32_t ThriftHive_getThriftSchema_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getThriftSchema_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getThriftSchema_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getThriftSchema_pargs::~ThriftHive_getThriftSchema_pargs() throw() { +} + + +uint32_t ThriftHive_getThriftSchema_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getThriftSchema_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getThriftSchema_result::~ThriftHive_getThriftSchema_result() throw() { +} + + +uint32_t ThriftHive_getThriftSchema_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getThriftSchema_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_getThriftSchema_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += 
this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getThriftSchema_presult::~ThriftHive_getThriftSchema_presult() throw() { +} + + +uint32_t ThriftHive_getThriftSchema_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_getClusterStatus_args::~ThriftHive_getClusterStatus_args() throw() { +} + + +uint32_t ThriftHive_getClusterStatus_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getClusterStatus_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getClusterStatus_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getClusterStatus_pargs::~ThriftHive_getClusterStatus_pargs() throw() { +} + + +uint32_t ThriftHive_getClusterStatus_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getClusterStatus_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getClusterStatus_result::~ThriftHive_getClusterStatus_result() throw() { +} + + +uint32_t ThriftHive_getClusterStatus_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + 
this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getClusterStatus_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_getClusterStatus_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getClusterStatus_presult::~ThriftHive_getClusterStatus_presult() throw() { +} + + +uint32_t ThriftHive_getClusterStatus_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_getQueryPlan_args::~ThriftHive_getQueryPlan_args() throw() { +} + + +uint32_t ThriftHive_getQueryPlan_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getQueryPlan_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getQueryPlan_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getQueryPlan_pargs::~ThriftHive_getQueryPlan_pargs() throw() { +} + + +uint32_t ThriftHive_getQueryPlan_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker 
tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_getQueryPlan_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getQueryPlan_result::~ThriftHive_getQueryPlan_result() throw() { +} + + +uint32_t ThriftHive_getQueryPlan_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->success.read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_getQueryPlan_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_getQueryPlan_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_STRUCT, 0); + xfer += this->success.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.ex) { + xfer += oprot->writeFieldBegin("ex", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->ex.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_getQueryPlan_presult::~ThriftHive_getQueryPlan_presult() throw() { +} + + +uint32_t ThriftHive_getQueryPlan_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += (*(this->success)).read(iprot); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->ex.read(iprot); + this->__isset.ex = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + + +ThriftHive_clean_args::~ThriftHive_clean_args() throw() { +} + + +uint32_t ThriftHive_clean_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + 
xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_clean_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_clean_args"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_clean_pargs::~ThriftHive_clean_pargs() throw() { +} + + +uint32_t ThriftHive_clean_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("ThriftHive_clean_pargs"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_clean_result::~ThriftHive_clean_result() throw() { +} + + +uint32_t ThriftHive_clean_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHive_clean_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHive_clean_result"); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + + +ThriftHive_clean_presult::~ThriftHive_clean_presult() throw() { +} + + +uint32_t ThriftHive_clean_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + xfer += iprot->skip(ftype); + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +void ThriftHiveClient::execute(const std::string& query) +{ + send_execute(query); + recv_execute(); +} + +void ThriftHiveClient::send_execute(const std::string& query) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("execute", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_execute_pargs args; + args.query = &query; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_execute() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != 
::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("execute") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_execute_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.ex) { + throw result.ex; + } + return; +} + +void ThriftHiveClient::fetchOne(std::string& _return) +{ + send_fetchOne(); + recv_fetchOne(_return); +} + +void ThriftHiveClient::send_fetchOne() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("fetchOne", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_fetchOne_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_fetchOne(std::string& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("fetchOne") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_fetchOne_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fetchOne failed: unknown result"); +} + +void ThriftHiveClient::fetchN(std::vector<std::string> & _return, const int32_t numRows) +{ + send_fetchN(numRows); + recv_fetchN(_return); +} + +void ThriftHiveClient::send_fetchN(const int32_t numRows) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("fetchN", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_fetchN_pargs args; + args.numRows = &numRows; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_fetchN(std::vector<std::string> & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("fetchN") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_fetchN_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if 
(result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fetchN failed: unknown result"); +} + +void ThriftHiveClient::fetchAll(std::vector<std::string> & _return) +{ + send_fetchAll(); + recv_fetchAll(_return); +} + +void ThriftHiveClient::send_fetchAll() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("fetchAll", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_fetchAll_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_fetchAll(std::vector<std::string> & _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("fetchAll") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_fetchAll_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fetchAll failed: unknown result"); +} + +void ThriftHiveClient::getSchema( ::Apache::Hadoop::Hive::Schema& _return) +{ + send_getSchema(); + recv_getSchema(_return); +} + +void ThriftHiveClient::send_getSchema() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("getSchema", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getSchema_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_getSchema( ::Apache::Hadoop::Hive::Schema& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getSchema") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_getSchema_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getSchema failed: unknown result"); 
+} + +void ThriftHiveClient::getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) +{ + send_getThriftSchema(); + recv_getThriftSchema(_return); +} + +void ThriftHiveClient::send_getThriftSchema() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("getThriftSchema", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getThriftSchema_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getThriftSchema") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_getThriftSchema_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getThriftSchema failed: unknown result"); +} + +void ThriftHiveClient::getClusterStatus(HiveClusterStatus& _return) +{ + send_getClusterStatus(); + recv_getClusterStatus(_return); +} + +void ThriftHiveClient::send_getClusterStatus() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("getClusterStatus", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getClusterStatus_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_getClusterStatus(HiveClusterStatus& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getClusterStatus") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_getClusterStatus_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getClusterStatus failed: unknown result"); +} + +void ThriftHiveClient::getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) +{ + send_getQueryPlan(); + 
recv_getQueryPlan(_return); +} + +void ThriftHiveClient::send_getQueryPlan() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("getQueryPlan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getQueryPlan_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getQueryPlan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_getQueryPlan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + return; + } + if (result.__isset.ex) { + throw result.ex; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getQueryPlan failed: unknown result"); +} + +void ThriftHiveClient::clean() +{ + send_clean(); + recv_clean(); +} + +void ThriftHiveClient::send_clean() +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("clean", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_clean_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +void ThriftHiveClient::recv_clean() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("clean") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + ThriftHive_clean_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + return; +} + +bool ThriftHiveProcessor::dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext) { + ProcessMap::iterator pfn; + pfn = processMap_.find(fname); + if (pfn == processMap_.end()) { + return ::Apache::Hadoop::Hive::ThriftHiveMetastoreProcessor::dispatchCall(iprot, oprot, fname, seqid, callContext); + } + (this->*(pfn->second))(seqid, iprot, oprot, callContext); + return true; +} + +void ThriftHiveProcessor::process_execute(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != 
NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.execute", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.execute"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.execute"); + } + + ThriftHive_execute_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.execute", bytes); + } + + ThriftHive_execute_result result; + try { + iface_->execute(args.query); + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.execute"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("execute", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.execute"); + } + + oprot->writeMessageBegin("execute", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.execute", bytes); + } +} + +void ThriftHiveProcessor::process_fetchOne(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.fetchOne", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.fetchOne"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.fetchOne"); + } + + ThriftHive_fetchOne_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.fetchOne", bytes); + } + + ThriftHive_fetchOne_result result; + try { + iface_->fetchOne(result.success); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.fetchOne"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("fetchOne", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.fetchOne"); + } + + oprot->writeMessageBegin("fetchOne", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.fetchOne", bytes); + } +} + +void ThriftHiveProcessor::process_fetchN(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, 
::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.fetchN", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.fetchN"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.fetchN"); + } + + ThriftHive_fetchN_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.fetchN", bytes); + } + + ThriftHive_fetchN_result result; + try { + iface_->fetchN(result.success, args.numRows); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.fetchN"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("fetchN", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.fetchN"); + } + + oprot->writeMessageBegin("fetchN", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.fetchN", bytes); + } +} + +void ThriftHiveProcessor::process_fetchAll(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.fetchAll", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.fetchAll"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.fetchAll"); + } + + ThriftHive_fetchAll_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.fetchAll", bytes); + } + + ThriftHive_fetchAll_result result; + try { + iface_->fetchAll(result.success); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.fetchAll"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("fetchAll", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.fetchAll"); + } + + oprot->writeMessageBegin("fetchAll", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + 
this->eventHandler_->postWrite(ctx, "ThriftHive.fetchAll", bytes); + } +} + +void ThriftHiveProcessor::process_getSchema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.getSchema", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.getSchema"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.getSchema"); + } + + ThriftHive_getSchema_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.getSchema", bytes); + } + + ThriftHive_getSchema_result result; + try { + iface_->getSchema(result.success); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.getSchema"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("getSchema", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.getSchema"); + } + + oprot->writeMessageBegin("getSchema", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.getSchema", bytes); + } +} + +void ThriftHiveProcessor::process_getThriftSchema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.getThriftSchema", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.getThriftSchema"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.getThriftSchema"); + } + + ThriftHive_getThriftSchema_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.getThriftSchema", bytes); + } + + ThriftHive_getThriftSchema_result result; + try { + iface_->getThriftSchema(result.success); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.getThriftSchema"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("getThriftSchema", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.getThriftSchema"); + } + + 
oprot->writeMessageBegin("getThriftSchema", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.getThriftSchema", bytes); + } +} + +void ThriftHiveProcessor::process_getClusterStatus(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.getClusterStatus", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.getClusterStatus"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.getClusterStatus"); + } + + ThriftHive_getClusterStatus_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.getClusterStatus", bytes); + } + + ThriftHive_getClusterStatus_result result; + try { + iface_->getClusterStatus(result.success); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.getClusterStatus"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("getClusterStatus", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.getClusterStatus"); + } + + oprot->writeMessageBegin("getClusterStatus", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.getClusterStatus", bytes); + } +} + +void ThriftHiveProcessor::process_getQueryPlan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.getQueryPlan", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.getQueryPlan"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.getQueryPlan"); + } + + ThriftHive_getQueryPlan_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.getQueryPlan", bytes); + } + + ThriftHive_getQueryPlan_result result; + try { + iface_->getQueryPlan(result.success); + result.__isset.success = true; + } catch (HiveServerException &ex) { + result.ex = ex; + result.__isset.ex = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.getQueryPlan"); + } + + ::apache::thrift::TApplicationException x(e.what()); + 
oprot->writeMessageBegin("getQueryPlan", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.getQueryPlan"); + } + + oprot->writeMessageBegin("getQueryPlan", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.getQueryPlan", bytes); + } +} + +void ThriftHiveProcessor::process_clean(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHive.clean", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHive.clean"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHive.clean"); + } + + ThriftHive_clean_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHive.clean", bytes); + } + + ThriftHive_clean_result result; + try { + iface_->clean(); + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHive.clean"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("clean", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHive.clean"); + } + + oprot->writeMessageBegin("clean", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHive.clean", bytes); + } +} + +::boost::shared_ptr< ::apache::thrift::TProcessor > ThriftHiveProcessorFactory::getProcessor(const ::apache::thrift::TConnectionInfo& connInfo) { + ::apache::thrift::ReleaseHandler< ThriftHiveIfFactory > cleanup(handlerFactory_); + ::boost::shared_ptr< ThriftHiveIf > handler(handlerFactory_->getHandler(connInfo), cleanup); + ::boost::shared_ptr< ::apache::thrift::TProcessor > processor(new ThriftHiveProcessor(handler)); + return processor; +} + +void ThriftHiveConcurrentClient::execute(const std::string& query) +{ + int32_t seqid = send_execute(query); + recv_execute(seqid); +} + +int32_t ThriftHiveConcurrentClient::send_execute(const std::string& query) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("execute", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_execute_pargs args; + args.query = &query; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_execute(const int32_t seqid) +{ + + int32_t 
rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("execute") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_execute_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::fetchOne(std::string& _return) +{ + int32_t seqid = send_fetchOne(); + recv_fetchOne(_return, seqid); +} + +int32_t ThriftHiveConcurrentClient::send_fetchOne() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("fetchOne", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_fetchOne_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_fetchOne(std::string& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("fetchOne") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_fetchOne_presult result; + result.success = &_return; + 
result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fetchOne failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::fetchN(std::vector<std::string> & _return, const int32_t numRows) +{ + int32_t seqid = send_fetchN(numRows); + recv_fetchN(_return, seqid); +} + +int32_t ThriftHiveConcurrentClient::send_fetchN(const int32_t numRows) +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("fetchN", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_fetchN_pargs args; + args.numRows = &numRows; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_fetchN(std::vector<std::string> & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("fetchN") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_fetchN_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fetchN failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::fetchAll(std::vector<std::string> & _return) +{ + int32_t seqid = send_fetchAll(); + recv_fetchAll(_return, seqid); +} + +int32_t ThriftHiveConcurrentClient::send_fetchAll() +{ + int32_t cseqid = 
this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("fetchAll", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_fetchAll_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_fetchAll(std::vector<std::string> & _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("fetchAll") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_fetchAll_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "fetchAll failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::getSchema( ::Apache::Hadoop::Hive::Schema& _return) +{ + int32_t seqid = send_getSchema(); + recv_getSchema(_return, seqid); +} + +int32_t ThriftHiveConcurrentClient::send_getSchema() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getSchema", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getSchema_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_getSchema( ::Apache::Hadoop::Hive::Schema& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, 
rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getSchema") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_getSchema_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getSchema failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) +{ + int32_t seqid = send_getThriftSchema(); + recv_getThriftSchema(_return, seqid); +} + +int32_t ThriftHiveConcurrentClient::send_getThriftSchema() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getThriftSchema", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getThriftSchema_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getThriftSchema") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_getThriftSchema_presult result; + result.success = &_return; + result.read(iprot_); + 
iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getThriftSchema failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::getClusterStatus(HiveClusterStatus& _return) +{ + int32_t seqid = send_getClusterStatus(); + recv_getClusterStatus(_return, seqid); +} + +int32_t ThriftHiveConcurrentClient::send_getClusterStatus() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getClusterStatus", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getClusterStatus_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_getClusterStatus(HiveClusterStatus& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getClusterStatus") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_getClusterStatus_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getClusterStatus failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) +{ + int32_t seqid = send_getQueryPlan(); + recv_getQueryPlan(_return, seqid); +} + +int32_t 
ThriftHiveConcurrentClient::send_getQueryPlan() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("getQueryPlan", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_getQueryPlan_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return, const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("getQueryPlan") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_getQueryPlan_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + // _return pointer has now been filled + sentry.commit(); + return; + } + if (result.__isset.ex) { + sentry.commit(); + throw result.ex; + } + // in a bad state, don't commit + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "getQueryPlan failed: unknown result"); + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +void ThriftHiveConcurrentClient::clean() +{ + int32_t seqid = send_clean(); + recv_clean(seqid); +} + +int32_t ThriftHiveConcurrentClient::send_clean() +{ + int32_t cseqid = this->sync_.generateSeqId(); + ::apache::thrift::async::TConcurrentSendSentry sentry(&this->sync_); + oprot_->writeMessageBegin("clean", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHive_clean_pargs args; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); + + sentry.commit(); + return cseqid; +} + +void ThriftHiveConcurrentClient::recv_clean(const int32_t seqid) +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + // the read mutex gets dropped and reacquired as part of waitForWork() + // The destructor of this sentry wakes up other clients + ::apache::thrift::async::TConcurrentRecvSentry sentry(&this->sync_, seqid); + + while(true) { + if(!this->sync_.getPending(fname, mtype, rseqid)) { + iprot_->readMessageBegin(fname, mtype, 
rseqid); + } + if(seqid == rseqid) { + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + sentry.commit(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("clean") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + // in a bad state, don't commit + using ::apache::thrift::protocol::TProtocolException; + throw TProtocolException(TProtocolException::INVALID_DATA); + } + ThriftHive_clean_presult result; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + sentry.commit(); + return; + } + // seqid != rseqid + this->sync_.updatePending(fname, mtype, rseqid); + + // this will temporarily unlock the readMutex, and let other clients get work done + this->sync_.waitForWork(seqid); + } // end while(true) +} + +}}} // namespace + diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive.h b/service/src/gen/thrift/gen-cpp/ThriftHive.h new file mode 100644 index 0000000..902bd4b --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/ThriftHive.h @@ -0,0 +1,1224 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#ifndef ThriftHive_H +#define ThriftHive_H + +#include <thrift/TDispatchProcessor.h> +#include <thrift/async/TConcurrentClientSyncInfo.h> +#include "hive_service_types.h" +#include "ThriftHiveMetastore.h" + +namespace Apache { namespace Hadoop { namespace Hive { + +#ifdef _WIN32 + #pragma warning( push ) + #pragma warning (disable : 4250 ) //inheriting methods via dominance +#endif + +class ThriftHiveIf : virtual public ::Apache::Hadoop::Hive::ThriftHiveMetastoreIf { + public: + virtual ~ThriftHiveIf() {} + virtual void execute(const std::string& query) = 0; + virtual void fetchOne(std::string& _return) = 0; + virtual void fetchN(std::vector<std::string> & _return, const int32_t numRows) = 0; + virtual void fetchAll(std::vector<std::string> & _return) = 0; + virtual void getSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0; + virtual void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) = 0; + virtual void getClusterStatus(HiveClusterStatus& _return) = 0; + virtual void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) = 0; + virtual void clean() = 0; +}; + +class ThriftHiveIfFactory : virtual public ::Apache::Hadoop::Hive::ThriftHiveMetastoreIfFactory { + public: + typedef ThriftHiveIf Handler; + + virtual ~ThriftHiveIfFactory() {} + + virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo& connInfo) = 0; + virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* handler */) = 0; +}; + +class ThriftHiveIfSingletonFactory : virtual public ThriftHiveIfFactory { + public: + ThriftHiveIfSingletonFactory(const boost::shared_ptr<ThriftHiveIf>& iface) : iface_(iface) {} + virtual ~ThriftHiveIfSingletonFactory() {} + + virtual ThriftHiveIf* getHandler(const ::apache::thrift::TConnectionInfo&) { + return iface_.get(); + } + virtual void releaseHandler( ::facebook::fb303::FacebookServiceIf* /* handler */) {} + + protected: + boost::shared_ptr<ThriftHiveIf> iface_; +}; + +class ThriftHiveNull : virtual public ThriftHiveIf , virtual public ::Apache::Hadoop::Hive::ThriftHiveMetastoreNull { + public: + virtual ~ThriftHiveNull() {} + void execute(const std::string& /* query */) { +
return; + } + void fetchOne(std::string& /* _return */) { + return; + } + void fetchN(std::vector & /* _return */, const int32_t /* numRows */) { + return; + } + void fetchAll(std::vector & /* _return */) { + return; + } + void getSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) { + return; + } + void getThriftSchema( ::Apache::Hadoop::Hive::Schema& /* _return */) { + return; + } + void getClusterStatus(HiveClusterStatus& /* _return */) { + return; + } + void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& /* _return */) { + return; + } + void clean() { + return; + } +}; + +typedef struct _ThriftHive_execute_args__isset { + _ThriftHive_execute_args__isset() : query(false) {} + bool query :1; +} _ThriftHive_execute_args__isset; + +class ThriftHive_execute_args { + public: + + ThriftHive_execute_args(const ThriftHive_execute_args&); + ThriftHive_execute_args& operator=(const ThriftHive_execute_args&); + ThriftHive_execute_args() : query() { + } + + virtual ~ThriftHive_execute_args() throw(); + std::string query; + + _ThriftHive_execute_args__isset __isset; + + void __set_query(const std::string& val); + + bool operator == (const ThriftHive_execute_args & rhs) const + { + if (!(query == rhs.query)) + return false; + return true; + } + bool operator != (const ThriftHive_execute_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_execute_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_execute_pargs { + public: + + + virtual ~ThriftHive_execute_pargs() throw(); + const std::string* query; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_execute_result__isset { + _ThriftHive_execute_result__isset() : ex(false) {} + bool ex :1; +} _ThriftHive_execute_result__isset; + +class ThriftHive_execute_result { + public: + + ThriftHive_execute_result(const ThriftHive_execute_result&); + ThriftHive_execute_result& operator=(const ThriftHive_execute_result&); + ThriftHive_execute_result() { + } + + virtual ~ThriftHive_execute_result() throw(); + HiveServerException ex; + + _ThriftHive_execute_result__isset __isset; + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_execute_result & rhs) const + { + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_execute_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_execute_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_execute_presult__isset { + _ThriftHive_execute_presult__isset() : ex(false) {} + bool ex :1; +} _ThriftHive_execute_presult__isset; + +class ThriftHive_execute_presult { + public: + + + virtual ~ThriftHive_execute_presult() throw(); + HiveServerException ex; + + _ThriftHive_execute_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_fetchOne_args { + public: + + ThriftHive_fetchOne_args(const ThriftHive_fetchOne_args&); + ThriftHive_fetchOne_args& operator=(const ThriftHive_fetchOne_args&); + ThriftHive_fetchOne_args() { + } + + virtual ~ThriftHive_fetchOne_args() throw(); + + bool operator == (const ThriftHive_fetchOne_args & /* rhs */) const + { + return true; + } + bool operator != (const 
ThriftHive_fetchOne_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_fetchOne_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_fetchOne_pargs { + public: + + + virtual ~ThriftHive_fetchOne_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_fetchOne_result__isset { + _ThriftHive_fetchOne_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_fetchOne_result__isset; + +class ThriftHive_fetchOne_result { + public: + + ThriftHive_fetchOne_result(const ThriftHive_fetchOne_result&); + ThriftHive_fetchOne_result& operator=(const ThriftHive_fetchOne_result&); + ThriftHive_fetchOne_result() : success() { + } + + virtual ~ThriftHive_fetchOne_result() throw(); + std::string success; + HiveServerException ex; + + _ThriftHive_fetchOne_result__isset __isset; + + void __set_success(const std::string& val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_fetchOne_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_fetchOne_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_fetchOne_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_fetchOne_presult__isset { + _ThriftHive_fetchOne_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_fetchOne_presult__isset; + +class ThriftHive_fetchOne_presult { + public: + + + virtual ~ThriftHive_fetchOne_presult() throw(); + std::string* success; + HiveServerException ex; + + _ThriftHive_fetchOne_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +typedef struct _ThriftHive_fetchN_args__isset { + _ThriftHive_fetchN_args__isset() : numRows(false) {} + bool numRows :1; +} _ThriftHive_fetchN_args__isset; + +class ThriftHive_fetchN_args { + public: + + ThriftHive_fetchN_args(const ThriftHive_fetchN_args&); + ThriftHive_fetchN_args& operator=(const ThriftHive_fetchN_args&); + ThriftHive_fetchN_args() : numRows(0) { + } + + virtual ~ThriftHive_fetchN_args() throw(); + int32_t numRows; + + _ThriftHive_fetchN_args__isset __isset; + + void __set_numRows(const int32_t val); + + bool operator == (const ThriftHive_fetchN_args & rhs) const + { + if (!(numRows == rhs.numRows)) + return false; + return true; + } + bool operator != (const ThriftHive_fetchN_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_fetchN_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_fetchN_pargs { + public: + + + virtual ~ThriftHive_fetchN_pargs() throw(); + const int32_t* numRows; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_fetchN_result__isset { + _ThriftHive_fetchN_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_fetchN_result__isset; + +class ThriftHive_fetchN_result { + public: + + ThriftHive_fetchN_result(const ThriftHive_fetchN_result&); + 
ThriftHive_fetchN_result& operator=(const ThriftHive_fetchN_result&); + ThriftHive_fetchN_result() { + } + + virtual ~ThriftHive_fetchN_result() throw(); + std::vector success; + HiveServerException ex; + + _ThriftHive_fetchN_result__isset __isset; + + void __set_success(const std::vector & val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_fetchN_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_fetchN_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_fetchN_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_fetchN_presult__isset { + _ThriftHive_fetchN_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_fetchN_presult__isset; + +class ThriftHive_fetchN_presult { + public: + + + virtual ~ThriftHive_fetchN_presult() throw(); + std::vector * success; + HiveServerException ex; + + _ThriftHive_fetchN_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_fetchAll_args { + public: + + ThriftHive_fetchAll_args(const ThriftHive_fetchAll_args&); + ThriftHive_fetchAll_args& operator=(const ThriftHive_fetchAll_args&); + ThriftHive_fetchAll_args() { + } + + virtual ~ThriftHive_fetchAll_args() throw(); + + bool operator == (const ThriftHive_fetchAll_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_fetchAll_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_fetchAll_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_fetchAll_pargs { + public: + + + virtual ~ThriftHive_fetchAll_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_fetchAll_result__isset { + _ThriftHive_fetchAll_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_fetchAll_result__isset; + +class ThriftHive_fetchAll_result { + public: + + ThriftHive_fetchAll_result(const ThriftHive_fetchAll_result&); + ThriftHive_fetchAll_result& operator=(const ThriftHive_fetchAll_result&); + ThriftHive_fetchAll_result() { + } + + virtual ~ThriftHive_fetchAll_result() throw(); + std::vector success; + HiveServerException ex; + + _ThriftHive_fetchAll_result__isset __isset; + + void __set_success(const std::vector & val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_fetchAll_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_fetchAll_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_fetchAll_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_fetchAll_presult__isset { + _ThriftHive_fetchAll_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_fetchAll_presult__isset; + +class ThriftHive_fetchAll_presult { + public: + + + virtual 
~ThriftHive_fetchAll_presult() throw(); + std::vector * success; + HiveServerException ex; + + _ThriftHive_fetchAll_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_getSchema_args { + public: + + ThriftHive_getSchema_args(const ThriftHive_getSchema_args&); + ThriftHive_getSchema_args& operator=(const ThriftHive_getSchema_args&); + ThriftHive_getSchema_args() { + } + + virtual ~ThriftHive_getSchema_args() throw(); + + bool operator == (const ThriftHive_getSchema_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_getSchema_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getSchema_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_getSchema_pargs { + public: + + + virtual ~ThriftHive_getSchema_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getSchema_result__isset { + _ThriftHive_getSchema_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getSchema_result__isset; + +class ThriftHive_getSchema_result { + public: + + ThriftHive_getSchema_result(const ThriftHive_getSchema_result&); + ThriftHive_getSchema_result& operator=(const ThriftHive_getSchema_result&); + ThriftHive_getSchema_result() { + } + + virtual ~ThriftHive_getSchema_result() throw(); + ::Apache::Hadoop::Hive::Schema success; + HiveServerException ex; + + _ThriftHive_getSchema_result__isset __isset; + + void __set_success(const ::Apache::Hadoop::Hive::Schema& val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_getSchema_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_getSchema_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getSchema_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getSchema_presult__isset { + _ThriftHive_getSchema_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getSchema_presult__isset; + +class ThriftHive_getSchema_presult { + public: + + + virtual ~ThriftHive_getSchema_presult() throw(); + ::Apache::Hadoop::Hive::Schema* success; + HiveServerException ex; + + _ThriftHive_getSchema_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_getThriftSchema_args { + public: + + ThriftHive_getThriftSchema_args(const ThriftHive_getThriftSchema_args&); + ThriftHive_getThriftSchema_args& operator=(const ThriftHive_getThriftSchema_args&); + ThriftHive_getThriftSchema_args() { + } + + virtual ~ThriftHive_getThriftSchema_args() throw(); + + bool operator == (const ThriftHive_getThriftSchema_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_getThriftSchema_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getThriftSchema_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_getThriftSchema_pargs { + public: 
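+ // Convention note on the generated call structs: each method's *_pargs and
+ // *_presult are the client-side mirrors of *_args and *_result. A pargs
+ // struct holds const pointers to the caller's arguments, so send_*()
+ // serializes them without copying; a presult's `success` member points at
+ // the caller's _return, so recv_*() deserializes directly into it.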
+ + + virtual ~ThriftHive_getThriftSchema_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getThriftSchema_result__isset { + _ThriftHive_getThriftSchema_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getThriftSchema_result__isset; + +class ThriftHive_getThriftSchema_result { + public: + + ThriftHive_getThriftSchema_result(const ThriftHive_getThriftSchema_result&); + ThriftHive_getThriftSchema_result& operator=(const ThriftHive_getThriftSchema_result&); + ThriftHive_getThriftSchema_result() { + } + + virtual ~ThriftHive_getThriftSchema_result() throw(); + ::Apache::Hadoop::Hive::Schema success; + HiveServerException ex; + + _ThriftHive_getThriftSchema_result__isset __isset; + + void __set_success(const ::Apache::Hadoop::Hive::Schema& val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_getThriftSchema_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_getThriftSchema_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getThriftSchema_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getThriftSchema_presult__isset { + _ThriftHive_getThriftSchema_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getThriftSchema_presult__isset; + +class ThriftHive_getThriftSchema_presult { + public: + + + virtual ~ThriftHive_getThriftSchema_presult() throw(); + ::Apache::Hadoop::Hive::Schema* success; + HiveServerException ex; + + _ThriftHive_getThriftSchema_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_getClusterStatus_args { + public: + + ThriftHive_getClusterStatus_args(const ThriftHive_getClusterStatus_args&); + ThriftHive_getClusterStatus_args& operator=(const ThriftHive_getClusterStatus_args&); + ThriftHive_getClusterStatus_args() { + } + + virtual ~ThriftHive_getClusterStatus_args() throw(); + + bool operator == (const ThriftHive_getClusterStatus_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_getClusterStatus_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getClusterStatus_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_getClusterStatus_pargs { + public: + + + virtual ~ThriftHive_getClusterStatus_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getClusterStatus_result__isset { + _ThriftHive_getClusterStatus_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getClusterStatus_result__isset; + +class ThriftHive_getClusterStatus_result { + public: + + ThriftHive_getClusterStatus_result(const ThriftHive_getClusterStatus_result&); + ThriftHive_getClusterStatus_result& operator=(const ThriftHive_getClusterStatus_result&); + ThriftHive_getClusterStatus_result() { + } + + virtual ~ThriftHive_getClusterStatus_result() throw(); + HiveClusterStatus success; + HiveServerException ex; + + _ThriftHive_getClusterStatus_result__isset 
__isset; + + void __set_success(const HiveClusterStatus& val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_getClusterStatus_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_getClusterStatus_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getClusterStatus_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getClusterStatus_presult__isset { + _ThriftHive_getClusterStatus_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getClusterStatus_presult__isset; + +class ThriftHive_getClusterStatus_presult { + public: + + + virtual ~ThriftHive_getClusterStatus_presult() throw(); + HiveClusterStatus* success; + HiveServerException ex; + + _ThriftHive_getClusterStatus_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_getQueryPlan_args { + public: + + ThriftHive_getQueryPlan_args(const ThriftHive_getQueryPlan_args&); + ThriftHive_getQueryPlan_args& operator=(const ThriftHive_getQueryPlan_args&); + ThriftHive_getQueryPlan_args() { + } + + virtual ~ThriftHive_getQueryPlan_args() throw(); + + bool operator == (const ThriftHive_getQueryPlan_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_getQueryPlan_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getQueryPlan_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_getQueryPlan_pargs { + public: + + + virtual ~ThriftHive_getQueryPlan_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getQueryPlan_result__isset { + _ThriftHive_getQueryPlan_result__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getQueryPlan_result__isset; + +class ThriftHive_getQueryPlan_result { + public: + + ThriftHive_getQueryPlan_result(const ThriftHive_getQueryPlan_result&); + ThriftHive_getQueryPlan_result& operator=(const ThriftHive_getQueryPlan_result&); + ThriftHive_getQueryPlan_result() { + } + + virtual ~ThriftHive_getQueryPlan_result() throw(); + ::Apache::Hadoop::Hive::QueryPlan success; + HiveServerException ex; + + _ThriftHive_getQueryPlan_result__isset __isset; + + void __set_success(const ::Apache::Hadoop::Hive::QueryPlan& val); + + void __set_ex(const HiveServerException& val); + + bool operator == (const ThriftHive_getQueryPlan_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(ex == rhs.ex)) + return false; + return true; + } + bool operator != (const ThriftHive_getQueryPlan_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_getQueryPlan_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHive_getQueryPlan_presult__isset { + _ThriftHive_getQueryPlan_presult__isset() : success(false), ex(false) {} + bool success :1; + bool ex :1; +} _ThriftHive_getQueryPlan_presult__isset; + +class ThriftHive_getQueryPlan_presult { + public: 
+ + + virtual ~ThriftHive_getQueryPlan_presult() throw(); + ::Apache::Hadoop::Hive::QueryPlan* success; + HiveServerException ex; + + _ThriftHive_getQueryPlan_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + + +class ThriftHive_clean_args { + public: + + ThriftHive_clean_args(const ThriftHive_clean_args&); + ThriftHive_clean_args& operator=(const ThriftHive_clean_args&); + ThriftHive_clean_args() { + } + + virtual ~ThriftHive_clean_args() throw(); + + bool operator == (const ThriftHive_clean_args & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_clean_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_clean_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_clean_pargs { + public: + + + virtual ~ThriftHive_clean_pargs() throw(); + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_clean_result { + public: + + ThriftHive_clean_result(const ThriftHive_clean_result&); + ThriftHive_clean_result& operator=(const ThriftHive_clean_result&); + ThriftHive_clean_result() { + } + + virtual ~ThriftHive_clean_result() throw(); + + bool operator == (const ThriftHive_clean_result & /* rhs */) const + { + return true; + } + bool operator != (const ThriftHive_clean_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHive_clean_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHive_clean_presult { + public: + + + virtual ~ThriftHive_clean_presult() throw(); + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + +class ThriftHiveClient : virtual public ThriftHiveIf, public ::Apache::Hadoop::Hive::ThriftHiveMetastoreClient { + public: + ThriftHiveClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : + ::Apache::Hadoop::Hive::ThriftHiveMetastoreClient(prot, prot) {} + ThriftHiveClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::Apache::Hadoop::Hive::ThriftHiveMetastoreClient(iprot, oprot) {} + boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { + return piprot_; + } + boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { + return poprot_; + } + void execute(const std::string& query); + void send_execute(const std::string& query); + void recv_execute(); + void fetchOne(std::string& _return); + void send_fetchOne(); + void recv_fetchOne(std::string& _return); + void fetchN(std::vector & _return, const int32_t numRows); + void send_fetchN(const int32_t numRows); + void recv_fetchN(std::vector & _return); + void fetchAll(std::vector & _return); + void send_fetchAll(); + void recv_fetchAll(std::vector & _return); + void getSchema( ::Apache::Hadoop::Hive::Schema& _return); + void send_getSchema(); + void recv_getSchema( ::Apache::Hadoop::Hive::Schema& _return); + void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return); + void send_getThriftSchema(); + void recv_getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return); + void getClusterStatus(HiveClusterStatus& _return); + void send_getClusterStatus(); + void recv_getClusterStatus(HiveClusterStatus& _return); + void getQueryPlan( 
::Apache::Hadoop::Hive::QueryPlan& _return); + void send_getQueryPlan(); + void recv_getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return); + void clean(); + void send_clean(); + void recv_clean(); +}; + +class ThriftHiveProcessor : public ::Apache::Hadoop::Hive::ThriftHiveMetastoreProcessor { + protected: + boost::shared_ptr iface_; + virtual bool dispatchCall(::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, const std::string& fname, int32_t seqid, void* callContext); + private: + typedef void (ThriftHiveProcessor::*ProcessFunction)(int32_t, ::apache::thrift::protocol::TProtocol*, ::apache::thrift::protocol::TProtocol*, void*); + typedef std::map ProcessMap; + ProcessMap processMap_; + void process_execute(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_fetchOne(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_fetchN(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_fetchAll(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_getSchema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_getThriftSchema(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_getClusterStatus(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_getQueryPlan(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_clean(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + public: + ThriftHiveProcessor(boost::shared_ptr iface) : + ::Apache::Hadoop::Hive::ThriftHiveMetastoreProcessor(iface), + iface_(iface) { + processMap_["execute"] = &ThriftHiveProcessor::process_execute; + processMap_["fetchOne"] = &ThriftHiveProcessor::process_fetchOne; + processMap_["fetchN"] = &ThriftHiveProcessor::process_fetchN; + processMap_["fetchAll"] = &ThriftHiveProcessor::process_fetchAll; + processMap_["getSchema"] = &ThriftHiveProcessor::process_getSchema; + processMap_["getThriftSchema"] = &ThriftHiveProcessor::process_getThriftSchema; + processMap_["getClusterStatus"] = &ThriftHiveProcessor::process_getClusterStatus; + processMap_["getQueryPlan"] = &ThriftHiveProcessor::process_getQueryPlan; + processMap_["clean"] = &ThriftHiveProcessor::process_clean; + } + + virtual ~ThriftHiveProcessor() {} +}; + +class ThriftHiveProcessorFactory : public ::apache::thrift::TProcessorFactory { + public: + ThriftHiveProcessorFactory(const ::boost::shared_ptr< ThriftHiveIfFactory >& handlerFactory) : + handlerFactory_(handlerFactory) {} + + ::boost::shared_ptr< ::apache::thrift::TProcessor > getProcessor(const ::apache::thrift::TConnectionInfo& connInfo); + + protected: + ::boost::shared_ptr< ThriftHiveIfFactory > handlerFactory_; +}; + +class ThriftHiveMultiface : virtual public ThriftHiveIf, public ::Apache::Hadoop::Hive::ThriftHiveMetastoreMultiface { + public: + ThriftHiveMultiface(std::vector >& ifaces) : 
ifaces_(ifaces) { + std::vector >::iterator iter; + for (iter = ifaces.begin(); iter != ifaces.end(); ++iter) { + ::Apache::Hadoop::Hive::ThriftHiveMetastoreMultiface::add(*iter); + } + } + virtual ~ThriftHiveMultiface() {} + protected: + std::vector > ifaces_; + ThriftHiveMultiface() {} + void add(boost::shared_ptr iface) { + ::Apache::Hadoop::Hive::ThriftHiveMetastoreMultiface::add(iface); + ifaces_.push_back(iface); + } + public: + void execute(const std::string& query) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->execute(query); + } + ifaces_[i]->execute(query); + } + + void fetchOne(std::string& _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->fetchOne(_return); + } + ifaces_[i]->fetchOne(_return); + return; + } + + void fetchN(std::vector & _return, const int32_t numRows) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->fetchN(_return, numRows); + } + ifaces_[i]->fetchN(_return, numRows); + return; + } + + void fetchAll(std::vector & _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->fetchAll(_return); + } + ifaces_[i]->fetchAll(_return); + return; + } + + void getSchema( ::Apache::Hadoop::Hive::Schema& _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->getSchema(_return); + } + ifaces_[i]->getSchema(_return); + return; + } + + void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->getThriftSchema(_return); + } + ifaces_[i]->getThriftSchema(_return); + return; + } + + void getClusterStatus(HiveClusterStatus& _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->getClusterStatus(_return); + } + ifaces_[i]->getClusterStatus(_return); + return; + } + + void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->getQueryPlan(_return); + } + ifaces_[i]->getQueryPlan(_return); + return; + } + + void clean() { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->clean(); + } + ifaces_[i]->clean(); + } + +}; + +// The 'concurrent' client is a thread safe client that correctly handles +// out of order responses. 
It is slower than the regular client, so should +// only be used when you need to share a connection among multiple threads +class ThriftHiveConcurrentClient : virtual public ThriftHiveIf, public ::Apache::Hadoop::Hive::ThriftHiveMetastoreConcurrentClient { + public: + ThriftHiveConcurrentClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> prot) : + ::Apache::Hadoop::Hive::ThriftHiveMetastoreConcurrentClient(prot, prot) {} + ThriftHiveConcurrentClient(boost::shared_ptr< ::apache::thrift::protocol::TProtocol> iprot, boost::shared_ptr< ::apache::thrift::protocol::TProtocol> oprot) : ::Apache::Hadoop::Hive::ThriftHiveMetastoreConcurrentClient(iprot, oprot) {} + boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getInputProtocol() { + return piprot_; + } + boost::shared_ptr< ::apache::thrift::protocol::TProtocol> getOutputProtocol() { + return poprot_; + } + void execute(const std::string& query); + int32_t send_execute(const std::string& query); + void recv_execute(const int32_t seqid); + void fetchOne(std::string& _return); + int32_t send_fetchOne(); + void recv_fetchOne(std::string& _return, const int32_t seqid); + void fetchN(std::vector<std::string> & _return, const int32_t numRows); + int32_t send_fetchN(const int32_t numRows); + void recv_fetchN(std::vector<std::string> & _return, const int32_t seqid); + void fetchAll(std::vector<std::string> & _return); + int32_t send_fetchAll(); + void recv_fetchAll(std::vector<std::string> & _return, const int32_t seqid); + void getSchema( ::Apache::Hadoop::Hive::Schema& _return); + int32_t send_getSchema(); + void recv_getSchema( ::Apache::Hadoop::Hive::Schema& _return, const int32_t seqid); + void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return); + int32_t send_getThriftSchema(); + void recv_getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return, const int32_t seqid); + void getClusterStatus(HiveClusterStatus& _return); + int32_t send_getClusterStatus(); + void recv_getClusterStatus(HiveClusterStatus& _return, const int32_t seqid); + void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return); + int32_t send_getQueryPlan(); + void recv_getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return, const int32_t seqid); + void clean(); + int32_t send_clean(); + void recv_clean(const int32_t seqid); +}; + +#ifdef _WIN32 + #pragma warning( pop ) +#endif + +}}} // namespace + +#endif diff --git a/service/src/gen/thrift/gen-cpp/ThriftHive_server.skeleton.cpp b/service/src/gen/thrift/gen-cpp/ThriftHive_server.skeleton.cpp new file mode 100644 index 0000000..3b61b76 --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/ThriftHive_server.skeleton.cpp @@ -0,0 +1,84 @@ +// This autogenerated skeleton file illustrates how to build a server. +// You should copy it to another filename to avoid overwriting it.
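As a point of reference for the concurrent client declared above, here is a minimal usage sketch. It is not part of the generated patch; it assumes the standard Thrift 0.9.3 C++ socket transports, and the localhost:10000 endpoint is hypothetical.

#include <string>
#include <thrift/transport/TSocket.h>
#include <thrift/transport/TBufferTransports.h>
#include <thrift/protocol/TBinaryProtocol.h>
#include "ThriftHive.h"

using namespace apache::thrift::transport;
using namespace apache::thrift::protocol;

int main() {
  // Hypothetical endpoint; substitute the real HiveServer host and port.
  boost::shared_ptr<TSocket> socket(new TSocket("localhost", 10000));
  boost::shared_ptr<TTransport> transport(new TBufferedTransport(socket));
  boost::shared_ptr<TProtocol> protocol(new TBinaryProtocol(transport));

  // A single ThriftHiveConcurrentClient may be shared across threads: its
  // TConcurrentClientSyncInfo member matches responses to callers by seqid.
  Apache::Hadoop::Hive::ThriftHiveConcurrentClient client(protocol);

  transport->open();
  client.execute("SHOW TABLES");  // issue a query
  std::string row;
  client.fetchOne(row);           // pull back one result row
  transport->close();
  return 0;
}

The plain ThriftHiveClient skips this sequence-id bookkeeping and is the better choice when each thread owns its own connection, as the generated comment above notes.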
+ +#include "ThriftHive.h" +#include +#include +#include +#include + +using namespace ::apache::thrift; +using namespace ::apache::thrift::protocol; +using namespace ::apache::thrift::transport; +using namespace ::apache::thrift::server; + +using boost::shared_ptr; + +using namespace ::Apache::Hadoop::Hive; + +class ThriftHiveHandler : virtual public ThriftHiveIf { + public: + ThriftHiveHandler() { + // Your initialization goes here + } + + void execute(const std::string& query) { + // Your implementation goes here + printf("execute\n"); + } + + void fetchOne(std::string& _return) { + // Your implementation goes here + printf("fetchOne\n"); + } + + void fetchN(std::vector & _return, const int32_t numRows) { + // Your implementation goes here + printf("fetchN\n"); + } + + void fetchAll(std::vector & _return) { + // Your implementation goes here + printf("fetchAll\n"); + } + + void getSchema( ::Apache::Hadoop::Hive::Schema& _return) { + // Your implementation goes here + printf("getSchema\n"); + } + + void getThriftSchema( ::Apache::Hadoop::Hive::Schema& _return) { + // Your implementation goes here + printf("getThriftSchema\n"); + } + + void getClusterStatus(HiveClusterStatus& _return) { + // Your implementation goes here + printf("getClusterStatus\n"); + } + + void getQueryPlan( ::Apache::Hadoop::Hive::QueryPlan& _return) { + // Your implementation goes here + printf("getQueryPlan\n"); + } + + void clean() { + // Your implementation goes here + printf("clean\n"); + } + +}; + +int main(int argc, char **argv) { + int port = 9090; + shared_ptr handler(new ThriftHiveHandler()); + shared_ptr processor(new ThriftHiveProcessor(handler)); + shared_ptr serverTransport(new TServerSocket(port)); + shared_ptr transportFactory(new TBufferedTransportFactory()); + shared_ptr protocolFactory(new TBinaryProtocolFactory()); + + TSimpleServer server(processor, serverTransport, transportFactory, protocolFactory); + server.serve(); + return 0; +} + diff --git a/service/src/gen/thrift/gen-cpp/hive_service_constants.cpp b/service/src/gen/thrift/gen-cpp/hive_service_constants.cpp new file mode 100644 index 0000000..e2bbe71 --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/hive_service_constants.cpp @@ -0,0 +1,17 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#include "hive_service_constants.h" + +namespace Apache { namespace Hadoop { namespace Hive { + +const hive_serviceConstants g_hive_service_constants; + +hive_serviceConstants::hive_serviceConstants() { +} + +}}} // namespace + diff --git a/service/src/gen/thrift/gen-cpp/hive_service_constants.h b/service/src/gen/thrift/gen-cpp/hive_service_constants.h new file mode 100644 index 0000000..e0887f4 --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/hive_service_constants.h @@ -0,0 +1,24 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#ifndef hive_service_CONSTANTS_H +#define hive_service_CONSTANTS_H + +#include "hive_service_types.h" + +namespace Apache { namespace Hadoop { namespace Hive { + +class hive_serviceConstants { + public: + hive_serviceConstants(); + +}; + +extern const hive_serviceConstants g_hive_service_constants; + +}}} // namespace + +#endif diff --git a/service/src/gen/thrift/gen-cpp/hive_service_types.cpp b/service/src/gen/thrift/gen-cpp/hive_service_types.cpp new file mode 100644 index 0000000..9ddf7c1 --- /dev/null +++ 
b/service/src/gen/thrift/gen-cpp/hive_service_types.cpp @@ -0,0 +1,351 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#include "hive_service_types.h" + +#include <algorithm> +#include <ostream> + +#include <thrift/TToString.h> + +namespace Apache { namespace Hadoop { namespace Hive { + +int _kJobTrackerStateValues[] = { + JobTrackerState::INITIALIZING, + JobTrackerState::RUNNING +}; +const char* _kJobTrackerStateNames[] = { + "INITIALIZING", + "RUNNING" +}; +const std::map<int, const char*> _JobTrackerState_VALUES_TO_NAMES(::apache::thrift::TEnumIterator(2, _kJobTrackerStateValues, _kJobTrackerStateNames), ::apache::thrift::TEnumIterator(-1, NULL, NULL)); + + +HiveClusterStatus::~HiveClusterStatus() throw() { +} + + +void HiveClusterStatus::__set_taskTrackers(const int32_t val) { + this->taskTrackers = val; +} + +void HiveClusterStatus::__set_mapTasks(const int32_t val) { + this->mapTasks = val; +} + +void HiveClusterStatus::__set_reduceTasks(const int32_t val) { + this->reduceTasks = val; +} + +void HiveClusterStatus::__set_maxMapTasks(const int32_t val) { + this->maxMapTasks = val; +} + +void HiveClusterStatus::__set_maxReduceTasks(const int32_t val) { + this->maxReduceTasks = val; +} + +void HiveClusterStatus::__set_state(const JobTrackerState::type val) { + this->state = val; +} + +uint32_t HiveClusterStatus::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->taskTrackers); + this->__isset.taskTrackers = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->mapTasks); + this->__isset.mapTasks = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->reduceTasks); + this->__isset.reduceTasks = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->maxMapTasks); + this->__isset.maxMapTasks = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 5: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->maxReduceTasks); + this->__isset.maxReduceTasks = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 6: + if (ftype == ::apache::thrift::protocol::T_I32) { + int32_t ecast0; + xfer += iprot->readI32(ecast0); + this->state = (JobTrackerState::type)ecast0; + this->__isset.state = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t HiveClusterStatus::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("HiveClusterStatus"); + + xfer += oprot->writeFieldBegin("taskTrackers", 
::apache::thrift::protocol::T_I32, 1); + xfer += oprot->writeI32(this->taskTrackers); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("mapTasks", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32(this->mapTasks); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("reduceTasks", ::apache::thrift::protocol::T_I32, 3); + xfer += oprot->writeI32(this->reduceTasks); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("maxMapTasks", ::apache::thrift::protocol::T_I32, 4); + xfer += oprot->writeI32(this->maxMapTasks); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("maxReduceTasks", ::apache::thrift::protocol::T_I32, 5); + xfer += oprot->writeI32(this->maxReduceTasks); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("state", ::apache::thrift::protocol::T_I32, 6); + xfer += oprot->writeI32((int32_t)this->state); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(HiveClusterStatus &a, HiveClusterStatus &b) { + using ::std::swap; + swap(a.taskTrackers, b.taskTrackers); + swap(a.mapTasks, b.mapTasks); + swap(a.reduceTasks, b.reduceTasks); + swap(a.maxMapTasks, b.maxMapTasks); + swap(a.maxReduceTasks, b.maxReduceTasks); + swap(a.state, b.state); + swap(a.__isset, b.__isset); +} + +HiveClusterStatus::HiveClusterStatus(const HiveClusterStatus& other1) { + taskTrackers = other1.taskTrackers; + mapTasks = other1.mapTasks; + reduceTasks = other1.reduceTasks; + maxMapTasks = other1.maxMapTasks; + maxReduceTasks = other1.maxReduceTasks; + state = other1.state; + __isset = other1.__isset; +} +HiveClusterStatus& HiveClusterStatus::operator=(const HiveClusterStatus& other2) { + taskTrackers = other2.taskTrackers; + mapTasks = other2.mapTasks; + reduceTasks = other2.reduceTasks; + maxMapTasks = other2.maxMapTasks; + maxReduceTasks = other2.maxReduceTasks; + state = other2.state; + __isset = other2.__isset; + return *this; +} +void HiveClusterStatus::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "HiveClusterStatus("; + out << "taskTrackers=" << to_string(taskTrackers); + out << ", " << "mapTasks=" << to_string(mapTasks); + out << ", " << "reduceTasks=" << to_string(reduceTasks); + out << ", " << "maxMapTasks=" << to_string(maxMapTasks); + out << ", " << "maxReduceTasks=" << to_string(maxReduceTasks); + out << ", " << "state=" << to_string(state); + out << ")"; +} + + +HiveServerException::~HiveServerException() throw() { +} + + +void HiveServerException::__set_message(const std::string& val) { + this->message = val; +} + +void HiveServerException::__set_errorCode(const int32_t val) { + this->errorCode = val; +} + +void HiveServerException::__set_SQLState(const std::string& val) { + this->SQLState = val; +} + +uint32_t HiveServerException::read(::apache::thrift::protocol::TProtocol* iprot) { + + apache::thrift::protocol::TInputRecursionTracker tracker(*iprot); + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->message); + this->__isset.message = true; + } else { + xfer += 
iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_I32) { + xfer += iprot->readI32(this->errorCode); + this->__isset.errorCode = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRING) { + xfer += iprot->readString(this->SQLState); + this->__isset.SQLState = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t HiveServerException::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + apache::thrift::protocol::TOutputRecursionTracker tracker(*oprot); + xfer += oprot->writeStructBegin("HiveServerException"); + + xfer += oprot->writeFieldBegin("message", ::apache::thrift::protocol::T_STRING, 1); + xfer += oprot->writeString(this->message); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("errorCode", ::apache::thrift::protocol::T_I32, 2); + xfer += oprot->writeI32(this->errorCode); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldBegin("SQLState", ::apache::thrift::protocol::T_STRING, 3); + xfer += oprot->writeString(this->SQLState); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(HiveServerException &a, HiveServerException &b) { + using ::std::swap; + swap(a.message, b.message); + swap(a.errorCode, b.errorCode); + swap(a.SQLState, b.SQLState); + swap(a.__isset, b.__isset); +} + +HiveServerException::HiveServerException(const HiveServerException& other3) : TException() { + message = other3.message; + errorCode = other3.errorCode; + SQLState = other3.SQLState; + __isset = other3.__isset; +} +HiveServerException& HiveServerException::operator=(const HiveServerException& other4) { + message = other4.message; + errorCode = other4.errorCode; + SQLState = other4.SQLState; + __isset = other4.__isset; + return *this; +} +void HiveServerException::printTo(std::ostream& out) const { + using ::apache::thrift::to_string; + out << "HiveServerException("; + out << "message=" << to_string(message); + out << ", " << "errorCode=" << to_string(errorCode); + out << ", " << "SQLState=" << to_string(SQLState); + out << ")"; +} + +const char* HiveServerException::what() const throw() { + try { + std::stringstream ss; + ss << "TException - service has thrown: " << *this; + this->thriftTExceptionMessageHolder_ = ss.str(); + return this->thriftTExceptionMessageHolder_.c_str(); + } catch (const std::exception&) { + return "TException - service has thrown: HiveServerException"; + } +} + +}}} // namespace diff --git a/service/src/gen/thrift/gen-cpp/hive_service_types.h b/service/src/gen/thrift/gen-cpp/hive_service_types.h new file mode 100644 index 0000000..266f8ea --- /dev/null +++ b/service/src/gen/thrift/gen-cpp/hive_service_types.h @@ -0,0 +1,176 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +#ifndef hive_service_TYPES_H +#define hive_service_TYPES_H + +#include <iosfwd> + +#include <thrift/Thrift.h> +#include <thrift/TApplicationException.h> +#include <thrift/protocol/TProtocol.h> +#include <thrift/transport/TTransport.h> + +#include <thrift/cxxfunctional.h> +#include "fb303_types.h" +#include "hive_metastore_types.h" +#include "queryplan_types.h" + + +namespace Apache { namespace Hadoop { namespace Hive { + +struct JobTrackerState { + enum type { + INITIALIZING = 1, + RUNNING = 2 + }; +}; + +extern const std::map<int, const char*> _JobTrackerState_VALUES_TO_NAMES;
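+// Usage sketch (an assumption, not part of the generated header): map a
+// state value to its printable name for logging, e.g.
+//   std::map<int, const char*>::const_iterator it =
+//       _JobTrackerState_VALUES_TO_NAMES.find(JobTrackerState::RUNNING);
+//   const char* name = (it != _JobTrackerState_VALUES_TO_NAMES.end()) ? it->second : "UNKNOWN";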
+ +class HiveClusterStatus; + +class HiveServerException; + +typedef struct _HiveClusterStatus__isset { + _HiveClusterStatus__isset() : taskTrackers(false), mapTasks(false), reduceTasks(false), maxMapTasks(false), maxReduceTasks(false), state(false) {} + bool taskTrackers :1; + bool mapTasks :1; + bool reduceTasks :1; + bool maxMapTasks :1; + bool maxReduceTasks :1; + bool state :1; +} _HiveClusterStatus__isset; + +class HiveClusterStatus { + public: + + HiveClusterStatus(const HiveClusterStatus&); + HiveClusterStatus& operator=(const HiveClusterStatus&); + HiveClusterStatus() : taskTrackers(0), mapTasks(0), reduceTasks(0), maxMapTasks(0), maxReduceTasks(0), state((JobTrackerState::type)0) { + } + + virtual ~HiveClusterStatus() throw(); + int32_t taskTrackers; + int32_t mapTasks; + int32_t reduceTasks; + int32_t maxMapTasks; + int32_t maxReduceTasks; + JobTrackerState::type state; + + _HiveClusterStatus__isset __isset; + + void __set_taskTrackers(const int32_t val); + + void __set_mapTasks(const int32_t val); + + void __set_reduceTasks(const int32_t val); + + void __set_maxMapTasks(const int32_t val); + + void __set_maxReduceTasks(const int32_t val); + + void __set_state(const JobTrackerState::type val); + + bool operator == (const HiveClusterStatus & rhs) const + { + if (!(taskTrackers == rhs.taskTrackers)) + return false; + if (!(mapTasks == rhs.mapTasks)) + return false; + if (!(reduceTasks == rhs.reduceTasks)) + return false; + if (!(maxMapTasks == rhs.maxMapTasks)) + return false; + if (!(maxReduceTasks == rhs.maxReduceTasks)) + return false; + if (!(state == rhs.state)) + return false; + return true; + } + bool operator != (const HiveClusterStatus &rhs) const { + return !(*this == rhs); + } + + bool operator < (const HiveClusterStatus & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; +}; + +void swap(HiveClusterStatus &a, HiveClusterStatus &b); + +inline std::ostream& operator<<(std::ostream& out, const HiveClusterStatus& obj) +{ + obj.printTo(out); + return out; +} + +typedef struct _HiveServerException__isset { + _HiveServerException__isset() : message(false), errorCode(false), SQLState(false) {} + bool message :1; + bool errorCode :1; + bool SQLState :1; +} _HiveServerException__isset; + +class HiveServerException : public ::apache::thrift::TException { + public: + + HiveServerException(const HiveServerException&); + HiveServerException& operator=(const HiveServerException&); + HiveServerException() : message(), errorCode(0), SQLState() { + } + + virtual ~HiveServerException() throw(); + std::string message; + int32_t errorCode; + std::string SQLState; + + _HiveServerException__isset __isset; + + void __set_message(const std::string& val); + + void __set_errorCode(const int32_t val); + + void __set_SQLState(const std::string& val); + + bool operator == (const HiveServerException & rhs) const + { + if (!(message == rhs.message)) + return false; + if (!(errorCode == rhs.errorCode)) + return false; + if (!(SQLState == rhs.SQLState)) + return false; + return true; + } + bool operator != (const HiveServerException &rhs) const { + return !(*this == rhs); + } + + bool operator < (const HiveServerException & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + + virtual void printTo(std::ostream& out) const; + mutable std::string 
thriftTExceptionMessageHolder_;
+  const char* what() const throw();
+};
+
+void swap(HiveServerException &a, HiveServerException &b);
+
+inline std::ostream& operator<<(std::ostream& out, const HiveServerException& obj)
+{
+  obj.printTo(out);
+  return out;
+}
+
+}}} // namespace
+
+#endif
diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java
new file mode 100644
index 0000000..ad89867
--- /dev/null
+++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveClusterStatus.java
@@ -0,0 +1,901 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class HiveClusterStatus implements org.apache.thrift.TBase<HiveClusterStatus, HiveClusterStatus._Fields>, java.io.Serializable, Cloneable, Comparable<HiveClusterStatus> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveClusterStatus");
+
+  private static final org.apache.thrift.protocol.TField TASK_TRACKERS_FIELD_DESC = new org.apache.thrift.protocol.TField("taskTrackers", org.apache.thrift.protocol.TType.I32, (short)1);
+  private static final org.apache.thrift.protocol.TField MAP_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("mapTasks", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField REDUCE_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("reduceTasks", org.apache.thrift.protocol.TType.I32, (short)3);
+  private static final org.apache.thrift.protocol.TField MAX_MAP_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxMapTasks", org.apache.thrift.protocol.TType.I32, (short)4);
+  private static final org.apache.thrift.protocol.TField MAX_REDUCE_TASKS_FIELD_DESC = new org.apache.thrift.protocol.TField("maxReduceTasks", org.apache.thrift.protocol.TType.I32, (short)5);
+  private static final org.apache.thrift.protocol.TField STATE_FIELD_DESC = new org.apache.thrift.protocol.TField("state", org.apache.thrift.protocol.TType.I32, (short)6);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HiveClusterStatusStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HiveClusterStatusTupleSchemeFactory());
+  }
+
+  private int taskTrackers; // required
+  private int mapTasks; // 
required + private int reduceTasks; // required + private int maxMapTasks; // required + private int maxReduceTasks; // required + private JobTrackerState state; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + TASK_TRACKERS((short)1, "taskTrackers"), + MAP_TASKS((short)2, "mapTasks"), + REDUCE_TASKS((short)3, "reduceTasks"), + MAX_MAP_TASKS((short)4, "maxMapTasks"), + MAX_REDUCE_TASKS((short)5, "maxReduceTasks"), + /** + * + * @see JobTrackerState + */ + STATE((short)6, "state"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // TASK_TRACKERS + return TASK_TRACKERS; + case 2: // MAP_TASKS + return MAP_TASKS; + case 3: // REDUCE_TASKS + return REDUCE_TASKS; + case 4: // MAX_MAP_TASKS + return MAX_MAP_TASKS; + case 5: // MAX_REDUCE_TASKS + return MAX_REDUCE_TASKS; + case 6: // STATE + return STATE; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __TASKTRACKERS_ISSET_ID = 0; + private static final int __MAPTASKS_ISSET_ID = 1; + private static final int __REDUCETASKS_ISSET_ID = 2; + private static final int __MAXMAPTASKS_ISSET_ID = 3; + private static final int __MAXREDUCETASKS_ISSET_ID = 4; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.TASK_TRACKERS, new org.apache.thrift.meta_data.FieldMetaData("taskTrackers", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.MAP_TASKS, new org.apache.thrift.meta_data.FieldMetaData("mapTasks", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.REDUCE_TASKS, new org.apache.thrift.meta_data.FieldMetaData("reduceTasks", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.MAX_MAP_TASKS, new org.apache.thrift.meta_data.FieldMetaData("maxMapTasks", org.apache.thrift.TFieldRequirementType.DEFAULT, + new 
org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.MAX_REDUCE_TASKS, new org.apache.thrift.meta_data.FieldMetaData("maxReduceTasks", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.STATE, new org.apache.thrift.meta_data.FieldMetaData("state", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.EnumMetaData(org.apache.thrift.protocol.TType.ENUM, JobTrackerState.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HiveClusterStatus.class, metaDataMap); + } + + public HiveClusterStatus() { + } + + public HiveClusterStatus( + int taskTrackers, + int mapTasks, + int reduceTasks, + int maxMapTasks, + int maxReduceTasks, + JobTrackerState state) + { + this(); + this.taskTrackers = taskTrackers; + setTaskTrackersIsSet(true); + this.mapTasks = mapTasks; + setMapTasksIsSet(true); + this.reduceTasks = reduceTasks; + setReduceTasksIsSet(true); + this.maxMapTasks = maxMapTasks; + setMaxMapTasksIsSet(true); + this.maxReduceTasks = maxReduceTasks; + setMaxReduceTasksIsSet(true); + this.state = state; + } + + /** + * Performs a deep copy on other. + */ + public HiveClusterStatus(HiveClusterStatus other) { + __isset_bitfield = other.__isset_bitfield; + this.taskTrackers = other.taskTrackers; + this.mapTasks = other.mapTasks; + this.reduceTasks = other.reduceTasks; + this.maxMapTasks = other.maxMapTasks; + this.maxReduceTasks = other.maxReduceTasks; + if (other.isSetState()) { + this.state = other.state; + } + } + + public HiveClusterStatus deepCopy() { + return new HiveClusterStatus(this); + } + + @Override + public void clear() { + setTaskTrackersIsSet(false); + this.taskTrackers = 0; + setMapTasksIsSet(false); + this.mapTasks = 0; + setReduceTasksIsSet(false); + this.reduceTasks = 0; + setMaxMapTasksIsSet(false); + this.maxMapTasks = 0; + setMaxReduceTasksIsSet(false); + this.maxReduceTasks = 0; + this.state = null; + } + + public int getTaskTrackers() { + return this.taskTrackers; + } + + public void setTaskTrackers(int taskTrackers) { + this.taskTrackers = taskTrackers; + setTaskTrackersIsSet(true); + } + + public void unsetTaskTrackers() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __TASKTRACKERS_ISSET_ID); + } + + /** Returns true if field taskTrackers is set (has been assigned a value) and false otherwise */ + public boolean isSetTaskTrackers() { + return EncodingUtils.testBit(__isset_bitfield, __TASKTRACKERS_ISSET_ID); + } + + public void setTaskTrackersIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __TASKTRACKERS_ISSET_ID, value); + } + + public int getMapTasks() { + return this.mapTasks; + } + + public void setMapTasks(int mapTasks) { + this.mapTasks = mapTasks; + setMapTasksIsSet(true); + } + + public void unsetMapTasks() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAPTASKS_ISSET_ID); + } + + /** Returns true if field mapTasks is set (has been assigned a value) and false otherwise */ + public boolean isSetMapTasks() { + return EncodingUtils.testBit(__isset_bitfield, __MAPTASKS_ISSET_ID); + } + + public void setMapTasksIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAPTASKS_ISSET_ID, value); + } + + public int getReduceTasks() { + return this.reduceTasks; + } + + public void setReduceTasks(int reduceTasks) { 
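+    // As with the other int fields above, the setter both assigns the value and
+    // flips the field's bit in __isset_bitfield; a primitive has no null to
+    // signal "unset", so presence is tracked separately.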
+ this.reduceTasks = reduceTasks; + setReduceTasksIsSet(true); + } + + public void unsetReduceTasks() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __REDUCETASKS_ISSET_ID); + } + + /** Returns true if field reduceTasks is set (has been assigned a value) and false otherwise */ + public boolean isSetReduceTasks() { + return EncodingUtils.testBit(__isset_bitfield, __REDUCETASKS_ISSET_ID); + } + + public void setReduceTasksIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __REDUCETASKS_ISSET_ID, value); + } + + public int getMaxMapTasks() { + return this.maxMapTasks; + } + + public void setMaxMapTasks(int maxMapTasks) { + this.maxMapTasks = maxMapTasks; + setMaxMapTasksIsSet(true); + } + + public void unsetMaxMapTasks() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXMAPTASKS_ISSET_ID); + } + + /** Returns true if field maxMapTasks is set (has been assigned a value) and false otherwise */ + public boolean isSetMaxMapTasks() { + return EncodingUtils.testBit(__isset_bitfield, __MAXMAPTASKS_ISSET_ID); + } + + public void setMaxMapTasksIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXMAPTASKS_ISSET_ID, value); + } + + public int getMaxReduceTasks() { + return this.maxReduceTasks; + } + + public void setMaxReduceTasks(int maxReduceTasks) { + this.maxReduceTasks = maxReduceTasks; + setMaxReduceTasksIsSet(true); + } + + public void unsetMaxReduceTasks() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __MAXREDUCETASKS_ISSET_ID); + } + + /** Returns true if field maxReduceTasks is set (has been assigned a value) and false otherwise */ + public boolean isSetMaxReduceTasks() { + return EncodingUtils.testBit(__isset_bitfield, __MAXREDUCETASKS_ISSET_ID); + } + + public void setMaxReduceTasksIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __MAXREDUCETASKS_ISSET_ID, value); + } + + /** + * + * @see JobTrackerState + */ + public JobTrackerState getState() { + return this.state; + } + + /** + * + * @see JobTrackerState + */ + public void setState(JobTrackerState state) { + this.state = state; + } + + public void unsetState() { + this.state = null; + } + + /** Returns true if field state is set (has been assigned a value) and false otherwise */ + public boolean isSetState() { + return this.state != null; + } + + public void setStateIsSet(boolean value) { + if (!value) { + this.state = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case TASK_TRACKERS: + if (value == null) { + unsetTaskTrackers(); + } else { + setTaskTrackers((Integer)value); + } + break; + + case MAP_TASKS: + if (value == null) { + unsetMapTasks(); + } else { + setMapTasks((Integer)value); + } + break; + + case REDUCE_TASKS: + if (value == null) { + unsetReduceTasks(); + } else { + setReduceTasks((Integer)value); + } + break; + + case MAX_MAP_TASKS: + if (value == null) { + unsetMaxMapTasks(); + } else { + setMaxMapTasks((Integer)value); + } + break; + + case MAX_REDUCE_TASKS: + if (value == null) { + unsetMaxReduceTasks(); + } else { + setMaxReduceTasks((Integer)value); + } + break; + + case STATE: + if (value == null) { + unsetState(); + } else { + setState((JobTrackerState)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case TASK_TRACKERS: + return getTaskTrackers(); + + case MAP_TASKS: + return getMapTasks(); + + case REDUCE_TASKS: + return getReduceTasks(); + + case MAX_MAP_TASKS: 
+ return getMaxMapTasks(); + + case MAX_REDUCE_TASKS: + return getMaxReduceTasks(); + + case STATE: + return getState(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case TASK_TRACKERS: + return isSetTaskTrackers(); + case MAP_TASKS: + return isSetMapTasks(); + case REDUCE_TASKS: + return isSetReduceTasks(); + case MAX_MAP_TASKS: + return isSetMaxMapTasks(); + case MAX_REDUCE_TASKS: + return isSetMaxReduceTasks(); + case STATE: + return isSetState(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HiveClusterStatus) + return this.equals((HiveClusterStatus)that); + return false; + } + + public boolean equals(HiveClusterStatus that) { + if (that == null) + return false; + + boolean this_present_taskTrackers = true; + boolean that_present_taskTrackers = true; + if (this_present_taskTrackers || that_present_taskTrackers) { + if (!(this_present_taskTrackers && that_present_taskTrackers)) + return false; + if (this.taskTrackers != that.taskTrackers) + return false; + } + + boolean this_present_mapTasks = true; + boolean that_present_mapTasks = true; + if (this_present_mapTasks || that_present_mapTasks) { + if (!(this_present_mapTasks && that_present_mapTasks)) + return false; + if (this.mapTasks != that.mapTasks) + return false; + } + + boolean this_present_reduceTasks = true; + boolean that_present_reduceTasks = true; + if (this_present_reduceTasks || that_present_reduceTasks) { + if (!(this_present_reduceTasks && that_present_reduceTasks)) + return false; + if (this.reduceTasks != that.reduceTasks) + return false; + } + + boolean this_present_maxMapTasks = true; + boolean that_present_maxMapTasks = true; + if (this_present_maxMapTasks || that_present_maxMapTasks) { + if (!(this_present_maxMapTasks && that_present_maxMapTasks)) + return false; + if (this.maxMapTasks != that.maxMapTasks) + return false; + } + + boolean this_present_maxReduceTasks = true; + boolean that_present_maxReduceTasks = true; + if (this_present_maxReduceTasks || that_present_maxReduceTasks) { + if (!(this_present_maxReduceTasks && that_present_maxReduceTasks)) + return false; + if (this.maxReduceTasks != that.maxReduceTasks) + return false; + } + + boolean this_present_state = true && this.isSetState(); + boolean that_present_state = true && that.isSetState(); + if (this_present_state || that_present_state) { + if (!(this_present_state && that_present_state)) + return false; + if (!this.state.equals(that.state)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_taskTrackers = true; + list.add(present_taskTrackers); + if (present_taskTrackers) + list.add(taskTrackers); + + boolean present_mapTasks = true; + list.add(present_mapTasks); + if (present_mapTasks) + list.add(mapTasks); + + boolean present_reduceTasks = true; + list.add(present_reduceTasks); + if (present_reduceTasks) + list.add(reduceTasks); + + boolean present_maxMapTasks = true; + list.add(present_maxMapTasks); + if (present_maxMapTasks) + list.add(maxMapTasks); + + boolean present_maxReduceTasks = true; + list.add(present_maxReduceTasks); + if (present_maxReduceTasks) + list.add(maxReduceTasks); + + boolean present_state = true && 
(isSetState()); + list.add(present_state); + if (present_state) + list.add(state.getValue()); + + return list.hashCode(); + } + + @Override + public int compareTo(HiveClusterStatus other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetTaskTrackers()).compareTo(other.isSetTaskTrackers()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetTaskTrackers()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.taskTrackers, other.taskTrackers); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMapTasks()).compareTo(other.isSetMapTasks()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMapTasks()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.mapTasks, other.mapTasks); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetReduceTasks()).compareTo(other.isSetReduceTasks()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetReduceTasks()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.reduceTasks, other.reduceTasks); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMaxMapTasks()).compareTo(other.isSetMaxMapTasks()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMaxMapTasks()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxMapTasks, other.maxMapTasks); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetMaxReduceTasks()).compareTo(other.isSetMaxReduceTasks()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMaxReduceTasks()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.maxReduceTasks, other.maxReduceTasks); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetState()).compareTo(other.isSetState()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetState()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.state, other.state); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("HiveClusterStatus("); + boolean first = true; + + sb.append("taskTrackers:"); + sb.append(this.taskTrackers); + first = false; + if (!first) sb.append(", "); + sb.append("mapTasks:"); + sb.append(this.mapTasks); + first = false; + if (!first) sb.append(", "); + sb.append("reduceTasks:"); + sb.append(this.reduceTasks); + first = false; + if (!first) sb.append(", "); + sb.append("maxMapTasks:"); + sb.append(this.maxMapTasks); + first = false; + if (!first) sb.append(", "); + sb.append("maxReduceTasks:"); + sb.append(this.maxReduceTasks); + first = false; + if (!first) sb.append(", "); + sb.append("state:"); + if (this.state == null) { + sb.append("null"); + } else { + 
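+      // Non-null enum field: appending it invokes JobTrackerState.toString(),
+      // i.e. the constant's name rather than its wire integer.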
sb.append(this.state); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class HiveClusterStatusStandardSchemeFactory implements SchemeFactory { + public HiveClusterStatusStandardScheme getScheme() { + return new HiveClusterStatusStandardScheme(); + } + } + + private static class HiveClusterStatusStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, HiveClusterStatus struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // TASK_TRACKERS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.taskTrackers = iprot.readI32(); + struct.setTaskTrackersIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // MAP_TASKS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.mapTasks = iprot.readI32(); + struct.setMapTasksIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // REDUCE_TASKS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.reduceTasks = iprot.readI32(); + struct.setReduceTasksIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // MAX_MAP_TASKS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.maxMapTasks = iprot.readI32(); + struct.setMaxMapTasksIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 5: // MAX_REDUCE_TASKS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.maxReduceTasks = iprot.readI32(); + struct.setMaxReduceTasksIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 6: // STATE + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.state = org.apache.hadoop.hive.service.JobTrackerState.findByValue(iprot.readI32()); + struct.setStateIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, HiveClusterStatus 
struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(TASK_TRACKERS_FIELD_DESC); + oprot.writeI32(struct.taskTrackers); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(MAP_TASKS_FIELD_DESC); + oprot.writeI32(struct.mapTasks); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(REDUCE_TASKS_FIELD_DESC); + oprot.writeI32(struct.reduceTasks); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(MAX_MAP_TASKS_FIELD_DESC); + oprot.writeI32(struct.maxMapTasks); + oprot.writeFieldEnd(); + oprot.writeFieldBegin(MAX_REDUCE_TASKS_FIELD_DESC); + oprot.writeI32(struct.maxReduceTasks); + oprot.writeFieldEnd(); + if (struct.state != null) { + oprot.writeFieldBegin(STATE_FIELD_DESC); + oprot.writeI32(struct.state.getValue()); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class HiveClusterStatusTupleSchemeFactory implements SchemeFactory { + public HiveClusterStatusTupleScheme getScheme() { + return new HiveClusterStatusTupleScheme(); + } + } + + private static class HiveClusterStatusTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, HiveClusterStatus struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetTaskTrackers()) { + optionals.set(0); + } + if (struct.isSetMapTasks()) { + optionals.set(1); + } + if (struct.isSetReduceTasks()) { + optionals.set(2); + } + if (struct.isSetMaxMapTasks()) { + optionals.set(3); + } + if (struct.isSetMaxReduceTasks()) { + optionals.set(4); + } + if (struct.isSetState()) { + optionals.set(5); + } + oprot.writeBitSet(optionals, 6); + if (struct.isSetTaskTrackers()) { + oprot.writeI32(struct.taskTrackers); + } + if (struct.isSetMapTasks()) { + oprot.writeI32(struct.mapTasks); + } + if (struct.isSetReduceTasks()) { + oprot.writeI32(struct.reduceTasks); + } + if (struct.isSetMaxMapTasks()) { + oprot.writeI32(struct.maxMapTasks); + } + if (struct.isSetMaxReduceTasks()) { + oprot.writeI32(struct.maxReduceTasks); + } + if (struct.isSetState()) { + oprot.writeI32(struct.state.getValue()); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, HiveClusterStatus struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(6); + if (incoming.get(0)) { + struct.taskTrackers = iprot.readI32(); + struct.setTaskTrackersIsSet(true); + } + if (incoming.get(1)) { + struct.mapTasks = iprot.readI32(); + struct.setMapTasksIsSet(true); + } + if (incoming.get(2)) { + struct.reduceTasks = iprot.readI32(); + struct.setReduceTasksIsSet(true); + } + if (incoming.get(3)) { + struct.maxMapTasks = iprot.readI32(); + struct.setMaxMapTasksIsSet(true); + } + if (incoming.get(4)) { + struct.maxReduceTasks = iprot.readI32(); + struct.setMaxReduceTasksIsSet(true); + } + if (incoming.get(5)) { + struct.state = org.apache.hadoop.hive.service.JobTrackerState.findByValue(iprot.readI32()); + struct.setStateIsSet(true); + } + } + } + +} + diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java new file mode 100644 index 0000000..97b1219 --- /dev/null +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/HiveServerException.java @@ -0,0 +1,601 @@ +/** + * Autogenerated by 
Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class HiveServerException extends TException implements org.apache.thrift.TBase<HiveServerException, HiveServerException._Fields>, java.io.Serializable, Cloneable, Comparable<HiveServerException> {
+  private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("HiveServerException");
+
+  private static final org.apache.thrift.protocol.TField MESSAGE_FIELD_DESC = new org.apache.thrift.protocol.TField("message", org.apache.thrift.protocol.TType.STRING, (short)1);
+  private static final org.apache.thrift.protocol.TField ERROR_CODE_FIELD_DESC = new org.apache.thrift.protocol.TField("errorCode", org.apache.thrift.protocol.TType.I32, (short)2);
+  private static final org.apache.thrift.protocol.TField SQLSTATE_FIELD_DESC = new org.apache.thrift.protocol.TField("SQLState", org.apache.thrift.protocol.TType.STRING, (short)3);
+
+  private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+  static {
+    schemes.put(StandardScheme.class, new HiveServerExceptionStandardSchemeFactory());
+    schemes.put(TupleScheme.class, new HiveServerExceptionTupleSchemeFactory());
+  }
+
+  private String message; // required
+  private int errorCode; // required
+  private String SQLState; // required
+
+  /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+  public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+    MESSAGE((short)1, "message"),
+    ERROR_CODE((short)2, "errorCode"),
+    SQLSTATE((short)3, "SQLState");
+
+    private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+    static {
+      for (_Fields field : EnumSet.allOf(_Fields.class)) {
+        byName.put(field.getFieldName(), field);
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, or null if its not found.
+     */
+    public static _Fields findByThriftId(int fieldId) {
+      switch(fieldId) {
+        case 1: // MESSAGE
+          return MESSAGE;
+        case 2: // ERROR_CODE
+          return ERROR_CODE;
+        case 3: // SQLSTATE
+          return SQLSTATE;
+        default:
+          return null;
+      }
+    }
+
+    /**
+     * Find the _Fields constant that matches fieldId, throwing an exception
+     * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __ERRORCODE_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.MESSAGE, new org.apache.thrift.meta_data.FieldMetaData("message", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.ERROR_CODE, new org.apache.thrift.meta_data.FieldMetaData("errorCode", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + tmpMap.put(_Fields.SQLSTATE, new org.apache.thrift.meta_data.FieldMetaData("SQLState", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(HiveServerException.class, metaDataMap); + } + + public HiveServerException() { + } + + public HiveServerException( + String message, + int errorCode, + String SQLState) + { + this(); + this.message = message; + this.errorCode = errorCode; + setErrorCodeIsSet(true); + this.SQLState = SQLState; + } + + /** + * Performs a deep copy on other. 
+ */ + public HiveServerException(HiveServerException other) { + __isset_bitfield = other.__isset_bitfield; + if (other.isSetMessage()) { + this.message = other.message; + } + this.errorCode = other.errorCode; + if (other.isSetSQLState()) { + this.SQLState = other.SQLState; + } + } + + public HiveServerException deepCopy() { + return new HiveServerException(this); + } + + @Override + public void clear() { + this.message = null; + setErrorCodeIsSet(false); + this.errorCode = 0; + this.SQLState = null; + } + + public String getMessage() { + return this.message; + } + + public void setMessage(String message) { + this.message = message; + } + + public void unsetMessage() { + this.message = null; + } + + /** Returns true if field message is set (has been assigned a value) and false otherwise */ + public boolean isSetMessage() { + return this.message != null; + } + + public void setMessageIsSet(boolean value) { + if (!value) { + this.message = null; + } + } + + public int getErrorCode() { + return this.errorCode; + } + + public void setErrorCode(int errorCode) { + this.errorCode = errorCode; + setErrorCodeIsSet(true); + } + + public void unsetErrorCode() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __ERRORCODE_ISSET_ID); + } + + /** Returns true if field errorCode is set (has been assigned a value) and false otherwise */ + public boolean isSetErrorCode() { + return EncodingUtils.testBit(__isset_bitfield, __ERRORCODE_ISSET_ID); + } + + public void setErrorCodeIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __ERRORCODE_ISSET_ID, value); + } + + public String getSQLState() { + return this.SQLState; + } + + public void setSQLState(String SQLState) { + this.SQLState = SQLState; + } + + public void unsetSQLState() { + this.SQLState = null; + } + + /** Returns true if field SQLState is set (has been assigned a value) and false otherwise */ + public boolean isSetSQLState() { + return this.SQLState != null; + } + + public void setSQLStateIsSet(boolean value) { + if (!value) { + this.SQLState = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case MESSAGE: + if (value == null) { + unsetMessage(); + } else { + setMessage((String)value); + } + break; + + case ERROR_CODE: + if (value == null) { + unsetErrorCode(); + } else { + setErrorCode((Integer)value); + } + break; + + case SQLSTATE: + if (value == null) { + unsetSQLState(); + } else { + setSQLState((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case MESSAGE: + return getMessage(); + + case ERROR_CODE: + return getErrorCode(); + + case SQLSTATE: + return getSQLState(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case MESSAGE: + return isSetMessage(); + case ERROR_CODE: + return isSetErrorCode(); + case SQLSTATE: + return isSetSQLState(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof HiveServerException) + return this.equals((HiveServerException)that); + return false; + } + + public boolean equals(HiveServerException that) { + if (that == null) + return false; + + boolean this_present_message = true && this.isSetMessage(); + boolean 
that_present_message = true && that.isSetMessage(); + if (this_present_message || that_present_message) { + if (!(this_present_message && that_present_message)) + return false; + if (!this.message.equals(that.message)) + return false; + } + + boolean this_present_errorCode = true; + boolean that_present_errorCode = true; + if (this_present_errorCode || that_present_errorCode) { + if (!(this_present_errorCode && that_present_errorCode)) + return false; + if (this.errorCode != that.errorCode) + return false; + } + + boolean this_present_SQLState = true && this.isSetSQLState(); + boolean that_present_SQLState = true && that.isSetSQLState(); + if (this_present_SQLState || that_present_SQLState) { + if (!(this_present_SQLState && that_present_SQLState)) + return false; + if (!this.SQLState.equals(that.SQLState)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_message = true && (isSetMessage()); + list.add(present_message); + if (present_message) + list.add(message); + + boolean present_errorCode = true; + list.add(present_errorCode); + if (present_errorCode) + list.add(errorCode); + + boolean present_SQLState = true && (isSetSQLState()); + list.add(present_SQLState); + if (present_SQLState) + list.add(SQLState); + + return list.hashCode(); + } + + @Override + public int compareTo(HiveServerException other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetMessage()).compareTo(other.isSetMessage()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetMessage()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.message, other.message); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetErrorCode()).compareTo(other.isSetErrorCode()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetErrorCode()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.errorCode, other.errorCode); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetSQLState()).compareTo(other.isSetSQLState()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSQLState()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.SQLState, other.SQLState); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("HiveServerException("); + boolean first = true; + + sb.append("message:"); + if (this.message == null) { + sb.append("null"); + } else { + sb.append(this.message); + } + first = false; + if (!first) sb.append(", "); + sb.append("errorCode:"); + sb.append(this.errorCode); + first = false; + if (!first) sb.append(", "); + sb.append("SQLState:"); + if (this.SQLState == null) { + sb.append("null"); + } else { + sb.append(this.SQLState); + } + first = false; + sb.append(")"); + return 
sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class HiveServerExceptionStandardSchemeFactory implements SchemeFactory { + public HiveServerExceptionStandardScheme getScheme() { + return new HiveServerExceptionStandardScheme(); + } + } + + private static class HiveServerExceptionStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, HiveServerException struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // MESSAGE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.message = iprot.readString(); + struct.setMessageIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // ERROR_CODE + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.errorCode = iprot.readI32(); + struct.setErrorCodeIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // SQLSTATE + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.SQLState = iprot.readString(); + struct.setSQLStateIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, HiveServerException struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.message != null) { + oprot.writeFieldBegin(MESSAGE_FIELD_DESC); + oprot.writeString(struct.message); + oprot.writeFieldEnd(); + } + oprot.writeFieldBegin(ERROR_CODE_FIELD_DESC); + oprot.writeI32(struct.errorCode); + oprot.writeFieldEnd(); + if (struct.SQLState != null) { + oprot.writeFieldBegin(SQLSTATE_FIELD_DESC); + oprot.writeString(struct.SQLState); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class HiveServerExceptionTupleSchemeFactory implements SchemeFactory { + public HiveServerExceptionTupleScheme getScheme() { + return new HiveServerExceptionTupleScheme(); + } + } + + private static class HiveServerExceptionTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol 
prot, HiveServerException struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetMessage()) { + optionals.set(0); + } + if (struct.isSetErrorCode()) { + optionals.set(1); + } + if (struct.isSetSQLState()) { + optionals.set(2); + } + oprot.writeBitSet(optionals, 3); + if (struct.isSetMessage()) { + oprot.writeString(struct.message); + } + if (struct.isSetErrorCode()) { + oprot.writeI32(struct.errorCode); + } + if (struct.isSetSQLState()) { + oprot.writeString(struct.SQLState); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, HiveServerException struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(3); + if (incoming.get(0)) { + struct.message = iprot.readString(); + struct.setMessageIsSet(true); + } + if (incoming.get(1)) { + struct.errorCode = iprot.readI32(); + struct.setErrorCodeIsSet(true); + } + if (incoming.get(2)) { + struct.SQLState = iprot.readString(); + struct.setSQLStateIsSet(true); + } + } + } + +} + diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/JobTrackerState.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/JobTrackerState.java new file mode 100644 index 0000000..46c71ee --- /dev/null +++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/JobTrackerState.java @@ -0,0 +1,45 @@ +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.service; + + +import java.util.Map; +import java.util.HashMap; +import org.apache.thrift.TEnum; + +public enum JobTrackerState implements org.apache.thrift.TEnum { + INITIALIZING(1), + RUNNING(2); + + private final int value; + + private JobTrackerState(int value) { + this.value = value; + } + + /** + * Get the integer value of this enum value, as defined in the Thrift IDL. + */ + public int getValue() { + return value; + } + + /** + * Find a the enum type by its integer value, as defined in the Thrift IDL. + * @return null if the value is not found. 
+ */
+  public static JobTrackerState findByValue(int value) { 
+    switch (value) {
+      case 1:
+        return INITIALIZING;
+      case 2:
+        return RUNNING;
+      default:
+        return null;
+    }
+  }
+}
diff --git a/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
new file mode 100644
index 0000000..934a8a5
--- /dev/null
+++ b/service/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/service/ThriftHive.java
@@ -0,0 +1,7784 @@
+/**
+ * Autogenerated by Thrift Compiler (0.9.3)
+ *
+ * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
+ * @generated
+ */
+package org.apache.hadoop.hive.service;
+
+import org.apache.thrift.scheme.IScheme;
+import org.apache.thrift.scheme.SchemeFactory;
+import org.apache.thrift.scheme.StandardScheme;
+
+import org.apache.thrift.scheme.TupleScheme;
+import org.apache.thrift.protocol.TTupleProtocol;
+import org.apache.thrift.protocol.TProtocolException;
+import org.apache.thrift.EncodingUtils;
+import org.apache.thrift.TException;
+import org.apache.thrift.async.AsyncMethodCallback;
+import org.apache.thrift.server.AbstractNonblockingServer.*;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Map;
+import java.util.HashMap;
+import java.util.EnumMap;
+import java.util.Set;
+import java.util.HashSet;
+import java.util.EnumSet;
+import java.util.Collections;
+import java.util.BitSet;
+import java.nio.ByteBuffer;
+import java.util.Arrays;
+import javax.annotation.Generated;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+@SuppressWarnings({"cast", "rawtypes", "serial", "unchecked"})
+@Generated(value = "Autogenerated by Thrift Compiler (0.9.3)")
+public class ThriftHive {
+
+  public interface Iface extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Iface {
+
+    public void execute(String query) throws HiveServerException, org.apache.thrift.TException;
+
+    public String fetchOne() throws HiveServerException, org.apache.thrift.TException;
+
+    public List<String> fetchN(int numRows) throws HiveServerException, org.apache.thrift.TException;
+
+    public List<String> fetchAll() throws HiveServerException, org.apache.thrift.TException;
+
+    public org.apache.hadoop.hive.metastore.api.Schema getSchema() throws HiveServerException, org.apache.thrift.TException;
+
+    public org.apache.hadoop.hive.metastore.api.Schema getThriftSchema() throws HiveServerException, org.apache.thrift.TException;
+
+    public HiveClusterStatus getClusterStatus() throws HiveServerException, org.apache.thrift.TException;
+
+    public org.apache.hadoop.hive.ql.plan.api.QueryPlan getQueryPlan() throws HiveServerException, org.apache.thrift.TException;
+
+    public void clean() throws org.apache.thrift.TException;
+
+  }
+
+  public interface AsyncIface extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.AsyncIface {
+
+    public void execute(String query, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void fetchOne(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void fetchN(int numRows, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void fetchAll(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void getSchema(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException;
+
+    public void 
getThriftSchema(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void getClusterStatus(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void getQueryPlan(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + public void clean(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + + } + + public static class Client extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Client implements Iface { + public static class Factory implements org.apache.thrift.TServiceClientFactory { + public Factory() {} + public Client getClient(org.apache.thrift.protocol.TProtocol prot) { + return new Client(prot); + } + public Client getClient(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + return new Client(iprot, oprot); + } + } + + public Client(org.apache.thrift.protocol.TProtocol prot) + { + super(prot, prot); + } + + public Client(org.apache.thrift.protocol.TProtocol iprot, org.apache.thrift.protocol.TProtocol oprot) { + super(iprot, oprot); + } + + public void execute(String query) throws HiveServerException, org.apache.thrift.TException + { + send_execute(query); + recv_execute(); + } + + public void send_execute(String query) throws org.apache.thrift.TException + { + execute_args args = new execute_args(); + args.setQuery(query); + sendBase("execute", args); + } + + public void recv_execute() throws HiveServerException, org.apache.thrift.TException + { + execute_result result = new execute_result(); + receiveBase(result, "execute"); + if (result.ex != null) { + throw result.ex; + } + return; + } + + public String fetchOne() throws HiveServerException, org.apache.thrift.TException + { + send_fetchOne(); + return recv_fetchOne(); + } + + public void send_fetchOne() throws org.apache.thrift.TException + { + fetchOne_args args = new fetchOne_args(); + sendBase("fetchOne", args); + } + + public String recv_fetchOne() throws HiveServerException, org.apache.thrift.TException + { + fetchOne_result result = new fetchOne_result(); + receiveBase(result, "fetchOne"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.ex != null) { + throw result.ex; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "fetchOne failed: unknown result"); + } + + public List fetchN(int numRows) throws HiveServerException, org.apache.thrift.TException + { + send_fetchN(numRows); + return recv_fetchN(); + } + + public void send_fetchN(int numRows) throws org.apache.thrift.TException + { + fetchN_args args = new fetchN_args(); + args.setNumRows(numRows); + sendBase("fetchN", args); + } + + public List recv_fetchN() throws HiveServerException, org.apache.thrift.TException + { + fetchN_result result = new fetchN_result(); + receiveBase(result, "fetchN"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.ex != null) { + throw result.ex; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "fetchN failed: unknown result"); + } + + public List fetchAll() throws HiveServerException, org.apache.thrift.TException + { + send_fetchAll(); + return recv_fetchAll(); + } + + public void send_fetchAll() throws org.apache.thrift.TException + { + fetchAll_args args = new fetchAll_args(); + sendBase("fetchAll", args); + } + + public List 
recv_fetchAll() throws HiveServerException, org.apache.thrift.TException + { + fetchAll_result result = new fetchAll_result(); + receiveBase(result, "fetchAll"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.ex != null) { + throw result.ex; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "fetchAll failed: unknown result"); + } + + public org.apache.hadoop.hive.metastore.api.Schema getSchema() throws HiveServerException, org.apache.thrift.TException + { + send_getSchema(); + return recv_getSchema(); + } + + public void send_getSchema() throws org.apache.thrift.TException + { + getSchema_args args = new getSchema_args(); + sendBase("getSchema", args); + } + + public org.apache.hadoop.hive.metastore.api.Schema recv_getSchema() throws HiveServerException, org.apache.thrift.TException + { + getSchema_result result = new getSchema_result(); + receiveBase(result, "getSchema"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.ex != null) { + throw result.ex; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getSchema failed: unknown result"); + } + + public org.apache.hadoop.hive.metastore.api.Schema getThriftSchema() throws HiveServerException, org.apache.thrift.TException + { + send_getThriftSchema(); + return recv_getThriftSchema(); + } + + public void send_getThriftSchema() throws org.apache.thrift.TException + { + getThriftSchema_args args = new getThriftSchema_args(); + sendBase("getThriftSchema", args); + } + + public org.apache.hadoop.hive.metastore.api.Schema recv_getThriftSchema() throws HiveServerException, org.apache.thrift.TException + { + getThriftSchema_result result = new getThriftSchema_result(); + receiveBase(result, "getThriftSchema"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.ex != null) { + throw result.ex; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getThriftSchema failed: unknown result"); + } + + public HiveClusterStatus getClusterStatus() throws HiveServerException, org.apache.thrift.TException + { + send_getClusterStatus(); + return recv_getClusterStatus(); + } + + public void send_getClusterStatus() throws org.apache.thrift.TException + { + getClusterStatus_args args = new getClusterStatus_args(); + sendBase("getClusterStatus", args); + } + + public HiveClusterStatus recv_getClusterStatus() throws HiveServerException, org.apache.thrift.TException + { + getClusterStatus_result result = new getClusterStatus_result(); + receiveBase(result, "getClusterStatus"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.ex != null) { + throw result.ex; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getClusterStatus failed: unknown result"); + } + + public org.apache.hadoop.hive.ql.plan.api.QueryPlan getQueryPlan() throws HiveServerException, org.apache.thrift.TException + { + send_getQueryPlan(); + return recv_getQueryPlan(); + } + + public void send_getQueryPlan() throws org.apache.thrift.TException + { + getQueryPlan_args args = new getQueryPlan_args(); + sendBase("getQueryPlan", args); + } + + public org.apache.hadoop.hive.ql.plan.api.QueryPlan recv_getQueryPlan() throws HiveServerException, org.apache.thrift.TException + { + getQueryPlan_result result = new getQueryPlan_result(); + receiveBase(result, "getQueryPlan"); + 
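+      // Generated recv_ pattern, as in the methods above: deserialize the result
+      // struct, rethrow the declared service exception if the server set one,
+      // otherwise require the success field or fail with MISSING_RESULT.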
if (result.isSetSuccess()) {
+        return result.success;
+      }
+      if (result.ex != null) {
+        throw result.ex;
+      }
+      throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "getQueryPlan failed: unknown result");
+    }
+
+    public void clean() throws org.apache.thrift.TException
+    {
+      send_clean();
+      recv_clean();
+    }
+
+    public void send_clean() throws org.apache.thrift.TException
+    {
+      clean_args args = new clean_args();
+      sendBase("clean", args);
+    }
+
+    public void recv_clean() throws org.apache.thrift.TException
+    {
+      clean_result result = new clean_result();
+      receiveBase(result, "clean");
+      return;
+    }
+
+  }
+  public static class AsyncClient extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.AsyncClient implements AsyncIface {
+    public static class Factory implements org.apache.thrift.async.TAsyncClientFactory<AsyncClient> {
+      private org.apache.thrift.async.TAsyncClientManager clientManager;
+      private org.apache.thrift.protocol.TProtocolFactory protocolFactory;
+      public Factory(org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.protocol.TProtocolFactory protocolFactory) {
+        this.clientManager = clientManager;
+        this.protocolFactory = protocolFactory;
+      }
+      public AsyncClient getAsyncClient(org.apache.thrift.transport.TNonblockingTransport transport) {
+        return new AsyncClient(protocolFactory, clientManager, transport);
+      }
+    }
+
+    public AsyncClient(org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.async.TAsyncClientManager clientManager, org.apache.thrift.transport.TNonblockingTransport transport) {
+      super(protocolFactory, clientManager, transport);
+    }
+
+    public void execute(String query, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      execute_call method_call = new execute_call(query, resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class execute_call extends org.apache.thrift.async.TAsyncMethodCall {
+      private String query;
+      public execute_call(String query, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+        this.query = query;
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("execute", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        execute_args args = new execute_args();
+        args.setQuery(query);
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws HiveServerException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_execute();
+      }
+    }
+
+    public void fetchOne(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
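+      // checkReady() enforces TAsyncClient's single-outstanding-request contract and
+      // throws IllegalStateException if a previous async call has not yet completed.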
+ fetchOne_call method_call = new fetchOne_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class fetchOne_call extends org.apache.thrift.async.TAsyncMethodCall { + public fetchOne_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("fetchOne", org.apache.thrift.protocol.TMessageType.CALL, 0)); + fetchOne_args args = new fetchOne_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public String getResult() throws HiveServerException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_fetchOne(); + } + } + + public void fetchN(int numRows, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + fetchN_call method_call = new fetchN_call(numRows, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class fetchN_call extends org.apache.thrift.async.TAsyncMethodCall { + private int numRows; + public fetchN_call(int numRows, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.numRows = numRows; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("fetchN", org.apache.thrift.protocol.TMessageType.CALL, 0)); + fetchN_args args = new fetchN_args(); + args.setNumRows(numRows); + args.write(prot); + prot.writeMessageEnd(); + } + + public List getResult() throws HiveServerException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_fetchN(); + } + } + + public void fetchAll(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + fetchAll_call method_call = new fetchAll_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + 
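// ___manager is the shared TAsyncClientManager selector thread; it writes the
+      // request frame and invokes resultHandler once the response is read. +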
} + + public static class fetchAll_call extends org.apache.thrift.async.TAsyncMethodCall { + public fetchAll_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("fetchAll", org.apache.thrift.protocol.TMessageType.CALL, 0)); + fetchAll_args args = new fetchAll_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public List getResult() throws HiveServerException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_fetchAll(); + } + } + + public void getSchema(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + getSchema_call method_call = new getSchema_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class getSchema_call extends org.apache.thrift.async.TAsyncMethodCall { + public getSchema_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getSchema", org.apache.thrift.protocol.TMessageType.CALL, 0)); + getSchema_args args = new getSchema_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public org.apache.hadoop.hive.metastore.api.Schema getResult() throws HiveServerException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_getSchema(); + } + } + + public void getThriftSchema(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + getThriftSchema_call method_call = new getThriftSchema_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class getThriftSchema_call extends org.apache.thrift.async.TAsyncMethodCall { + public getThriftSchema_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, 
org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getThriftSchema", org.apache.thrift.protocol.TMessageType.CALL, 0)); + getThriftSchema_args args = new getThriftSchema_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public org.apache.hadoop.hive.metastore.api.Schema getResult() throws HiveServerException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_getThriftSchema(); + } + } + + public void getClusterStatus(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + getClusterStatus_call method_call = new getClusterStatus_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class getClusterStatus_call extends org.apache.thrift.async.TAsyncMethodCall { + public getClusterStatus_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getClusterStatus", org.apache.thrift.protocol.TMessageType.CALL, 0)); + getClusterStatus_args args = new getClusterStatus_args(); + args.write(prot); + prot.writeMessageEnd(); + } + + public HiveClusterStatus getResult() throws HiveServerException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_getClusterStatus(); + } + } + + public void getQueryPlan(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + getQueryPlan_call method_call = new getQueryPlan_call(resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class getQueryPlan_call extends org.apache.thrift.async.TAsyncMethodCall { + public getQueryPlan_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory 
protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("getQueryPlan", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        getQueryPlan_args args = new getQueryPlan_args();
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public org.apache.hadoop.hive.ql.plan.api.QueryPlan getResult() throws HiveServerException, org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        return (new Client(prot)).recv_getQueryPlan();
+      }
+    }
+
+    public void clean(org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException {
+      checkReady();
+      clean_call method_call = new clean_call(resultHandler, this, ___protocolFactory, ___transport);
+      this.___currentMethod = method_call;
+      ___manager.call(method_call);
+    }
+
+    public static class clean_call extends org.apache.thrift.async.TAsyncMethodCall {
+      public clean_call(org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException {
+        super(client, protocolFactory, transport, resultHandler, false);
+      }
+
+      public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException {
+        prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("clean", org.apache.thrift.protocol.TMessageType.CALL, 0));
+        clean_args args = new clean_args();
+        args.write(prot);
+        prot.writeMessageEnd();
+      }
+
+      public void getResult() throws org.apache.thrift.TException {
+        if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) {
+          throw new IllegalStateException("Method call not finished!");
+        }
+        org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array());
+        org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport);
+        (new Client(prot)).recv_clean();
+      }
+    }
+
+  }
+
+  public static class Processor<I extends Iface> extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.Processor<I> implements org.apache.thrift.TProcessor {
+    private static final Logger LOGGER = LoggerFactory.getLogger(Processor.class.getName());
+    public Processor(I iface) {
+      super(iface, getProcessMap(new HashMap<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>>()));
+    }
+
+    protected Processor(I iface, Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
+      super(iface, getProcessMap(processMap));
+    }
+
+    private static <I extends Iface> Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> getProcessMap(Map<String, org.apache.thrift.ProcessFunction<I, ? extends org.apache.thrift.TBase>> processMap) {
+      processMap.put("execute", new execute());
+      processMap.put("fetchOne", new fetchOne());
+      processMap.put("fetchN", new fetchN());
+      processMap.put("fetchAll", new fetchAll());
+      processMap.put("getSchema", new getSchema());
+      processMap.put("getThriftSchema", new getThriftSchema());
+      processMap.put("getClusterStatus", new getClusterStatus());
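+      // Remaining handlers are registered below; TBaseProcessor.process() selects the
+      // ProcessFunction by the method name read from the incoming message header. +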
processMap.put("getQueryPlan", new getQueryPlan()); + processMap.put("clean", new clean()); + return processMap; + } + + public static class execute extends org.apache.thrift.ProcessFunction { + public execute() { + super("execute"); + } + + public execute_args getEmptyArgsInstance() { + return new execute_args(); + } + + protected boolean isOneway() { + return false; + } + + public execute_result getResult(I iface, execute_args args) throws org.apache.thrift.TException { + execute_result result = new execute_result(); + try { + iface.execute(args.query); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class fetchOne extends org.apache.thrift.ProcessFunction { + public fetchOne() { + super("fetchOne"); + } + + public fetchOne_args getEmptyArgsInstance() { + return new fetchOne_args(); + } + + protected boolean isOneway() { + return false; + } + + public fetchOne_result getResult(I iface, fetchOne_args args) throws org.apache.thrift.TException { + fetchOne_result result = new fetchOne_result(); + try { + result.success = iface.fetchOne(); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class fetchN extends org.apache.thrift.ProcessFunction { + public fetchN() { + super("fetchN"); + } + + public fetchN_args getEmptyArgsInstance() { + return new fetchN_args(); + } + + protected boolean isOneway() { + return false; + } + + public fetchN_result getResult(I iface, fetchN_args args) throws org.apache.thrift.TException { + fetchN_result result = new fetchN_result(); + try { + result.success = iface.fetchN(args.numRows); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class fetchAll extends org.apache.thrift.ProcessFunction { + public fetchAll() { + super("fetchAll"); + } + + public fetchAll_args getEmptyArgsInstance() { + return new fetchAll_args(); + } + + protected boolean isOneway() { + return false; + } + + public fetchAll_result getResult(I iface, fetchAll_args args) throws org.apache.thrift.TException { + fetchAll_result result = new fetchAll_result(); + try { + result.success = iface.fetchAll(); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class getSchema extends org.apache.thrift.ProcessFunction { + public getSchema() { + super("getSchema"); + } + + public getSchema_args getEmptyArgsInstance() { + return new getSchema_args(); + } + + protected boolean isOneway() { + return false; + } + + public getSchema_result getResult(I iface, getSchema_args args) throws org.apache.thrift.TException { + getSchema_result result = new getSchema_result(); + try { + result.success = iface.getSchema(); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class getThriftSchema extends org.apache.thrift.ProcessFunction { + public getThriftSchema() { + super("getThriftSchema"); + } + + public getThriftSchema_args getEmptyArgsInstance() { + return new getThriftSchema_args(); + } + + protected boolean isOneway() { + return false; + } + + public getThriftSchema_result getResult(I iface, getThriftSchema_args args) throws org.apache.thrift.TException { + getThriftSchema_result result = new getThriftSchema_result(); + try { + result.success = iface.getThriftSchema(); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class getClusterStatus extends org.apache.thrift.ProcessFunction { + public 
getClusterStatus() { + super("getClusterStatus"); + } + + public getClusterStatus_args getEmptyArgsInstance() { + return new getClusterStatus_args(); + } + + protected boolean isOneway() { + return false; + } + + public getClusterStatus_result getResult(I iface, getClusterStatus_args args) throws org.apache.thrift.TException { + getClusterStatus_result result = new getClusterStatus_result(); + try { + result.success = iface.getClusterStatus(); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class getQueryPlan extends org.apache.thrift.ProcessFunction { + public getQueryPlan() { + super("getQueryPlan"); + } + + public getQueryPlan_args getEmptyArgsInstance() { + return new getQueryPlan_args(); + } + + protected boolean isOneway() { + return false; + } + + public getQueryPlan_result getResult(I iface, getQueryPlan_args args) throws org.apache.thrift.TException { + getQueryPlan_result result = new getQueryPlan_result(); + try { + result.success = iface.getQueryPlan(); + } catch (HiveServerException ex) { + result.ex = ex; + } + return result; + } + } + + public static class clean extends org.apache.thrift.ProcessFunction { + public clean() { + super("clean"); + } + + public clean_args getEmptyArgsInstance() { + return new clean_args(); + } + + protected boolean isOneway() { + return false; + } + + public clean_result getResult(I iface, clean_args args) throws org.apache.thrift.TException { + clean_result result = new clean_result(); + iface.clean(); + return result; + } + } + + } + + public static class AsyncProcessor extends org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore.AsyncProcessor { + private static final Logger LOGGER = LoggerFactory.getLogger(AsyncProcessor.class.getName()); + public AsyncProcessor(I iface) { + super(iface, getProcessMap(new HashMap>())); + } + + protected AsyncProcessor(I iface, Map> processMap) { + super(iface, getProcessMap(processMap)); + } + + private static Map> getProcessMap(Map> processMap) { + processMap.put("execute", new execute()); + processMap.put("fetchOne", new fetchOne()); + processMap.put("fetchN", new fetchN()); + processMap.put("fetchAll", new fetchAll()); + processMap.put("getSchema", new getSchema()); + processMap.put("getThriftSchema", new getThriftSchema()); + processMap.put("getClusterStatus", new getClusterStatus()); + processMap.put("getQueryPlan", new getQueryPlan()); + processMap.put("clean", new clean()); + return processMap; + } + + public static class execute extends org.apache.thrift.AsyncProcessFunction { + public execute() { + super("execute"); + } + + public execute_args getEmptyArgsInstance() { + return new execute_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + execute_result result = new execute_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + execute_result result = new execute_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = 
org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, execute_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.execute(args.query,resultHandler); + } + } + + public static class fetchOne extends org.apache.thrift.AsyncProcessFunction { + public fetchOne() { + super("fetchOne"); + } + + public fetchOne_args getEmptyArgsInstance() { + return new fetchOne_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(String o) { + fetchOne_result result = new fetchOne_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + fetchOne_result result = new fetchOne_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, fetchOne_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.fetchOne(resultHandler); + } + } + + public static class fetchN extends org.apache.thrift.AsyncProcessFunction> { + public fetchN() { + super("fetchN"); + } + + public fetchN_args getEmptyArgsInstance() { + return new fetchN_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + fetchN_result result = new fetchN_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + fetchN_result result = new fetchN_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); 
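+                // Exceptions not declared in the service IDL reach the remote caller
+                // as TApplicationException(INTERNAL_ERROR).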
+ } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, fetchN_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.fetchN(args.numRows,resultHandler); + } + } + + public static class fetchAll extends org.apache.thrift.AsyncProcessFunction> { + public fetchAll() { + super("fetchAll"); + } + + public fetchAll_args getEmptyArgsInstance() { + return new fetchAll_args(); + } + + public AsyncMethodCallback> getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback>() { + public void onComplete(List o) { + fetchAll_result result = new fetchAll_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + fetchAll_result result = new fetchAll_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, fetchAll_args args, org.apache.thrift.async.AsyncMethodCallback> resultHandler) throws TException { + iface.fetchAll(resultHandler); + } + } + + public static class getSchema extends org.apache.thrift.AsyncProcessFunction { + public getSchema() { + super("getSchema"); + } + + public getSchema_args getEmptyArgsInstance() { + return new getSchema_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(org.apache.hadoop.hive.metastore.api.Schema o) { + getSchema_result result = new getSchema_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + getSchema_result result = new getSchema_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to 
internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, getSchema_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.getSchema(resultHandler); + } + } + + public static class getThriftSchema extends org.apache.thrift.AsyncProcessFunction { + public getThriftSchema() { + super("getThriftSchema"); + } + + public getThriftSchema_args getEmptyArgsInstance() { + return new getThriftSchema_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(org.apache.hadoop.hive.metastore.api.Schema o) { + getThriftSchema_result result = new getThriftSchema_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + getThriftSchema_result result = new getThriftSchema_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, getThriftSchema_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.getThriftSchema(resultHandler); + } + } + + public static class getClusterStatus extends org.apache.thrift.AsyncProcessFunction { + public getClusterStatus() { + super("getClusterStatus"); + } + + public getClusterStatus_args getEmptyArgsInstance() { + return new getClusterStatus_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(HiveClusterStatus o) { + getClusterStatus_result result = new getClusterStatus_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + getClusterStatus_result result = new getClusterStatus_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception 
writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, getClusterStatus_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.getClusterStatus(resultHandler); + } + } + + public static class getQueryPlan extends org.apache.thrift.AsyncProcessFunction { + public getQueryPlan() { + super("getQueryPlan"); + } + + public getQueryPlan_args getEmptyArgsInstance() { + return new getQueryPlan_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(org.apache.hadoop.hive.ql.plan.api.QueryPlan o) { + getQueryPlan_result result = new getQueryPlan_result(); + result.success = o; + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + getQueryPlan_result result = new getQueryPlan_result(); + if (e instanceof HiveServerException) { + result.ex = (HiveServerException) e; + result.setExIsSet(true); + msg = result; + } + else + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, getQueryPlan_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + iface.getQueryPlan(resultHandler); + } + } + + public static class clean extends org.apache.thrift.AsyncProcessFunction { + public clean() { + super("clean"); + } + + public clean_args getEmptyArgsInstance() { + return new clean_args(); + } + + public AsyncMethodCallback getResultHandler(final AsyncFrameBuffer fb, final int seqid) { + final org.apache.thrift.AsyncProcessFunction fcall = this; + return new AsyncMethodCallback() { + public void onComplete(Void o) { + clean_result result = new clean_result(); + try { + fcall.sendResponse(fb,result, org.apache.thrift.protocol.TMessageType.REPLY,seqid); + return; + } catch (Exception e) { + LOGGER.error("Exception writing to internal frame buffer", e); + } + fb.close(); + } + public void onError(Exception e) { + byte msgType = org.apache.thrift.protocol.TMessageType.REPLY; + org.apache.thrift.TBase msg; + clean_result result = new clean_result(); + { + msgType = org.apache.thrift.protocol.TMessageType.EXCEPTION; + msg = (org.apache.thrift.TBase)new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.INTERNAL_ERROR, e.getMessage()); + } + try { + fcall.sendResponse(fb,msg,msgType,seqid); + return; + } catch (Exception ex) { + LOGGER.error("Exception writing to internal frame buffer", ex); + } + fb.close(); + } + }; + } + + protected boolean isOneway() { + return false; + } + + public void start(I iface, clean_args args, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws TException { + 
iface.clean(resultHandler);
+      }
+    }
+
+  }
+
+  public static class execute_args implements org.apache.thrift.TBase<execute_args, execute_args._Fields>, java.io.Serializable, Cloneable, Comparable<execute_args> {
+    private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_args");
+
+    private static final org.apache.thrift.protocol.TField QUERY_FIELD_DESC = new org.apache.thrift.protocol.TField("query", org.apache.thrift.protocol.TType.STRING, (short)1);
+
+    private static final Map<Class<? extends IScheme>, SchemeFactory> schemes = new HashMap<Class<? extends IScheme>, SchemeFactory>();
+    static {
+      schemes.put(StandardScheme.class, new execute_argsStandardSchemeFactory());
+      schemes.put(TupleScheme.class, new execute_argsTupleSchemeFactory());
+    }
+
+    private String query; // required
+
+    /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */
+    public enum _Fields implements org.apache.thrift.TFieldIdEnum {
+      QUERY((short)1, "query");
+
+      private static final Map<String, _Fields> byName = new HashMap<String, _Fields>();
+
+      static {
+        for (_Fields field : EnumSet.allOf(_Fields.class)) {
+          byName.put(field.getFieldName(), field);
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, or null if its not found.
+       */
+      public static _Fields findByThriftId(int fieldId) {
+        switch(fieldId) {
+          case 1: // QUERY
+            return QUERY;
+          default:
+            return null;
+        }
+      }
+
+      /**
+       * Find the _Fields constant that matches fieldId, throwing an exception
+       * if it is not found.
+       */
+      public static _Fields findByThriftIdOrThrow(int fieldId) {
+        _Fields fields = findByThriftId(fieldId);
+        if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!");
+        return fields;
+      }
+
+      /**
+       * Find the _Fields constant that matches name, or null if its not found.
+       */
+      public static _Fields findByName(String name) {
+        return byName.get(name);
+      }
+
+      private final short _thriftId;
+      private final String _fieldName;
+
+      _Fields(short thriftId, String fieldName) {
+        _thriftId = thriftId;
+        _fieldName = fieldName;
+      }
+
+      public short getThriftFieldId() {
+        return _thriftId;
+      }
+
+      public String getFieldName() {
+        return _fieldName;
+      }
+    }
+
+    // isset id assignments
+    public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap;
+    static {
+      Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class);
+      tmpMap.put(_Fields.QUERY, new org.apache.thrift.meta_data.FieldMetaData("query", org.apache.thrift.TFieldRequirementType.DEFAULT,
+          new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)));
+      metaDataMap = Collections.unmodifiableMap(tmpMap);
+      org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_args.class, metaDataMap);
+    }
+
+    public execute_args() {
+    }
+
+    public execute_args(
+      String query)
+    {
+      this();
+      this.query = query;
+    }
+
+    /**
+     * Performs a deep copy on other.
+ */ + public execute_args(execute_args other) { + if (other.isSetQuery()) { + this.query = other.query; + } + } + + public execute_args deepCopy() { + return new execute_args(this); + } + + @Override + public void clear() { + this.query = null; + } + + public String getQuery() { + return this.query; + } + + public void setQuery(String query) { + this.query = query; + } + + public void unsetQuery() { + this.query = null; + } + + /** Returns true if field query is set (has been assigned a value) and false otherwise */ + public boolean isSetQuery() { + return this.query != null; + } + + public void setQueryIsSet(boolean value) { + if (!value) { + this.query = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case QUERY: + if (value == null) { + unsetQuery(); + } else { + setQuery((String)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case QUERY: + return getQuery(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case QUERY: + return isSetQuery(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof execute_args) + return this.equals((execute_args)that); + return false; + } + + public boolean equals(execute_args that) { + if (that == null) + return false; + + boolean this_present_query = true && this.isSetQuery(); + boolean that_present_query = true && that.isSetQuery(); + if (this_present_query || that_present_query) { + if (!(this_present_query && that_present_query)) + return false; + if (!this.query.equals(that.query)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_query = true && (isSetQuery()); + list.add(present_query); + if (present_query) + list.add(query); + + return list.hashCode(); + } + + @Override + public int compareTo(execute_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetQuery()).compareTo(other.isSetQuery()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetQuery()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.query, other.query); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("execute_args("); + boolean first = true; + + sb.append("query:"); + if (this.query == null) { + sb.append("null"); + } else { + sb.append(this.query); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } 
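+
+    // Java serialization is bridged through Thrift: the writeObject/readObject
+    // methods below round-trip the struct via TCompactProtocol over a
+    // TIOStreamTransport instead of default field-by-field serialization.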
+ + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class execute_argsStandardSchemeFactory implements SchemeFactory { + public execute_argsStandardScheme getScheme() { + return new execute_argsStandardScheme(); + } + } + + private static class execute_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, execute_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // QUERY + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.query = iprot.readString(); + struct.setQueryIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, execute_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.query != null) { + oprot.writeFieldBegin(QUERY_FIELD_DESC); + oprot.writeString(struct.query); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class execute_argsTupleSchemeFactory implements SchemeFactory { + public execute_argsTupleScheme getScheme() { + return new execute_argsTupleScheme(); + } + } + + private static class execute_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, execute_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetQuery()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetQuery()) { + oprot.writeString(struct.query); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, execute_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.query = iprot.readString(); + struct.setQueryIsSet(true); + } + } + } + + } + + public static class execute_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("execute_result"); + + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + 
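// Two serializer schemes are registered per struct: StandardScheme writes the
+      // regular field-tagged wire format, while TupleScheme is the denser encoding
+      // used with TTupleProtocol. +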
schemes.put(StandardScheme.class, new execute_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new execute_resultTupleSchemeFactory()); + } + + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(execute_result.class, metaDataMap); + } + + public execute_result() { + } + + public execute_result( + HiveServerException ex) + { + this(); + this.ex = ex; + } + + /** + * Performs a deep copy on other. 
+ */ + public execute_result(execute_result other) { + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public execute_result deepCopy() { + return new execute_result(this); + } + + @Override + public void clear() { + this.ex = null; + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof execute_result) + return this.equals((execute_result)that); + return false; + } + + public boolean equals(execute_result that) { + if (that == null) + return false; + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(execute_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("execute_result("); + boolean first = true; + + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream 
out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class execute_resultStandardSchemeFactory implements SchemeFactory { + public execute_resultStandardScheme getScheme() { + return new execute_resultStandardScheme(); + } + } + + private static class execute_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, execute_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // EX + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, execute_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class execute_resultTupleSchemeFactory implements SchemeFactory { + public execute_resultTupleScheme getScheme() { + return new execute_resultTupleScheme(); + } + } + + private static class execute_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, execute_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetEx()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, execute_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class fetchOne_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchOne_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fetchOne_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fetchOne_argsTupleSchemeFactory()); + } + + + /** The set of fields this 
struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map<String, _Fields> byName = new HashMap<String, _Fields>(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchOne_args.class, metaDataMap); + } + + public fetchOne_args() { + } + + /** + * Performs a deep copy on other.
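
The generated structs expose copying through a copy constructor plus the deepCopy() method declared by TBase, rather than overriding Object.clone(). A minimal hand-rolled sketch of the same idiom, assuming a struct with one nested-struct field; MyStruct and Payload are illustrative names, not part of the generated API:

class Payload {
  String message;
  Payload() {}
  Payload(Payload other) { this.message = other.message; } // deep copy
}

public class MyStruct {
  private Payload ex; // nested struct; null means "unset"

  public MyStruct() {}

  // Copy constructor: nested structs are copied via their own copy
  // constructors, so the new instance never aliases the original's state.
  public MyStruct(MyStruct other) {
    if (other.isSetEx()) {
      this.ex = new Payload(other.ex);
    }
  }

  public MyStruct deepCopy() {
    return new MyStruct(this);
  }

  public boolean isSetEx() {
    return this.ex != null;
  }
}

Keeping the copy logic in an explicit constructor avoids the shallow-copy and checked-exception pitfalls of Cloneable, even though the generated classes also declare that interface.
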
+ */ + public fetchOne_args(fetchOne_args other) { + } + + public fetchOne_args deepCopy() { + return new fetchOne_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof fetchOne_args) + return this.equals((fetchOne_args)that); + return false; + } + + public boolean equals(fetchOne_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(fetchOne_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("fetchOne_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class fetchOne_argsStandardSchemeFactory implements SchemeFactory { + public fetchOne_argsStandardScheme getScheme() { + return new fetchOne_argsStandardScheme(); + } + } + + private static class fetchOne_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, fetchOne_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, fetchOne_args struct) throws 
org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class fetchOne_argsTupleSchemeFactory implements SchemeFactory { + public fetchOne_argsTupleScheme getScheme() { + return new fetchOne_argsTupleScheme(); + } + } + + private static class fetchOne_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, fetchOne_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, fetchOne_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class fetchOne_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchOne_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRING, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fetchOne_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fetchOne_resultTupleSchemeFactory()); + } + + private String success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
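
Every generated struct nests a _Fields enum implementing org.apache.thrift.TFieldIdEnum, giving three lookups for the same field: by numeric Thrift id (the wire format), by name, and by the enum constant itself. A standalone sketch of the idiom, with illustrative names rather than the generated API:

import java.util.HashMap;
import java.util.Map;

public enum Fields {
  SUCCESS((short) 0, "success"),
  EX((short) 1, "ex");

  private static final Map<String, Fields> BY_NAME = new HashMap<String, Fields>();
  static {
    for (Fields f : values()) {
      BY_NAME.put(f.fieldName, f);
    }
  }

  private final short thriftId;
  private final String fieldName;

  Fields(short thriftId, String fieldName) {
    this.thriftId = thriftId;
    this.fieldName = fieldName;
  }

  // Wire-format lookup: returns null (not an exception) for an unknown id,
  // so a decoder built from an older IDL can simply skip newer fields.
  public static Fields findByThriftId(int fieldId) {
    switch (fieldId) {
      case 0: return SUCCESS;
      case 1: return EX;
      default: return null;
    }
  }

  public static Fields findByName(String name) {
    return BY_NAME.get(name);
  }
}

The null-returning findByThriftId is what lets the standard-scheme readers below tolerate unrecognized field ids by skipping them.
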
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchOne_result.class, metaDataMap); + } + + public fetchOne_result() { + } + + public fetchOne_result( + String success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. + */ + public fetchOne_result(fetchOne_result other) { + if (other.isSetSuccess()) { + this.success = other.success; + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public fetchOne_result deepCopy() { + return new fetchOne_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public String getSuccess() { + return this.success; + } + + public void setSuccess(String success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((String)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new 
IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof fetchOne_result) + return this.equals((fetchOne_result)that); + return false; + } + + public boolean equals(fetchOne_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(fetchOne_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("fetchOne_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class fetchOne_resultStandardSchemeFactory implements SchemeFactory { + public fetchOne_resultStandardScheme getScheme() { + return new fetchOne_resultStandardScheme(); + } + } + + private static class fetchOne_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, fetchOne_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRING) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, fetchOne_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + oprot.writeString(struct.success); + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class fetchOne_resultTupleSchemeFactory implements SchemeFactory { + public fetchOne_resultTupleScheme getScheme() { + return new fetchOne_resultTupleScheme(); + } + } + + private static class fetchOne_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, fetchOne_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + oprot.writeString(struct.success); + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, fetchOne_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = iprot.readString(); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class fetchN_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchN_args"); + + private static final 
org.apache.thrift.protocol.TField NUM_ROWS_FIELD_DESC = new org.apache.thrift.protocol.TField("numRows", org.apache.thrift.protocol.TType.I32, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fetchN_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fetchN_argsTupleSchemeFactory()); + } + + private int numRows; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + NUM_ROWS((short)1, "numRows"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // NUM_ROWS + return NUM_ROWS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + private static final int __NUMROWS_ISSET_ID = 0; + private byte __isset_bitfield = 0; + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.NUM_ROWS, new org.apache.thrift.meta_data.FieldMetaData("numRows", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.I32))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchN_args.class, metaDataMap); + } + + public fetchN_args() { + } + + public fetchN_args( + int numRows) + { + this(); + this.numRows = numRows; + setNumRowsIsSet(true); + } + + /** + * Performs a deep copy on other. 
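
Since numRows is a primitive int, "unset" cannot be modeled as null the way it is for the object-typed fields above; the generator instead packs one presence bit per primitive field into __isset_bitfield. A self-contained sketch of that bookkeeping (the generated code delegates to org.apache.thrift.EncodingUtils; the equivalent bit operations are inlined here so the example stands alone):

public class FetchArgs {
  private static final int NUM_ROWS_ISSET_ID = 0;
  private byte issetBitfield = 0;
  private int numRows; // primitive: cannot be null, so track "set" separately

  public void setNumRows(int numRows) {
    this.numRows = numRows;
    issetBitfield |= (1 << NUM_ROWS_ISSET_ID);  // like EncodingUtils.setBit
  }

  public void unsetNumRows() {
    issetBitfield &= ~(1 << NUM_ROWS_ISSET_ID); // like EncodingUtils.clearBit
  }

  public boolean isSetNumRows() {
    return (issetBitfield & (1 << NUM_ROWS_ISSET_ID)) != 0; // like testBit
  }
}

One byte covers up to eight primitive fields, which is also why the copy constructor below simply copies the whole bitfield in a single assignment.
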
+ */ + public fetchN_args(fetchN_args other) { + __isset_bitfield = other.__isset_bitfield; + this.numRows = other.numRows; + } + + public fetchN_args deepCopy() { + return new fetchN_args(this); + } + + @Override + public void clear() { + setNumRowsIsSet(false); + this.numRows = 0; + } + + public int getNumRows() { + return this.numRows; + } + + public void setNumRows(int numRows) { + this.numRows = numRows; + setNumRowsIsSet(true); + } + + public void unsetNumRows() { + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __NUMROWS_ISSET_ID); + } + + /** Returns true if field numRows is set (has been assigned a value) and false otherwise */ + public boolean isSetNumRows() { + return EncodingUtils.testBit(__isset_bitfield, __NUMROWS_ISSET_ID); + } + + public void setNumRowsIsSet(boolean value) { + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __NUMROWS_ISSET_ID, value); + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case NUM_ROWS: + if (value == null) { + unsetNumRows(); + } else { + setNumRows((Integer)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case NUM_ROWS: + return getNumRows(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case NUM_ROWS: + return isSetNumRows(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof fetchN_args) + return this.equals((fetchN_args)that); + return false; + } + + public boolean equals(fetchN_args that) { + if (that == null) + return false; + + boolean this_present_numRows = true; + boolean that_present_numRows = true; + if (this_present_numRows || that_present_numRows) { + if (!(this_present_numRows && that_present_numRows)) + return false; + if (this.numRows != that.numRows) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_numRows = true; + list.add(present_numRows); + if (present_numRows) + list.add(numRows); + + return list.hashCode(); + } + + @Override + public int compareTo(fetchN_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetNumRows()).compareTo(other.isSetNumRows()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetNumRows()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.numRows, other.numRows); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("fetchN_args("); + boolean first = true; + + sb.append("numRows:"); + sb.append(this.numRows); + first = false; + sb.append(")"); + return 
sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. + __isset_bitfield = 0; + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class fetchN_argsStandardSchemeFactory implements SchemeFactory { + public fetchN_argsStandardScheme getScheme() { + return new fetchN_argsStandardScheme(); + } + } + + private static class fetchN_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, fetchN_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // NUM_ROWS + if (schemeField.type == org.apache.thrift.protocol.TType.I32) { + struct.numRows = iprot.readI32(); + struct.setNumRowsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, fetchN_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldBegin(NUM_ROWS_FIELD_DESC); + oprot.writeI32(struct.numRows); + oprot.writeFieldEnd(); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class fetchN_argsTupleSchemeFactory implements SchemeFactory { + public fetchN_argsTupleScheme getScheme() { + return new fetchN_argsTupleScheme(); + } + } + + private static class fetchN_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, fetchN_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetNumRows()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetNumRows()) { + oprot.writeI32(struct.numRows); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, fetchN_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.numRows = iprot.readI32(); + struct.setNumRowsIsSet(true); + } + } + } + + } + + public static class fetchN_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchN_result"); + + private 
static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fetchN_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fetchN_resultTupleSchemeFactory()); + } + + private List success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchN_result.class, metaDataMap); + } + + public fetchN_result() { + } + + public fetchN_result( + List success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. 
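
For the list-valued success field, the copy constructor that follows allocates a fresh ArrayList but reuses the element references. That is still a safe "deep" copy here because String is immutable: only container-level mutations need isolating. A reduced sketch of the pattern (RowsResult is an illustrative name):

import java.util.ArrayList;
import java.util.List;

public class RowsResult {
  private List<String> success;

  public RowsResult() {}

  public RowsResult(RowsResult other) {
    if (other.success != null) {
      // New list, shared immutable elements: adds/removes on the copy
      // cannot be observed through the original, and vice versa.
      this.success = new ArrayList<String>(other.success);
    }
  }
}

If the element type were itself a mutable Thrift struct, the generated code would instead copy each element through its copy constructor.
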
+ */ + public fetchN_result(fetchN_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success); + this.success = __this__success; + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public fetchN_result deepCopy() { + return new fetchN_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof fetchN_result) + return this.equals((fetchN_result)that); + return false; + } + + public boolean equals(fetchN_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + 
list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(fetchN_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("fetchN_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class fetchN_resultStandardSchemeFactory implements SchemeFactory { + public fetchN_resultStandardScheme getScheme() { + return new fetchN_resultStandardScheme(); + } + } + + private static class fetchN_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, fetchN_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list0 = iprot.readListBegin(); + struct.success = new ArrayList(_list0.size); + String _elem1; + for (int _i2 = 0; _i2 < _list0.size; ++_i2) + { 
+ _elem1 = iprot.readString(); + struct.success.add(_elem1); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, fetchN_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter3 : struct.success) + { + oprot.writeString(_iter3); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class fetchN_resultTupleSchemeFactory implements SchemeFactory { + public fetchN_resultTupleScheme getScheme() { + return new fetchN_resultTupleScheme(); + } + } + + private static class fetchN_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, fetchN_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (String _iter4 : struct.success) + { + oprot.writeString(_iter4); + } + } + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, fetchN_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list5 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list5.size); + String _elem6; + for (int _i7 = 0; _i7 < _list5.size; ++_i7) + { + _elem6 = iprot.readString(); + struct.success.add(_elem6); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class fetchAll_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchAll_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fetchAll_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fetchAll_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for 
finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchAll_args.class, metaDataMap); + } + + public fetchAll_args() { + } + + /** + * Performs a deep copy on other. + */ + public fetchAll_args(fetchAll_args other) { + } + + public fetchAll_args deepCopy() { + return new fetchAll_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof fetchAll_args) + return this.equals((fetchAll_args)that); + return false; + } + + public boolean equals(fetchAll_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(fetchAll_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new 
StringBuilder("fetchAll_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class fetchAll_argsStandardSchemeFactory implements SchemeFactory { + public fetchAll_argsStandardScheme getScheme() { + return new fetchAll_argsStandardScheme(); + } + } + + private static class fetchAll_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, fetchAll_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, fetchAll_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class fetchAll_argsTupleSchemeFactory implements SchemeFactory { + public fetchAll_argsTupleScheme getScheme() { + return new fetchAll_argsTupleScheme(); + } + } + + private static class fetchAll_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, fetchAll_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, fetchAll_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class fetchAll_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("fetchAll_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.LIST, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new fetchAll_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new fetchAll_resultTupleSchemeFactory()); + } + + private List success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, 
along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRING)))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(fetchAll_result.class, metaDataMap); + } + + public fetchAll_result() { + } + + public fetchAll_result( + List success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. + */ + public fetchAll_result(fetchAll_result other) { + if (other.isSetSuccess()) { + List __this__success = new ArrayList(other.success); + this.success = __this__success; + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public fetchAll_result deepCopy() { + return new fetchAll_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public int getSuccessSize() { + return (this.success == null) ? 0 : this.success.size(); + } + + public java.util.Iterator getSuccessIterator() { + return (this.success == null) ? 
null : this.success.iterator(); + } + + public void addToSuccess(String elem) { + if (this.success == null) { + this.success = new ArrayList(); + } + this.success.add(elem); + } + + public List getSuccess() { + return this.success; + } + + public void setSuccess(List success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((List)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof fetchAll_result) + return this.equals((fetchAll_result)that); + return false; + } + + public boolean equals(fetchAll_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(fetchAll_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = 
org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("fetchAll_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class fetchAll_resultStandardSchemeFactory implements SchemeFactory { + public fetchAll_resultStandardScheme getScheme() { + return new fetchAll_resultStandardScheme(); + } + } + + private static class fetchAll_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, fetchAll_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list8 = iprot.readListBegin(); + struct.success = new ArrayList(_list8.size); + String _elem9; + for (int _i10 = 0; _i10 < _list8.size; ++_i10) + { + _elem9 = iprot.readString(); + struct.success.add(_elem9); + } + iprot.readListEnd(); + } + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + 
org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, fetchAll_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); + for (String _iter11 : struct.success) + { + oprot.writeString(_iter11); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class fetchAll_resultTupleSchemeFactory implements SchemeFactory { + public fetchAll_resultTupleScheme getScheme() { + return new fetchAll_resultTupleScheme(); + } + } + + private static class fetchAll_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, fetchAll_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + { + oprot.writeI32(struct.success.size()); + for (String _iter12 : struct.success) + { + oprot.writeString(_iter12); + } + } + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, fetchAll_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + { + org.apache.thrift.protocol.TList _list13 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list13.size); + String _elem14; + for (int _i15 = 0; _i15 < _list13.size; ++_i15) + { + _elem14 = iprot.readString(); + struct.success.add(_elem14); + } + } + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class getSchema_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getSchema_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getSchema_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getSchema_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
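+ * (getSchema_args declares no fields, so the enum body above is intentionally empty
+ * and this lookup always falls through to null.)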
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getSchema_args.class, metaDataMap); + } + + public getSchema_args() { + } + + /** + * Performs a deep copy on other. + */ + public getSchema_args(getSchema_args other) { + } + + public getSchema_args deepCopy() { + return new getSchema_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getSchema_args) + return this.equals((getSchema_args)that); + return false; + } + + public boolean equals(getSchema_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(getSchema_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getSchema_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new 
org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getSchema_argsStandardSchemeFactory implements SchemeFactory { + public getSchema_argsStandardScheme getScheme() { + return new getSchema_argsStandardScheme(); + } + } + + private static class getSchema_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getSchema_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getSchema_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getSchema_argsTupleSchemeFactory implements SchemeFactory { + public getSchema_argsTupleScheme getScheme() { + return new getSchema_argsTupleScheme(); + } + } + + private static class getSchema_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getSchema_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getSchema_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class getSchema_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getSchema_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getSchema_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getSchema_resultTupleSchemeFactory()); + } + + private org.apache.hadoop.hive.metastore.api.Schema success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
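+ * Each constant pairs a Thrift field id with its field name (SUCCESS = 0, EX = 1),
+ * implementing org.apache.thrift.TFieldIdEnum so protocol code can address fields by
+ * numeric id instead of reflection.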
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.hadoop.hive.metastore.api.Schema.class))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getSchema_result.class, metaDataMap); + } + + public getSchema_result() { + } + + public getSchema_result( + org.apache.hadoop.hive.metastore.api.Schema success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. 
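+ * Only fields that are set on other are copied, and struct-typed fields (success, ex)
+ * are cloned through their own copy constructors, so the copy does not alias the
+ * original's field objects.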
+ */ + public getSchema_result(getSchema_result other) { + if (other.isSetSuccess()) { + this.success = new org.apache.hadoop.hive.metastore.api.Schema(other.success); + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public getSchema_result deepCopy() { + return new getSchema_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public org.apache.hadoop.hive.metastore.api.Schema getSuccess() { + return this.success; + } + + public void setSuccess(org.apache.hadoop.hive.metastore.api.Schema success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((org.apache.hadoop.hive.metastore.api.Schema)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getSchema_result) + return this.equals((getSchema_result)that); + return false; + } + + public boolean equals(getSchema_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(getSchema_result other) { + if 
(!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getSchema_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getSchema_resultStandardSchemeFactory implements SchemeFactory { + public getSchema_resultStandardScheme getScheme() { + return new getSchema_resultStandardScheme(); + } + } + + private static class getSchema_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getSchema_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new org.apache.hadoop.hive.metastore.api.Schema(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new 
HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getSchema_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getSchema_resultTupleSchemeFactory implements SchemeFactory { + public getSchema_resultTupleScheme getScheme() { + return new getSchema_resultTupleScheme(); + } + } + + private static class getSchema_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getSchema_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getSchema_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = new org.apache.hadoop.hive.metastore.api.Schema(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class getThriftSchema_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftSchema_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getThriftSchema_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getThriftSchema_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
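+ * (The deserializers are more lenient than this lookup: unknown field ids arriving
+ * off the wire are skipped via TProtocolUtil.skip rather than treated as errors.)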
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftSchema_args.class, metaDataMap); + } + + public getThriftSchema_args() { + } + + /** + * Performs a deep copy on other. + */ + public getThriftSchema_args(getThriftSchema_args other) { + } + + public getThriftSchema_args deepCopy() { + return new getThriftSchema_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getThriftSchema_args) + return this.equals((getThriftSchema_args)that); + return false; + } + + public boolean equals(getThriftSchema_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(getThriftSchema_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getThriftSchema_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new 
java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getThriftSchema_argsStandardSchemeFactory implements SchemeFactory { + public getThriftSchema_argsStandardScheme getScheme() { + return new getThriftSchema_argsStandardScheme(); + } + } + + private static class getThriftSchema_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftSchema_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftSchema_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getThriftSchema_argsTupleSchemeFactory implements SchemeFactory { + public getThriftSchema_argsTupleScheme getScheme() { + return new getThriftSchema_argsTupleScheme(); + } + } + + private static class getThriftSchema_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getThriftSchema_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getThriftSchema_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class getThriftSchema_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getThriftSchema_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getThriftSchema_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getThriftSchema_resultTupleSchemeFactory()); + } + + private org.apache.hadoop.hive.metastore.api.Schema success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
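+ * By Thrift convention the method's return value occupies field id 0 ("success") and
+ * declared exceptions follow from id 1, so a well-formed reply normally has exactly
+ * one of the two set.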
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.hadoop.hive.metastore.api.Schema.class))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getThriftSchema_result.class, metaDataMap); + } + + public getThriftSchema_result() { + } + + public getThriftSchema_result( + org.apache.hadoop.hive.metastore.api.Schema success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. 
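+ * (deepCopy() below simply delegates to this constructor, so the two copy paths
+ * cannot drift apart.)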
+ */ + public getThriftSchema_result(getThriftSchema_result other) { + if (other.isSetSuccess()) { + this.success = new org.apache.hadoop.hive.metastore.api.Schema(other.success); + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public getThriftSchema_result deepCopy() { + return new getThriftSchema_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public org.apache.hadoop.hive.metastore.api.Schema getSuccess() { + return this.success; + } + + public void setSuccess(org.apache.hadoop.hive.metastore.api.Schema success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((org.apache.hadoop.hive.metastore.api.Schema)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getThriftSchema_result) + return this.equals((getThriftSchema_result)that); + return false; + } + + public boolean equals(getThriftSchema_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int 
compareTo(getThriftSchema_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getThriftSchema_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getThriftSchema_resultStandardSchemeFactory implements SchemeFactory { + public getThriftSchema_resultStandardScheme getScheme() { + return new getThriftSchema_resultStandardScheme(); + } + } + + private static class getThriftSchema_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getThriftSchema_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new org.apache.hadoop.hive.metastore.api.Schema(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if 
(schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getThriftSchema_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getThriftSchema_resultTupleSchemeFactory implements SchemeFactory { + public getThriftSchema_resultTupleScheme getScheme() { + return new getThriftSchema_resultTupleScheme(); + } + } + + private static class getThriftSchema_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getThriftSchema_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getThriftSchema_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = new org.apache.hadoop.hive.metastore.api.Schema(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class getClusterStatus_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterStatus_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getClusterStatus_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getClusterStatus_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
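+ * (getClusterStatus_args is field-free, so this method throws for every id; on the
+ * wire the struct serializes to little more than a field-stop marker.)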
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterStatus_args.class, metaDataMap); + } + + public getClusterStatus_args() { + } + + /** + * Performs a deep copy on other. + */ + public getClusterStatus_args(getClusterStatus_args other) { + } + + public getClusterStatus_args deepCopy() { + return new getClusterStatus_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getClusterStatus_args) + return this.equals((getClusterStatus_args)that); + return false; + } + + public boolean equals(getClusterStatus_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(getClusterStatus_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getClusterStatus_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + 
throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getClusterStatus_argsStandardSchemeFactory implements SchemeFactory { + public getClusterStatus_argsStandardScheme getScheme() { + return new getClusterStatus_argsStandardScheme(); + } + } + + private static class getClusterStatus_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterStatus_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterStatus_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getClusterStatus_argsTupleSchemeFactory implements SchemeFactory { + public getClusterStatus_argsTupleScheme getScheme() { + return new getClusterStatus_argsTupleScheme(); + } + } + + private static class getClusterStatus_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getClusterStatus_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getClusterStatus_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class getClusterStatus_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getClusterStatus_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getClusterStatus_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getClusterStatus_resultTupleSchemeFactory()); + } + + private HiveClusterStatus success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
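+ * The numeric ids also fix the TupleScheme encoding below: a bit set recording which
+ * fields are present, then the present values in ascending id order. That is denser
+ * than the StandardScheme's self-describing per-field headers, but it requires both
+ * ends to be generated from the same IDL.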
*/ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, HiveClusterStatus.class))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getClusterStatus_result.class, metaDataMap); + } + + public getClusterStatus_result() { + } + + public getClusterStatus_result( + HiveClusterStatus success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. 
+ */ + public getClusterStatus_result(getClusterStatus_result other) { + if (other.isSetSuccess()) { + this.success = new HiveClusterStatus(other.success); + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public getClusterStatus_result deepCopy() { + return new getClusterStatus_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public HiveClusterStatus getSuccess() { + return this.success; + } + + public void setSuccess(HiveClusterStatus success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((HiveClusterStatus)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getClusterStatus_result) + return this.equals((getClusterStatus_result)that); + return false; + } + + public boolean equals(getClusterStatus_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(getClusterStatus_result other) { + if (!getClass().equals(other.getClass())) { + return 
getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getClusterStatus_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getClusterStatus_resultStandardSchemeFactory implements SchemeFactory { + public getClusterStatus_resultStandardScheme getScheme() { + return new getClusterStatus_resultStandardScheme(); + } + } + + private static class getClusterStatus_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getClusterStatus_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new HiveClusterStatus(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + 
struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getClusterStatus_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getClusterStatus_resultTupleSchemeFactory implements SchemeFactory { + public getClusterStatus_resultTupleScheme getScheme() { + return new getClusterStatus_resultTupleScheme(); + } + } + + private static class getClusterStatus_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getClusterStatus_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getClusterStatus_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = new HiveClusterStatus(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class getQueryPlan_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getQueryPlan_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getQueryPlan_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getQueryPlan_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getQueryPlan_args.class, metaDataMap); + } + + public getQueryPlan_args() { + } + + /** + * Performs a deep copy on other. + */ + public getQueryPlan_args(getQueryPlan_args other) { + } + + public getQueryPlan_args deepCopy() { + return new getQueryPlan_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getQueryPlan_args) + return this.equals((getQueryPlan_args)that); + return false; + } + + public boolean equals(getQueryPlan_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(getQueryPlan_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getQueryPlan_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + 
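+  // writeObject/readObject above and below bridge java.io serialization onto
+  // Thrift itself: the object stream is wrapped in a TIOStreamTransport and
+  // the struct is encoded with TCompactProtocol, so a Java-serialized
+  // getQueryPlan_args is simply its compact Thrift encoding. A sketch of the
+  // equivalent explicit round trip (illustration only, not generated code):
+  //
+  //   import org.apache.thrift.TDeserializer;
+  //   import org.apache.thrift.TSerializer;
+  //   import org.apache.thrift.protocol.TCompactProtocol;
+  //
+  //   byte[] bytes = new TSerializer(new TCompactProtocol.Factory())
+  //       .serialize(new getQueryPlan_args());            // encode
+  //   getQueryPlan_args copy = new getQueryPlan_args();
+  //   new TDeserializer(new TCompactProtocol.Factory())
+  //       .deserialize(copy, bytes);                      // decode in place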
private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getQueryPlan_argsStandardSchemeFactory implements SchemeFactory { + public getQueryPlan_argsStandardScheme getScheme() { + return new getQueryPlan_argsStandardScheme(); + } + } + + private static class getQueryPlan_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getQueryPlan_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getQueryPlan_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getQueryPlan_argsTupleSchemeFactory implements SchemeFactory { + public getQueryPlan_argsTupleScheme getScheme() { + return new getQueryPlan_argsTupleScheme(); + } + } + + private static class getQueryPlan_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getQueryPlan_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getQueryPlan_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class getQueryPlan_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("getQueryPlan_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField EX_FIELD_DESC = new org.apache.thrift.protocol.TField("ex", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new getQueryPlan_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new getQueryPlan_resultTupleSchemeFactory()); + } + + private org.apache.hadoop.hive.ql.plan.api.QueryPlan success; // required + private HiveServerException ex; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + EX((short)1, "ex"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // EX + return EX; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, org.apache.hadoop.hive.ql.plan.api.QueryPlan.class))); + tmpMap.put(_Fields.EX, new org.apache.thrift.meta_data.FieldMetaData("ex", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(getQueryPlan_result.class, metaDataMap); + } + + public getQueryPlan_result() { + } + + public getQueryPlan_result( + org.apache.hadoop.hive.ql.plan.api.QueryPlan success, + HiveServerException ex) + { + this(); + this.success = success; + this.ex = ex; + } + + /** + * Performs a deep copy on other. 
+ */ + public getQueryPlan_result(getQueryPlan_result other) { + if (other.isSetSuccess()) { + this.success = new org.apache.hadoop.hive.ql.plan.api.QueryPlan(other.success); + } + if (other.isSetEx()) { + this.ex = new HiveServerException(other.ex); + } + } + + public getQueryPlan_result deepCopy() { + return new getQueryPlan_result(this); + } + + @Override + public void clear() { + this.success = null; + this.ex = null; + } + + public org.apache.hadoop.hive.ql.plan.api.QueryPlan getSuccess() { + return this.success; + } + + public void setSuccess(org.apache.hadoop.hive.ql.plan.api.QueryPlan success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public HiveServerException getEx() { + return this.ex; + } + + public void setEx(HiveServerException ex) { + this.ex = ex; + } + + public void unsetEx() { + this.ex = null; + } + + /** Returns true if field ex is set (has been assigned a value) and false otherwise */ + public boolean isSetEx() { + return this.ex != null; + } + + public void setExIsSet(boolean value) { + if (!value) { + this.ex = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((org.apache.hadoop.hive.ql.plan.api.QueryPlan)value); + } + break; + + case EX: + if (value == null) { + unsetEx(); + } else { + setEx((HiveServerException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case EX: + return getEx(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case EX: + return isSetEx(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof getQueryPlan_result) + return this.equals((getQueryPlan_result)that); + return false; + } + + public boolean equals(getQueryPlan_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_ex = true && this.isSetEx(); + boolean that_present_ex = true && that.isSetEx(); + if (this_present_ex || that_present_ex) { + if (!(this_present_ex && that_present_ex)) + return false; + if (!this.ex.equals(that.ex)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + boolean present_success = true && (isSetSuccess()); + list.add(present_success); + if (present_success) + list.add(success); + + boolean present_ex = true && (isSetEx()); + list.add(present_ex); + if (present_ex) + list.add(ex); + + return list.hashCode(); + } + + @Override + public int compareTo(getQueryPlan_result 
other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(other.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, other.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetEx()).compareTo(other.isSetEx()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetEx()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.ex, other.ex); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("getQueryPlan_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("ex:"); + if (this.ex == null) { + sb.append("null"); + } else { + sb.append(this.ex); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class getQueryPlan_resultStandardSchemeFactory implements SchemeFactory { + public getQueryPlan_resultStandardScheme getScheme() { + return new getQueryPlan_resultStandardScheme(); + } + } + + private static class getQueryPlan_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, getQueryPlan_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new org.apache.hadoop.hive.ql.plan.api.QueryPlan(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // EX + if (schemeField.type == 
org.apache.thrift.protocol.TType.STRUCT) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, getQueryPlan_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.ex != null) { + oprot.writeFieldBegin(EX_FIELD_DESC); + struct.ex.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class getQueryPlan_resultTupleSchemeFactory implements SchemeFactory { + public getQueryPlan_resultTupleScheme getScheme() { + return new getQueryPlan_resultTupleScheme(); + } + } + + private static class getQueryPlan_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, getQueryPlan_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetEx()) { + optionals.set(1); + } + oprot.writeBitSet(optionals, 2); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetEx()) { + struct.ex.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, getQueryPlan_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(2); + if (incoming.get(0)) { + struct.success = new org.apache.hadoop.hive.ql.plan.api.QueryPlan(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.ex = new HiveServerException(); + struct.ex.read(iprot); + struct.setExIsSet(true); + } + } + } + + } + + public static class clean_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clean_args"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new clean_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clean_argsTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. 
+ */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clean_args.class, metaDataMap); + } + + public clean_args() { + } + + /** + * Performs a deep copy on other. + */ + public clean_args(clean_args other) { + } + + public clean_args deepCopy() { + return new clean_args(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof clean_args) + return this.equals((clean_args)that); + return false; + } + + public boolean equals(clean_args that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(clean_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("clean_args("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws 
java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class clean_argsStandardSchemeFactory implements SchemeFactory { + public clean_argsStandardScheme getScheme() { + return new clean_argsStandardScheme(); + } + } + + private static class clean_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, clean_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, clean_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class clean_argsTupleSchemeFactory implements SchemeFactory { + public clean_argsTupleScheme getScheme() { + return new clean_argsTupleScheme(); + } + } + + private static class clean_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, clean_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, clean_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + + public static class clean_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable, Comparable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("clean_result"); + + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new clean_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new clean_resultTupleSchemeFactory()); + } + + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { +; + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(clean_result.class, metaDataMap); + } + + public clean_result() { + } + + /** + * Performs a deep copy on other. + */ + public clean_result(clean_result other) { + } + + public clean_result deepCopy() { + return new clean_result(this); + } + + @Override + public void clear() { + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof clean_result) + return this.equals((clean_result)that); + return false; + } + + public boolean equals(clean_result that) { + if (that == null) + return false; + + return true; + } + + @Override + public int hashCode() { + List list = new ArrayList(); + + return list.hashCode(); + } + + @Override + public int compareTo(clean_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("clean_result("); + boolean first = true; + + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class 
clean_resultStandardSchemeFactory implements SchemeFactory { + public clean_resultStandardScheme getScheme() { + return new clean_resultStandardScheme(); + } + } + + private static class clean_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, clean_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, clean_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class clean_resultTupleSchemeFactory implements SchemeFactory { + public clean_resultTupleScheme getScheme() { + return new clean_resultTupleScheme(); + } + } + + private static class clean_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, clean_result struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, clean_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + } + } + + } + +} diff --git a/service/src/gen/thrift/gen-php/ThriftHive.php b/service/src/gen/thrift/gen-php/ThriftHive.php new file mode 100644 index 0000000..23dc8fd --- /dev/null +++ b/service/src/gen/thrift/gen-php/ThriftHive.php @@ -0,0 +1,1943 @@ +send_execute($query); + $this->recv_execute(); + } + + public function send_execute($query) + { + $args = new \ThriftHive_execute_args(); + $args->query = $query; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'execute', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('execute', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_execute() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_execute_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_execute_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->ex !== null) { + throw $result->ex; + } + return; + } + + public function fetchOne() + { + $this->send_fetchOne(); + return $this->recv_fetchOne(); + } + + public function send_fetchOne() + { + $args = new \ThriftHive_fetchOne_args(); + $bin_accel = ($this->output_ instanceof 
TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'fetchOne', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('fetchOne', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_fetchOne() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_fetchOne_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_fetchOne_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("fetchOne failed: unknown result"); + } + + public function fetchN($numRows) + { + $this->send_fetchN($numRows); + return $this->recv_fetchN(); + } + + public function send_fetchN($numRows) + { + $args = new \ThriftHive_fetchN_args(); + $args->numRows = $numRows; + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'fetchN', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('fetchN', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_fetchN() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_fetchN_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_fetchN_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("fetchN failed: unknown result"); + } + + public function fetchAll() + { + $this->send_fetchAll(); + return $this->recv_fetchAll(); + } + + public function send_fetchAll() + { + $args = new \ThriftHive_fetchAll_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'fetchAll', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('fetchAll', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + 
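+    // writeMessageBegin()/write()/writeMessageEnd() only assemble the CALL
+    // frame (method name, sequence id, argument struct) in the output
+    // protocol; nothing reaches the server until the transport is flushed
+    // below. The $bin_accel branch above performs the same encoding natively
+    // through the thrift_protocol C extension when it is loaded.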
$this->output_->getTransport()->flush(); + } + } + + public function recv_fetchAll() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_fetchAll_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_fetchAll_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("fetchAll failed: unknown result"); + } + + public function getSchema() + { + $this->send_getSchema(); + return $this->recv_getSchema(); + } + + public function send_getSchema() + { + $args = new \ThriftHive_getSchema_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'getSchema', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('getSchema', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_getSchema() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_getSchema_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_getSchema_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("getSchema failed: unknown result"); + } + + public function getThriftSchema() + { + $this->send_getThriftSchema(); + return $this->recv_getThriftSchema(); + } + + public function send_getThriftSchema() + { + $args = new \ThriftHive_getThriftSchema_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'getThriftSchema', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('getThriftSchema', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_getThriftSchema() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_getThriftSchema_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname 
= null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_getThriftSchema_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("getThriftSchema failed: unknown result"); + } + + public function getClusterStatus() + { + $this->send_getClusterStatus(); + return $this->recv_getClusterStatus(); + } + + public function send_getClusterStatus() + { + $args = new \ThriftHive_getClusterStatus_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'getClusterStatus', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('getClusterStatus', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_getClusterStatus() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_getClusterStatus_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_getClusterStatus_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("getClusterStatus failed: unknown result"); + } + + public function getQueryPlan() + { + $this->send_getQueryPlan(); + return $this->recv_getQueryPlan(); + } + + public function send_getQueryPlan() + { + $args = new \ThriftHive_getQueryPlan_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'getQueryPlan', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('getQueryPlan', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_getQueryPlan() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_getQueryPlan_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_getQueryPlan_result(); + 
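+      // A server-side failure arrives as a TMessageType::EXCEPTION frame and
+      // was rethrown above; a normal reply is decoded into this result struct
+      // by read() below. The checks that follow define the caller's contract:
+      // a set success field is returned, a set ex is thrown, and an empty
+      // result for this non-void method raises the generic "unknown result"
+      // exception.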
$result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->ex !== null) { + throw $result->ex; + } + throw new \Exception("getQueryPlan failed: unknown result"); + } + + public function clean() + { + $this->send_clean(); + $this->recv_clean(); + } + + public function send_clean() + { + $args = new \ThriftHive_clean_args(); + $bin_accel = ($this->output_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'clean', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('clean', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_clean() + { + $bin_accel = ($this->input_ instanceof TBinaryProtocolAccelerated) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\ThriftHive_clean_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \ThriftHive_clean_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + return; + } + +} + +// HELPER FUNCTIONS AND STRUCTURES + +class ThriftHive_execute_args { + static $_TSPEC; + + /** + * @var string + */ + public $query = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'query', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['query'])) { + $this->query = $vals['query']; + } + } + } + + public function getName() { + return 'ThriftHive_execute_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->query); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_execute_args'); + if ($this->query !== null) { + $xfer += $output->writeFieldBegin('query', TType::STRING, 1); + $xfer += $output->writeString($this->query); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_execute_result { + static $_TSPEC; + + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 
'ThriftHive_execute_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_execute_result'); + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_fetchOne_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_fetchOne_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_fetchOne_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_fetchOne_result { + static $_TSPEC; + + /** + * @var string + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRING, + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_fetchOne_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_fetchOne_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', 
TType::STRING, 0); + $xfer += $output->writeString($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_fetchN_args { + static $_TSPEC; + + /** + * @var int + */ + public $numRows = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'numRows', + 'type' => TType::I32, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['numRows'])) { + $this->numRows = $vals['numRows']; + } + } + } + + public function getName() { + return 'ThriftHive_fetchN_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->numRows); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_fetchN_args'); + if ($this->numRows !== null) { + $xfer += $output->writeFieldBegin('numRows', TType::I32, 1); + $xfer += $output->writeI32($this->numRows); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_fetchN_result { + static $_TSPEC; + + /** + * @var string[] + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_fetchN_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size0 = 0; + $_etype3 = 0; + $xfer += $input->readListBegin($_etype3, $_size0); + for ($_i4 = 0; $_i4 < $_size0; ++$_i4) + { + $elem5 = null; + $xfer += $input->readString($elem5); + $this->success []= $elem5; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } 
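+  // $_TSPEC mirrors the IDL result signature: field 0 is the return slot
+  // ('success', a TType::LST of TType::STRING rows) and field 1 the declared
+  // HiveServerException. read() above rebuilds the list element by element;
+  // write() below emits writeListBegin(TType::STRING, count) and one
+  // writeString() per row. Callers never see this struct. A sketch of typical
+  // use, assuming the Thrift PHP runtime's TSocket, TBufferedTransport and
+  // TBinaryProtocol plus a HiveServer1 endpoint (hypothetical host and port):
+  //
+  //   $transport = new TBufferedTransport(new TSocket('localhost', 10000));
+  //   $client = new ThriftHiveClient(new TBinaryProtocol($transport));
+  //   $transport->open();
+  //   $client->execute('SELECT * FROM src');
+  //   $rows = $client->fetchN(100);   // array of strings, one per row
+  //   $transport->close();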
+ + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_fetchN_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter6) + { + $xfer += $output->writeString($iter6); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_fetchAll_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_fetchAll_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_fetchAll_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_fetchAll_result { + static $_TSPEC; + + /** + * @var string[] + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::LST, + 'etype' => TType::STRING, + 'elem' => array( + 'type' => TType::STRING, + ), + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_fetchAll_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::LST) { + $this->success = array(); + $_size7 = 0; + $_etype10 = 0; + $xfer += $input->readListBegin($_etype10, $_size7); + for ($_i11 = 0; $_i11 < $_size7; ++$_i11) + { + $elem12 = null; + $xfer += $input->readString($elem12); + $this->success []= $elem12; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += 
$output->writeStructBegin('ThriftHive_fetchAll_result'); + if ($this->success !== null) { + if (!is_array($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::LST, 0); + { + $output->writeListBegin(TType::STRING, count($this->success)); + { + foreach ($this->success as $iter13) + { + $xfer += $output->writeString($iter13); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getSchema_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_getSchema_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getSchema_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getSchema_result { + static $_TSPEC; + + /** + * @var \metastore\Schema + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\Schema', + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_getSchema_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\Schema(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getSchema_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); 
+ $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getThriftSchema_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_getThriftSchema_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getThriftSchema_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getThriftSchema_result { + static $_TSPEC; + + /** + * @var \metastore\Schema + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\metastore\Schema', + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_getThriftSchema_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \metastore\Schema(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getThriftSchema_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getClusterStatus_args { + 
static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_getClusterStatus_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getClusterStatus_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getClusterStatus_result { + static $_TSPEC; + + /** + * @var \HiveClusterStatus + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\HiveClusterStatus', + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_getClusterStatus_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \HiveClusterStatus(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getClusterStatus_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getQueryPlan_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_getQueryPlan_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += 
$input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getQueryPlan_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_getQueryPlan_result { + static $_TSPEC; + + /** + * @var \QueryPlan + */ + public $success = null; + /** + * @var \HiveServerException + */ + public $ex = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::STRUCT, + 'class' => '\QueryPlan', + ), + 1 => array( + 'var' => 'ex', + 'type' => TType::STRUCT, + 'class' => '\HiveServerException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['ex'])) { + $this->ex = $vals['ex']; + } + } + } + + public function getName() { + return 'ThriftHive_getQueryPlan_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::STRUCT) { + $this->success = new \QueryPlan(); + $xfer += $this->success->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->ex = new \HiveServerException(); + $xfer += $this->ex->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_getQueryPlan_result'); + if ($this->success !== null) { + if (!is_object($this->success)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('success', TType::STRUCT, 0); + $xfer += $this->success->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->ex !== null) { + $xfer += $output->writeFieldBegin('ex', TType::STRUCT, 1); + $xfer += $this->ex->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHive_clean_args { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_clean_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_clean_args'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + 
return $xfer; + } + +} + +class ThriftHive_clean_result { + static $_TSPEC; + + + public function __construct() { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + ); + } + } + + public function getName() { + return 'ThriftHive_clean_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHive_clean_result'); + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + + diff --git a/service/src/gen/thrift/gen-php/Types.php b/service/src/gen/thrift/gen-php/Types.php new file mode 100644 index 0000000..cbf4c29 --- /dev/null +++ b/service/src/gen/thrift/gen-php/Types.php @@ -0,0 +1,338 @@ +<?php +/** + * Autogenerated by Thrift Compiler (0.9.3) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +use Thrift\Base\TBase; +use Thrift\Type\TType; +use Thrift\Type\TMessageType; +use Thrift\Exception\TException; +use Thrift\Exception\TProtocolException; +use Thrift\Protocol\TProtocol; +use Thrift\Protocol\TBinaryProtocolAccelerated; +use Thrift\Exception\TApplicationException; + +final class JobTrackerState { + const INITIALIZING = 1; + const RUNNING = 2; + static public $__names = array( + 1 => 'INITIALIZING', + 2 => 'RUNNING', + ); +} + +class HiveClusterStatus { + static $_TSPEC; + + /** + * @var int + */ + public $taskTrackers = null; + /** + * @var int + */ + public $mapTasks = null; + /** + * @var int + */ + public $reduceTasks = null; + /** + * @var int + */ + public $maxMapTasks = null; + /** + * @var int + */ + public $maxReduceTasks = null; + /** + * @var int + */ + public $state = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'taskTrackers', + 'type' => TType::I32, + ), + 2 => array( + 'var' => 'mapTasks', + 'type' => TType::I32, + ), + 3 => array( + 'var' => 'reduceTasks', + 'type' => TType::I32, + ), + 4 => array( + 'var' => 'maxMapTasks', + 'type' => TType::I32, + ), + 5 => array( + 'var' => 'maxReduceTasks', + 'type' => TType::I32, + ), + 6 => array( + 'var' => 'state', + 'type' => TType::I32, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['taskTrackers'])) { + $this->taskTrackers = $vals['taskTrackers']; + } + if (isset($vals['mapTasks'])) { + $this->mapTasks = $vals['mapTasks']; + } + if (isset($vals['reduceTasks'])) { + $this->reduceTasks = $vals['reduceTasks']; + } + if (isset($vals['maxMapTasks'])) { + $this->maxMapTasks = $vals['maxMapTasks']; + } + if (isset($vals['maxReduceTasks'])) { + $this->maxReduceTasks = $vals['maxReduceTasks']; + } + if (isset($vals['state'])) { + $this->state = $vals['state']; + } + } + } + + public function getName() { + return 'HiveClusterStatus'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->taskTrackers); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->mapTasks); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->reduceTasks); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->maxMapTasks); + } else { + $xfer += $input->skip($ftype); + } + break; + case 5: + if ($ftype == TType::I32) { +
$xfer += $input->readI32($this->maxReduceTasks); + } else { + $xfer += $input->skip($ftype); + } + break; + case 6: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->state); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('HiveClusterStatus'); + if ($this->taskTrackers !== null) { + $xfer += $output->writeFieldBegin('taskTrackers', TType::I32, 1); + $xfer += $output->writeI32($this->taskTrackers); + $xfer += $output->writeFieldEnd(); + } + if ($this->mapTasks !== null) { + $xfer += $output->writeFieldBegin('mapTasks', TType::I32, 2); + $xfer += $output->writeI32($this->mapTasks); + $xfer += $output->writeFieldEnd(); + } + if ($this->reduceTasks !== null) { + $xfer += $output->writeFieldBegin('reduceTasks', TType::I32, 3); + $xfer += $output->writeI32($this->reduceTasks); + $xfer += $output->writeFieldEnd(); + } + if ($this->maxMapTasks !== null) { + $xfer += $output->writeFieldBegin('maxMapTasks', TType::I32, 4); + $xfer += $output->writeI32($this->maxMapTasks); + $xfer += $output->writeFieldEnd(); + } + if ($this->maxReduceTasks !== null) { + $xfer += $output->writeFieldBegin('maxReduceTasks', TType::I32, 5); + $xfer += $output->writeI32($this->maxReduceTasks); + $xfer += $output->writeFieldEnd(); + } + if ($this->state !== null) { + $xfer += $output->writeFieldBegin('state', TType::I32, 6); + $xfer += $output->writeI32($this->state); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class HiveServerException extends TException { + static $_TSPEC; + + /** + * @var string + */ + public $message = null; + /** + * @var int + */ + public $errorCode = null; + /** + * @var string + */ + public $SQLState = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'message', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'errorCode', + 'type' => TType::I32, + ), + 3 => array( + 'var' => 'SQLState', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['message'])) { + $this->message = $vals['message']; + } + if (isset($vals['errorCode'])) { + $this->errorCode = $vals['errorCode']; + } + if (isset($vals['SQLState'])) { + $this->SQLState = $vals['SQLState']; + } + } + } + + public function getName() { + return 'HiveServerException'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->message); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::I32) { + $xfer += $input->readI32($this->errorCode); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->SQLState); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; 
+ $xfer += $output->writeStructBegin('HiveServerException'); + if ($this->message !== null) { + $xfer += $output->writeFieldBegin('message', TType::STRING, 1); + $xfer += $output->writeString($this->message); + $xfer += $output->writeFieldEnd(); + } + if ($this->errorCode !== null) { + $xfer += $output->writeFieldBegin('errorCode', TType::I32, 2); + $xfer += $output->writeI32($this->errorCode); + $xfer += $output->writeFieldEnd(); + } + if ($this->SQLState !== null) { + $xfer += $output->writeFieldBegin('SQLState', TType::STRING, 3); + $xfer += $output->writeString($this->SQLState); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + + diff --git a/service/src/gen/thrift/gen-py/__init__.py b/service/src/gen/thrift/gen-py/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote new file mode 100755 index 0000000..1fe610d --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive-remote @@ -0,0 +1,1256 @@ +#!/usr/bin/env python +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +import sys +import pprint +from urlparse import urlparse +from thrift.transport import TTransport +from thrift.transport import TSocket +from thrift.transport import TSSLSocket +from thrift.transport import THttpClient +from thrift.protocol import TBinaryProtocol + +from hive_service import ThriftHive +from hive_service.ttypes import * + +if len(sys.argv) <= 1 or sys.argv[1] == '--help': + print('') + print('Usage: ' + sys.argv[0] + ' [-h host[:port]] [-u url] [-f[ramed]] [-s[sl]] function [arg1 [arg2...]]') + print('') + print('Functions:') + print(' void execute(string query)') + print(' string fetchOne()') + print(' fetchN(i32 numRows)') + print(' fetchAll()') + print(' Schema getSchema()') + print(' Schema getThriftSchema()') + print(' HiveClusterStatus getClusterStatus()') + print(' QueryPlan getQueryPlan()') + print(' void clean()') + print(' string getMetaConf(string key)') + print(' void setMetaConf(string key, string value)') + print(' void create_database(Database database)') + print(' Database get_database(string name)') + print(' void drop_database(string name, bool deleteData, bool cascade)') + print(' get_databases(string pattern)') + print(' get_all_databases()') + print(' void alter_database(string dbname, Database db)') + print(' Type get_type(string name)') + print(' bool create_type(Type type)') + print(' bool drop_type(string type)') + print(' get_type_all(string name)') + print(' get_fields(string db_name, string table_name)') + print(' get_fields_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') + print(' get_schema(string db_name, string table_name)') + print(' get_schema_with_environment_context(string db_name, string table_name, EnvironmentContext environment_context)') + print(' void create_table(Table tbl)') + print(' void create_table_with_environment_context(Table tbl, EnvironmentContext environment_context)') + print(' void drop_table(string dbname, string name, bool deleteData)') + print(' void drop_table_with_environment_context(string dbname, string name, bool deleteData, EnvironmentContext environment_context)') + print(' get_tables(string db_name, string pattern)') + print(' get_table_meta(string 
db_patterns, string tbl_patterns, tbl_types)') + print(' get_all_tables(string db_name)') + print(' Table get_table(string dbname, string tbl_name)') + print(' get_table_objects_by_name(string dbname, tbl_names)') + print(' get_table_names_by_filter(string dbname, string filter, i16 max_tables)') + print(' void alter_table(string dbname, string tbl_name, Table new_tbl)') + print(' void alter_table_with_environment_context(string dbname, string tbl_name, Table new_tbl, EnvironmentContext environment_context)') + print(' void alter_table_with_cascade(string dbname, string tbl_name, Table new_tbl, bool cascade)') + print(' Partition add_partition(Partition new_part)') + print(' Partition add_partition_with_environment_context(Partition new_part, EnvironmentContext environment_context)') + print(' i32 add_partitions( new_parts)') + print(' i32 add_partitions_pspec( new_parts)') + print(' Partition append_partition(string db_name, string tbl_name, part_vals)') + print(' AddPartitionsResult add_partitions_req(AddPartitionsRequest request)') + print(' Partition append_partition_with_environment_context(string db_name, string tbl_name, part_vals, EnvironmentContext environment_context)') + print(' Partition append_partition_by_name(string db_name, string tbl_name, string part_name)') + print(' Partition append_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, EnvironmentContext environment_context)') + print(' bool drop_partition(string db_name, string tbl_name, part_vals, bool deleteData)') + print(' bool drop_partition_with_environment_context(string db_name, string tbl_name, part_vals, bool deleteData, EnvironmentContext environment_context)') + print(' bool drop_partition_by_name(string db_name, string tbl_name, string part_name, bool deleteData)') + print(' bool drop_partition_by_name_with_environment_context(string db_name, string tbl_name, string part_name, bool deleteData, EnvironmentContext environment_context)') + print(' DropPartitionsResult drop_partitions_req(DropPartitionsRequest req)') + print(' Partition get_partition(string db_name, string tbl_name, part_vals)') + print(' Partition exchange_partition( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') + print(' exchange_partitions( partitionSpecs, string source_db, string source_table_name, string dest_db, string dest_table_name)') + print(' Partition get_partition_with_auth(string db_name, string tbl_name, part_vals, string user_name, group_names)') + print(' Partition get_partition_by_name(string db_name, string tbl_name, string part_name)') + print(' get_partitions(string db_name, string tbl_name, i16 max_parts)') + print(' get_partitions_with_auth(string db_name, string tbl_name, i16 max_parts, string user_name, group_names)') + print(' get_partitions_pspec(string db_name, string tbl_name, i32 max_parts)') + print(' get_partition_names(string db_name, string tbl_name, i16 max_parts)') + print(' get_partitions_ps(string db_name, string tbl_name, part_vals, i16 max_parts)') + print(' get_partitions_ps_with_auth(string db_name, string tbl_name, part_vals, i16 max_parts, string user_name, group_names)') + print(' get_partition_names_ps(string db_name, string tbl_name, part_vals, i16 max_parts)') + print(' get_partitions_by_filter(string db_name, string tbl_name, string filter, i16 max_parts)') + print(' get_part_specs_by_filter(string db_name, string tbl_name, string filter, i32 max_parts)') + print(' PartitionsByExprResult 
get_partitions_by_expr(PartitionsByExprRequest req)') + print(' i32 get_num_partitions_by_filter(string db_name, string tbl_name, string filter)') + print(' get_partitions_by_names(string db_name, string tbl_name, names)') + print(' void alter_partition(string db_name, string tbl_name, Partition new_part)') + print(' void alter_partitions(string db_name, string tbl_name, new_parts)') + print(' void alter_partitions_with_environment_context(string db_name, string tbl_name, new_parts, EnvironmentContext environment_context)') + print(' void alter_partition_with_environment_context(string db_name, string tbl_name, Partition new_part, EnvironmentContext environment_context)') + print(' void rename_partition(string db_name, string tbl_name, part_vals, Partition new_part)') + print(' bool partition_name_has_valid_characters( part_vals, bool throw_exception)') + print(' string get_config_value(string name, string defaultValue)') + print(' partition_name_to_vals(string part_name)') + print(' partition_name_to_spec(string part_name)') + print(' void markPartitionForEvent(string db_name, string tbl_name, part_vals, PartitionEventType eventType)') + print(' bool isPartitionMarkedForEvent(string db_name, string tbl_name, part_vals, PartitionEventType eventType)') + print(' Index add_index(Index new_index, Table index_table)') + print(' void alter_index(string dbname, string base_tbl_name, string idx_name, Index new_idx)') + print(' bool drop_index_by_name(string db_name, string tbl_name, string index_name, bool deleteData)') + print(' Index get_index_by_name(string db_name, string tbl_name, string index_name)') + print(' get_indexes(string db_name, string tbl_name, i16 max_indexes)') + print(' get_index_names(string db_name, string tbl_name, i16 max_indexes)') + print(' get_primary_keys(string db_name, string tbl_name)') + print(' get_foreign_keys(string parent_db_name, string parent_tbl_name, string foreign_db_name, string foreign_tbl_name)') + print(' bool update_table_column_statistics(ColumnStatistics stats_obj)') + print(' bool update_partition_column_statistics(ColumnStatistics stats_obj)') + print(' ColumnStatistics get_table_column_statistics(string db_name, string tbl_name, string col_name)') + print(' ColumnStatistics get_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') + print(' TableStatsResult get_table_statistics_req(TableStatsRequest request)') + print(' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)') + print(' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)') + print(' bool set_aggr_stats_for(SetPartitionsStatsRequest request)') + print(' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)') + print(' bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)') + print(' void create_function(Function func)') + print(' void drop_function(string dbName, string funcName)') + print(' void alter_function(string dbName, string funcName, Function newFunc)') + print(' get_functions(string dbName, string pattern)') + print(' Function get_function(string dbName, string funcName)') + print(' GetAllFunctionsResponse get_all_functions()') + print(' bool create_role(Role role)') + print(' bool drop_role(string role_name)') + print(' get_role_names()') + print(' bool grant_role(string role_name, string principal_name, PrincipalType principal_type, string grantor, PrincipalType grantorType, bool grant_option)') + print(' 
bool revoke_role(string role_name, string principal_name, PrincipalType principal_type)') + print(' list_roles(string principal_name, PrincipalType principal_type)') + print(' GrantRevokeRoleResponse grant_revoke_role(GrantRevokeRoleRequest request)') + print(' GetPrincipalsInRoleResponse get_principals_in_role(GetPrincipalsInRoleRequest request)') + print(' GetRoleGrantsForPrincipalResponse get_role_grants_for_principal(GetRoleGrantsForPrincipalRequest request)') + print(' PrincipalPrivilegeSet get_privilege_set(HiveObjectRef hiveObject, string user_name, group_names)') + print(' list_privileges(string principal_name, PrincipalType principal_type, HiveObjectRef hiveObject)') + print(' bool grant_privileges(PrivilegeBag privileges)') + print(' bool revoke_privileges(PrivilegeBag privileges)') + print(' GrantRevokePrivilegeResponse grant_revoke_privileges(GrantRevokePrivilegeRequest request)') + print(' set_ugi(string user_name, group_names)') + print(' string get_delegation_token(string token_owner, string renewer_kerberos_principal_name)') + print(' i64 renew_delegation_token(string token_str_form)') + print(' void cancel_delegation_token(string token_str_form)') + print(' bool add_token(string token_identifier, string delegation_token)') + print(' bool remove_token(string token_identifier)') + print(' string get_token(string token_identifier)') + print(' get_all_token_identifiers()') + print(' i32 add_master_key(string key)') + print(' void update_master_key(i32 seq_number, string key)') + print(' bool remove_master_key(i32 key_seq)') + print(' get_master_keys()') + print(' GetOpenTxnsResponse get_open_txns()') + print(' GetOpenTxnsInfoResponse get_open_txns_info()') + print(' OpenTxnsResponse open_txns(OpenTxnRequest rqst)') + print(' void abort_txn(AbortTxnRequest rqst)') + print(' void commit_txn(CommitTxnRequest rqst)') + print(' LockResponse lock(LockRequest rqst)') + print(' LockResponse check_lock(CheckLockRequest rqst)') + print(' void unlock(UnlockRequest rqst)') + print(' ShowLocksResponse show_locks(ShowLocksRequest rqst)') + print(' void heartbeat(HeartbeatRequest ids)') + print(' HeartbeatTxnRangeResponse heartbeat_txn_range(HeartbeatTxnRangeRequest txns)') + print(' void compact(CompactionRequest rqst)') + print(' ShowCompactResponse show_compact(ShowCompactRequest rqst)') + print(' void add_dynamic_partitions(AddDynamicPartitions rqst)') + print(' NotificationEventResponse get_next_notification(NotificationEventRequest rqst)') + print(' CurrentNotificationEventId get_current_notificationEventId()') + print(' FireEventResponse fire_listener_event(FireEventRequest rqst)') + print(' void flushCache()') + print(' GetFileMetadataByExprResult get_file_metadata_by_expr(GetFileMetadataByExprRequest req)') + print(' GetFileMetadataResult get_file_metadata(GetFileMetadataRequest req)') + print(' PutFileMetadataResult put_file_metadata(PutFileMetadataRequest req)') + print(' ClearFileMetadataResult clear_file_metadata(ClearFileMetadataRequest req)') + print(' CacheFileMetadataResult cache_file_metadata(CacheFileMetadataRequest req)') + print(' GetChangeVersionResult get_change_version(GetChangeVersionRequest req)') + print(' string getName()') + print(' string getVersion()') + print(' fb_status getStatus()') + print(' string getStatusDetails()') + print(' getCounters()') + print(' i64 getCounter(string key)') + print(' void setOption(string key, string value)') + print(' string getOption(string key)') + print(' getOptions()') + print(' string getCpuProfile(i32 profileDurationInSec)') 
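+  # Note: the entries from getName() onward are not Hive-specific calls. The
+  # ThriftHive service extends ThriftHiveMetastore, which itself extends the
+  # fb303 FacebookService base service, so its standard introspection and
+  # control methods (getName, getVersion, getStatus, getCounters, aliveSince,
+  # reinitialize, shutdown, ...) are exposed through this client as well.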
+ print(' i64 aliveSince()') + print(' void reinitialize()') + print(' void shutdown()') + print('') + sys.exit(0) + +pp = pprint.PrettyPrinter(indent = 2) +host = 'localhost' +port = 9090 +uri = '' +framed = False +ssl = False +http = False +argi = 1 + +if sys.argv[argi] == '-h': + parts = sys.argv[argi+1].split(':') + host = parts[0] + if len(parts) > 1: + port = int(parts[1]) + argi += 2 + +if sys.argv[argi] == '-u': + url = urlparse(sys.argv[argi+1]) + parts = url[1].split(':') + host = parts[0] + if len(parts) > 1: + port = int(parts[1]) + else: + port = 80 + uri = url[2] + if url[4]: + uri += '?%s' % url[4] + http = True + argi += 2 + +if sys.argv[argi] == '-f' or sys.argv[argi] == '-framed': + framed = True + argi += 1 + +if sys.argv[argi] == '-s' or sys.argv[argi] == '-ssl': + ssl = True + argi += 1 + +cmd = sys.argv[argi] +args = sys.argv[argi+1:] + +if http: + transport = THttpClient.THttpClient(host, port, uri) +else: + socket = TSSLSocket.TSSLSocket(host, port, validate=False) if ssl else TSocket.TSocket(host, port) + if framed: + transport = TTransport.TFramedTransport(socket) + else: + transport = TTransport.TBufferedTransport(socket) +protocol = TBinaryProtocol.TBinaryProtocol(transport) +client = ThriftHive.Client(protocol) +transport.open() + +if cmd == 'execute': + if len(args) != 1: + print('execute requires 1 args') + sys.exit(1) + pp.pprint(client.execute(args[0],)) + +elif cmd == 'fetchOne': + if len(args) != 0: + print('fetchOne requires 0 args') + sys.exit(1) + pp.pprint(client.fetchOne()) + +elif cmd == 'fetchN': + if len(args) != 1: + print('fetchN requires 1 args') + sys.exit(1) + pp.pprint(client.fetchN(eval(args[0]),)) + +elif cmd == 'fetchAll': + if len(args) != 0: + print('fetchAll requires 0 args') + sys.exit(1) + pp.pprint(client.fetchAll()) + +elif cmd == 'getSchema': + if len(args) != 0: + print('getSchema requires 0 args') + sys.exit(1) + pp.pprint(client.getSchema()) + +elif cmd == 'getThriftSchema': + if len(args) != 0: + print('getThriftSchema requires 0 args') + sys.exit(1) + pp.pprint(client.getThriftSchema()) + +elif cmd == 'getClusterStatus': + if len(args) != 0: + print('getClusterStatus requires 0 args') + sys.exit(1) + pp.pprint(client.getClusterStatus()) + +elif cmd == 'getQueryPlan': + if len(args) != 0: + print('getQueryPlan requires 0 args') + sys.exit(1) + pp.pprint(client.getQueryPlan()) + +elif cmd == 'clean': + if len(args) != 0: + print('clean requires 0 args') + sys.exit(1) + pp.pprint(client.clean()) + +elif cmd == 'getMetaConf': + if len(args) != 1: + print('getMetaConf requires 1 args') + sys.exit(1) + pp.pprint(client.getMetaConf(args[0],)) + +elif cmd == 'setMetaConf': + if len(args) != 2: + print('setMetaConf requires 2 args') + sys.exit(1) + pp.pprint(client.setMetaConf(args[0],args[1],)) + +elif cmd == 'create_database': + if len(args) != 1: + print('create_database requires 1 args') + sys.exit(1) + pp.pprint(client.create_database(eval(args[0]),)) + +elif cmd == 'get_database': + if len(args) != 1: + print('get_database requires 1 args') + sys.exit(1) + pp.pprint(client.get_database(args[0],)) + +elif cmd == 'drop_database': + if len(args) != 3: + print('drop_database requires 3 args') + sys.exit(1) + pp.pprint(client.drop_database(args[0],eval(args[1]),eval(args[2]),)) + +elif cmd == 'get_databases': + if len(args) != 1: + print('get_databases requires 1 args') + sys.exit(1) + pp.pprint(client.get_databases(args[0],)) + +elif cmd == 'get_all_databases': + if len(args) != 0: + print('get_all_databases requires 0 args') + 
sys.exit(1) + pp.pprint(client.get_all_databases()) + +elif cmd == 'alter_database': + if len(args) != 2: + print('alter_database requires 2 args') + sys.exit(1) + pp.pprint(client.alter_database(args[0],eval(args[1]),)) + +elif cmd == 'get_type': + if len(args) != 1: + print('get_type requires 1 args') + sys.exit(1) + pp.pprint(client.get_type(args[0],)) + +elif cmd == 'create_type': + if len(args) != 1: + print('create_type requires 1 args') + sys.exit(1) + pp.pprint(client.create_type(eval(args[0]),)) + +elif cmd == 'drop_type': + if len(args) != 1: + print('drop_type requires 1 args') + sys.exit(1) + pp.pprint(client.drop_type(args[0],)) + +elif cmd == 'get_type_all': + if len(args) != 1: + print('get_type_all requires 1 args') + sys.exit(1) + pp.pprint(client.get_type_all(args[0],)) + +elif cmd == 'get_fields': + if len(args) != 2: + print('get_fields requires 2 args') + sys.exit(1) + pp.pprint(client.get_fields(args[0],args[1],)) + +elif cmd == 'get_fields_with_environment_context': + if len(args) != 3: + print('get_fields_with_environment_context requires 3 args') + sys.exit(1) + pp.pprint(client.get_fields_with_environment_context(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_schema': + if len(args) != 2: + print('get_schema requires 2 args') + sys.exit(1) + pp.pprint(client.get_schema(args[0],args[1],)) + +elif cmd == 'get_schema_with_environment_context': + if len(args) != 3: + print('get_schema_with_environment_context requires 3 args') + sys.exit(1) + pp.pprint(client.get_schema_with_environment_context(args[0],args[1],eval(args[2]),)) + +elif cmd == 'create_table': + if len(args) != 1: + print('create_table requires 1 args') + sys.exit(1) + pp.pprint(client.create_table(eval(args[0]),)) + +elif cmd == 'create_table_with_environment_context': + if len(args) != 2: + print('create_table_with_environment_context requires 2 args') + sys.exit(1) + pp.pprint(client.create_table_with_environment_context(eval(args[0]),eval(args[1]),)) + +elif cmd == 'drop_table': + if len(args) != 3: + print('drop_table requires 3 args') + sys.exit(1) + pp.pprint(client.drop_table(args[0],args[1],eval(args[2]),)) + +elif cmd == 'drop_table_with_environment_context': + if len(args) != 4: + print('drop_table_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.drop_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'get_tables': + if len(args) != 2: + print('get_tables requires 2 args') + sys.exit(1) + pp.pprint(client.get_tables(args[0],args[1],)) + +elif cmd == 'get_table_meta': + if len(args) != 3: + print('get_table_meta requires 3 args') + sys.exit(1) + pp.pprint(client.get_table_meta(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_all_tables': + if len(args) != 1: + print('get_all_tables requires 1 args') + sys.exit(1) + pp.pprint(client.get_all_tables(args[0],)) + +elif cmd == 'get_table': + if len(args) != 2: + print('get_table requires 2 args') + sys.exit(1) + pp.pprint(client.get_table(args[0],args[1],)) + +elif cmd == 'get_table_objects_by_name': + if len(args) != 2: + print('get_table_objects_by_name requires 2 args') + sys.exit(1) + pp.pprint(client.get_table_objects_by_name(args[0],eval(args[1]),)) + +elif cmd == 'get_table_names_by_filter': + if len(args) != 3: + print('get_table_names_by_filter requires 3 args') + sys.exit(1) + pp.pprint(client.get_table_names_by_filter(args[0],args[1],eval(args[2]),)) + +elif cmd == 'alter_table': + if len(args) != 3: + print('alter_table requires 3 args') + sys.exit(1) + 
pp.pprint(client.alter_table(args[0],args[1],eval(args[2]),)) + +elif cmd == 'alter_table_with_environment_context': + if len(args) != 4: + print('alter_table_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.alter_table_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'alter_table_with_cascade': + if len(args) != 4: + print('alter_table_with_cascade requires 4 args') + sys.exit(1) + pp.pprint(client.alter_table_with_cascade(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'add_partition': + if len(args) != 1: + print('add_partition requires 1 args') + sys.exit(1) + pp.pprint(client.add_partition(eval(args[0]),)) + +elif cmd == 'add_partition_with_environment_context': + if len(args) != 2: + print('add_partition_with_environment_context requires 2 args') + sys.exit(1) + pp.pprint(client.add_partition_with_environment_context(eval(args[0]),eval(args[1]),)) + +elif cmd == 'add_partitions': + if len(args) != 1: + print('add_partitions requires 1 args') + sys.exit(1) + pp.pprint(client.add_partitions(eval(args[0]),)) + +elif cmd == 'add_partitions_pspec': + if len(args) != 1: + print('add_partitions_pspec requires 1 args') + sys.exit(1) + pp.pprint(client.add_partitions_pspec(eval(args[0]),)) + +elif cmd == 'append_partition': + if len(args) != 3: + print('append_partition requires 3 args') + sys.exit(1) + pp.pprint(client.append_partition(args[0],args[1],eval(args[2]),)) + +elif cmd == 'add_partitions_req': + if len(args) != 1: + print('add_partitions_req requires 1 args') + sys.exit(1) + pp.pprint(client.add_partitions_req(eval(args[0]),)) + +elif cmd == 'append_partition_with_environment_context': + if len(args) != 4: + print('append_partition_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.append_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'append_partition_by_name': + if len(args) != 3: + print('append_partition_by_name requires 3 args') + sys.exit(1) + pp.pprint(client.append_partition_by_name(args[0],args[1],args[2],)) + +elif cmd == 'append_partition_by_name_with_environment_context': + if len(args) != 4: + print('append_partition_by_name_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.append_partition_by_name_with_environment_context(args[0],args[1],args[2],eval(args[3]),)) + +elif cmd == 'drop_partition': + if len(args) != 4: + print('drop_partition requires 4 args') + sys.exit(1) + pp.pprint(client.drop_partition(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'drop_partition_with_environment_context': + if len(args) != 5: + print('drop_partition_with_environment_context requires 5 args') + sys.exit(1) + pp.pprint(client.drop_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),eval(args[4]),)) + +elif cmd == 'drop_partition_by_name': + if len(args) != 4: + print('drop_partition_by_name requires 4 args') + sys.exit(1) + pp.pprint(client.drop_partition_by_name(args[0],args[1],args[2],eval(args[3]),)) + +elif cmd == 'drop_partition_by_name_with_environment_context': + if len(args) != 5: + print('drop_partition_by_name_with_environment_context requires 5 args') + sys.exit(1) + pp.pprint(client.drop_partition_by_name_with_environment_context(args[0],args[1],args[2],eval(args[3]),eval(args[4]),)) + +elif cmd == 'drop_partitions_req': + if len(args) != 1: + print('drop_partitions_req requires 1 args') + sys.exit(1) + 
pp.pprint(client.drop_partitions_req(eval(args[0]),)) + +elif cmd == 'get_partition': + if len(args) != 3: + print('get_partition requires 3 args') + sys.exit(1) + pp.pprint(client.get_partition(args[0],args[1],eval(args[2]),)) + +elif cmd == 'exchange_partition': + if len(args) != 5: + print('exchange_partition requires 5 args') + sys.exit(1) + pp.pprint(client.exchange_partition(eval(args[0]),args[1],args[2],args[3],args[4],)) + +elif cmd == 'exchange_partitions': + if len(args) != 5: + print('exchange_partitions requires 5 args') + sys.exit(1) + pp.pprint(client.exchange_partitions(eval(args[0]),args[1],args[2],args[3],args[4],)) + +elif cmd == 'get_partition_with_auth': + if len(args) != 5: + print('get_partition_with_auth requires 5 args') + sys.exit(1) + pp.pprint(client.get_partition_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),)) + +elif cmd == 'get_partition_by_name': + if len(args) != 3: + print('get_partition_by_name requires 3 args') + sys.exit(1) + pp.pprint(client.get_partition_by_name(args[0],args[1],args[2],)) + +elif cmd == 'get_partitions': + if len(args) != 3: + print('get_partitions requires 3 args') + sys.exit(1) + pp.pprint(client.get_partitions(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_partitions_with_auth': + if len(args) != 5: + print('get_partitions_with_auth requires 5 args') + sys.exit(1) + pp.pprint(client.get_partitions_with_auth(args[0],args[1],eval(args[2]),args[3],eval(args[4]),)) + +elif cmd == 'get_partitions_pspec': + if len(args) != 3: + print('get_partitions_pspec requires 3 args') + sys.exit(1) + pp.pprint(client.get_partitions_pspec(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_partition_names': + if len(args) != 3: + print('get_partition_names requires 3 args') + sys.exit(1) + pp.pprint(client.get_partition_names(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_partitions_ps': + if len(args) != 4: + print('get_partitions_ps requires 4 args') + sys.exit(1) + pp.pprint(client.get_partitions_ps(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'get_partitions_ps_with_auth': + if len(args) != 6: + print('get_partitions_ps_with_auth requires 6 args') + sys.exit(1) + pp.pprint(client.get_partitions_ps_with_auth(args[0],args[1],eval(args[2]),eval(args[3]),args[4],eval(args[5]),)) + +elif cmd == 'get_partition_names_ps': + if len(args) != 4: + print('get_partition_names_ps requires 4 args') + sys.exit(1) + pp.pprint(client.get_partition_names_ps(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'get_partitions_by_filter': + if len(args) != 4: + print('get_partitions_by_filter requires 4 args') + sys.exit(1) + pp.pprint(client.get_partitions_by_filter(args[0],args[1],args[2],eval(args[3]),)) + +elif cmd == 'get_part_specs_by_filter': + if len(args) != 4: + print('get_part_specs_by_filter requires 4 args') + sys.exit(1) + pp.pprint(client.get_part_specs_by_filter(args[0],args[1],args[2],eval(args[3]),)) + +elif cmd == 'get_partitions_by_expr': + if len(args) != 1: + print('get_partitions_by_expr requires 1 args') + sys.exit(1) + pp.pprint(client.get_partitions_by_expr(eval(args[0]),)) + +elif cmd == 'get_num_partitions_by_filter': + if len(args) != 3: + print('get_num_partitions_by_filter requires 3 args') + sys.exit(1) + pp.pprint(client.get_num_partitions_by_filter(args[0],args[1],args[2],)) + +elif cmd == 'get_partitions_by_names': + if len(args) != 3: + print('get_partitions_by_names requires 3 args') + sys.exit(1) + pp.pprint(client.get_partitions_by_names(args[0],args[1],eval(args[2]),)) + 
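+# Example invocation of a call like get_partitions_by_names above (hypothetical
+# host, port, and table names). Non-string arguments are eval()'d as Python
+# literals -- see eval(args[2]) above -- so quote lists as one shell word:
+#   ThriftHive-remote -h metastore.example.com:9083 get_partitions_by_names default web_logs "['ds=2016-01-01']"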
+elif cmd == 'alter_partition': + if len(args) != 3: + print('alter_partition requires 3 args') + sys.exit(1) + pp.pprint(client.alter_partition(args[0],args[1],eval(args[2]),)) + +elif cmd == 'alter_partitions': + if len(args) != 3: + print('alter_partitions requires 3 args') + sys.exit(1) + pp.pprint(client.alter_partitions(args[0],args[1],eval(args[2]),)) + +elif cmd == 'alter_partitions_with_environment_context': + if len(args) != 4: + print('alter_partitions_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.alter_partitions_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'alter_partition_with_environment_context': + if len(args) != 4: + print('alter_partition_with_environment_context requires 4 args') + sys.exit(1) + pp.pprint(client.alter_partition_with_environment_context(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'rename_partition': + if len(args) != 4: + print('rename_partition requires 4 args') + sys.exit(1) + pp.pprint(client.rename_partition(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'partition_name_has_valid_characters': + if len(args) != 2: + print('partition_name_has_valid_characters requires 2 args') + sys.exit(1) + pp.pprint(client.partition_name_has_valid_characters(eval(args[0]),eval(args[1]),)) + +elif cmd == 'get_config_value': + if len(args) != 2: + print('get_config_value requires 2 args') + sys.exit(1) + pp.pprint(client.get_config_value(args[0],args[1],)) + +elif cmd == 'partition_name_to_vals': + if len(args) != 1: + print('partition_name_to_vals requires 1 args') + sys.exit(1) + pp.pprint(client.partition_name_to_vals(args[0],)) + +elif cmd == 'partition_name_to_spec': + if len(args) != 1: + print('partition_name_to_spec requires 1 args') + sys.exit(1) + pp.pprint(client.partition_name_to_spec(args[0],)) + +elif cmd == 'markPartitionForEvent': + if len(args) != 4: + print('markPartitionForEvent requires 4 args') + sys.exit(1) + pp.pprint(client.markPartitionForEvent(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'isPartitionMarkedForEvent': + if len(args) != 4: + print('isPartitionMarkedForEvent requires 4 args') + sys.exit(1) + pp.pprint(client.isPartitionMarkedForEvent(args[0],args[1],eval(args[2]),eval(args[3]),)) + +elif cmd == 'add_index': + if len(args) != 2: + print('add_index requires 2 args') + sys.exit(1) + pp.pprint(client.add_index(eval(args[0]),eval(args[1]),)) + +elif cmd == 'alter_index': + if len(args) != 4: + print('alter_index requires 4 args') + sys.exit(1) + pp.pprint(client.alter_index(args[0],args[1],args[2],eval(args[3]),)) + +elif cmd == 'drop_index_by_name': + if len(args) != 4: + print('drop_index_by_name requires 4 args') + sys.exit(1) + pp.pprint(client.drop_index_by_name(args[0],args[1],args[2],eval(args[3]),)) + +elif cmd == 'get_index_by_name': + if len(args) != 3: + print('get_index_by_name requires 3 args') + sys.exit(1) + pp.pprint(client.get_index_by_name(args[0],args[1],args[2],)) + +elif cmd == 'get_indexes': + if len(args) != 3: + print('get_indexes requires 3 args') + sys.exit(1) + pp.pprint(client.get_indexes(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_index_names': + if len(args) != 3: + print('get_index_names requires 3 args') + sys.exit(1) + pp.pprint(client.get_index_names(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_primary_keys': + if len(args) != 2: + print('get_primary_keys requires 2 args') + sys.exit(1) + pp.pprint(client.get_primary_keys(args[0],args[1],)) + +elif cmd == 
'get_foreign_keys': + if len(args) != 4: + print('get_foreign_keys requires 4 args') + sys.exit(1) + pp.pprint(client.get_foreign_keys(args[0],args[1],args[2],args[3],)) + +elif cmd == 'update_table_column_statistics': + if len(args) != 1: + print('update_table_column_statistics requires 1 args') + sys.exit(1) + pp.pprint(client.update_table_column_statistics(eval(args[0]),)) + +elif cmd == 'update_partition_column_statistics': + if len(args) != 1: + print('update_partition_column_statistics requires 1 args') + sys.exit(1) + pp.pprint(client.update_partition_column_statistics(eval(args[0]),)) + +elif cmd == 'get_table_column_statistics': + if len(args) != 3: + print('get_table_column_statistics requires 3 args') + sys.exit(1) + pp.pprint(client.get_table_column_statistics(args[0],args[1],args[2],)) + +elif cmd == 'get_partition_column_statistics': + if len(args) != 4: + print('get_partition_column_statistics requires 4 args') + sys.exit(1) + pp.pprint(client.get_partition_column_statistics(args[0],args[1],args[2],args[3],)) + +elif cmd == 'get_table_statistics_req': + if len(args) != 1: + print('get_table_statistics_req requires 1 args') + sys.exit(1) + pp.pprint(client.get_table_statistics_req(eval(args[0]),)) + +elif cmd == 'get_partitions_statistics_req': + if len(args) != 1: + print('get_partitions_statistics_req requires 1 args') + sys.exit(1) + pp.pprint(client.get_partitions_statistics_req(eval(args[0]),)) + +elif cmd == 'get_aggr_stats_for': + if len(args) != 1: + print('get_aggr_stats_for requires 1 args') + sys.exit(1) + pp.pprint(client.get_aggr_stats_for(eval(args[0]),)) + +elif cmd == 'set_aggr_stats_for': + if len(args) != 1: + print('set_aggr_stats_for requires 1 args') + sys.exit(1) + pp.pprint(client.set_aggr_stats_for(eval(args[0]),)) + +elif cmd == 'delete_partition_column_statistics': + if len(args) != 4: + print('delete_partition_column_statistics requires 4 args') + sys.exit(1) + pp.pprint(client.delete_partition_column_statistics(args[0],args[1],args[2],args[3],)) + +elif cmd == 'delete_table_column_statistics': + if len(args) != 3: + print('delete_table_column_statistics requires 3 args') + sys.exit(1) + pp.pprint(client.delete_table_column_statistics(args[0],args[1],args[2],)) + +elif cmd == 'create_function': + if len(args) != 1: + print('create_function requires 1 args') + sys.exit(1) + pp.pprint(client.create_function(eval(args[0]),)) + +elif cmd == 'drop_function': + if len(args) != 2: + print('drop_function requires 2 args') + sys.exit(1) + pp.pprint(client.drop_function(args[0],args[1],)) + +elif cmd == 'alter_function': + if len(args) != 3: + print('alter_function requires 3 args') + sys.exit(1) + pp.pprint(client.alter_function(args[0],args[1],eval(args[2]),)) + +elif cmd == 'get_functions': + if len(args) != 2: + print('get_functions requires 2 args') + sys.exit(1) + pp.pprint(client.get_functions(args[0],args[1],)) + +elif cmd == 'get_function': + if len(args) != 2: + print('get_function requires 2 args') + sys.exit(1) + pp.pprint(client.get_function(args[0],args[1],)) + +elif cmd == 'get_all_functions': + if len(args) != 0: + print('get_all_functions requires 0 args') + sys.exit(1) + pp.pprint(client.get_all_functions()) + +elif cmd == 'create_role': + if len(args) != 1: + print('create_role requires 1 args') + sys.exit(1) + pp.pprint(client.create_role(eval(args[0]),)) + +elif cmd == 'drop_role': + if len(args) != 1: + print('drop_role requires 1 args') + sys.exit(1) + pp.pprint(client.drop_role(args[0],)) + +elif cmd == 'get_role_names': + if len(args) 
!= 0: + print('get_role_names requires 0 args') + sys.exit(1) + pp.pprint(client.get_role_names()) + +elif cmd == 'grant_role': + if len(args) != 6: + print('grant_role requires 6 args') + sys.exit(1) + pp.pprint(client.grant_role(args[0],args[1],eval(args[2]),args[3],eval(args[4]),eval(args[5]),)) + +elif cmd == 'revoke_role': + if len(args) != 3: + print('revoke_role requires 3 args') + sys.exit(1) + pp.pprint(client.revoke_role(args[0],args[1],eval(args[2]),)) + +elif cmd == 'list_roles': + if len(args) != 2: + print('list_roles requires 2 args') + sys.exit(1) + pp.pprint(client.list_roles(args[0],eval(args[1]),)) + +elif cmd == 'grant_revoke_role': + if len(args) != 1: + print('grant_revoke_role requires 1 args') + sys.exit(1) + pp.pprint(client.grant_revoke_role(eval(args[0]),)) + +elif cmd == 'get_principals_in_role': + if len(args) != 1: + print('get_principals_in_role requires 1 args') + sys.exit(1) + pp.pprint(client.get_principals_in_role(eval(args[0]),)) + +elif cmd == 'get_role_grants_for_principal': + if len(args) != 1: + print('get_role_grants_for_principal requires 1 args') + sys.exit(1) + pp.pprint(client.get_role_grants_for_principal(eval(args[0]),)) + +elif cmd == 'get_privilege_set': + if len(args) != 3: + print('get_privilege_set requires 3 args') + sys.exit(1) + pp.pprint(client.get_privilege_set(eval(args[0]),args[1],eval(args[2]),)) + +elif cmd == 'list_privileges': + if len(args) != 3: + print('list_privileges requires 3 args') + sys.exit(1) + pp.pprint(client.list_privileges(args[0],eval(args[1]),eval(args[2]),)) + +elif cmd == 'grant_privileges': + if len(args) != 1: + print('grant_privileges requires 1 args') + sys.exit(1) + pp.pprint(client.grant_privileges(eval(args[0]),)) + +elif cmd == 'revoke_privileges': + if len(args) != 1: + print('revoke_privileges requires 1 args') + sys.exit(1) + pp.pprint(client.revoke_privileges(eval(args[0]),)) + +elif cmd == 'grant_revoke_privileges': + if len(args) != 1: + print('grant_revoke_privileges requires 1 args') + sys.exit(1) + pp.pprint(client.grant_revoke_privileges(eval(args[0]),)) + +elif cmd == 'set_ugi': + if len(args) != 2: + print('set_ugi requires 2 args') + sys.exit(1) + pp.pprint(client.set_ugi(args[0],eval(args[1]),)) + +elif cmd == 'get_delegation_token': + if len(args) != 2: + print('get_delegation_token requires 2 args') + sys.exit(1) + pp.pprint(client.get_delegation_token(args[0],args[1],)) + +elif cmd == 'renew_delegation_token': + if len(args) != 1: + print('renew_delegation_token requires 1 args') + sys.exit(1) + pp.pprint(client.renew_delegation_token(args[0],)) + +elif cmd == 'cancel_delegation_token': + if len(args) != 1: + print('cancel_delegation_token requires 1 args') + sys.exit(1) + pp.pprint(client.cancel_delegation_token(args[0],)) + +elif cmd == 'add_token': + if len(args) != 2: + print('add_token requires 2 args') + sys.exit(1) + pp.pprint(client.add_token(args[0],args[1],)) + +elif cmd == 'remove_token': + if len(args) != 1: + print('remove_token requires 1 args') + sys.exit(1) + pp.pprint(client.remove_token(args[0],)) + +elif cmd == 'get_token': + if len(args) != 1: + print('get_token requires 1 args') + sys.exit(1) + pp.pprint(client.get_token(args[0],)) + +elif cmd == 'get_all_token_identifiers': + if len(args) != 0: + print('get_all_token_identifiers requires 0 args') + sys.exit(1) + pp.pprint(client.get_all_token_identifiers()) + +elif cmd == 'add_master_key': + if len(args) != 1: + print('add_master_key requires 1 args') + sys.exit(1) + pp.pprint(client.add_master_key(args[0],)) + 
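+# A minimal, hypothetical sketch of issuing these same RPCs without this wrapper script, assuming the generated hive_metastore package is importable and a metastore Thrift server is listening (host and port below are placeholders): +# from thrift.transport import TSocket, TTransport +# from thrift.protocol import TBinaryProtocol +# from hive_metastore import ThriftHiveMetastore +# transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083)) +# client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport)) +# transport.open() +# pp.pprint(client.get_all_functions()) +# transport.close() +# Struct-typed arguments reach this script as Python source strings, which is why the dispatch above passes them through eval().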
+elif cmd == 'update_master_key': + if len(args) != 2: + print('update_master_key requires 2 args') + sys.exit(1) + pp.pprint(client.update_master_key(eval(args[0]),args[1],)) + +elif cmd == 'remove_master_key': + if len(args) != 1: + print('remove_master_key requires 1 args') + sys.exit(1) + pp.pprint(client.remove_master_key(eval(args[0]),)) + +elif cmd == 'get_master_keys': + if len(args) != 0: + print('get_master_keys requires 0 args') + sys.exit(1) + pp.pprint(client.get_master_keys()) + +elif cmd == 'get_open_txns': + if len(args) != 0: + print('get_open_txns requires 0 args') + sys.exit(1) + pp.pprint(client.get_open_txns()) + +elif cmd == 'get_open_txns_info': + if len(args) != 0: + print('get_open_txns_info requires 0 args') + sys.exit(1) + pp.pprint(client.get_open_txns_info()) + +elif cmd == 'open_txns': + if len(args) != 1: + print('open_txns requires 1 args') + sys.exit(1) + pp.pprint(client.open_txns(eval(args[0]),)) + +elif cmd == 'abort_txn': + if len(args) != 1: + print('abort_txn requires 1 args') + sys.exit(1) + pp.pprint(client.abort_txn(eval(args[0]),)) + +elif cmd == 'commit_txn': + if len(args) != 1: + print('commit_txn requires 1 args') + sys.exit(1) + pp.pprint(client.commit_txn(eval(args[0]),)) + +elif cmd == 'lock': + if len(args) != 1: + print('lock requires 1 args') + sys.exit(1) + pp.pprint(client.lock(eval(args[0]),)) + +elif cmd == 'check_lock': + if len(args) != 1: + print('check_lock requires 1 args') + sys.exit(1) + pp.pprint(client.check_lock(eval(args[0]),)) + +elif cmd == 'unlock': + if len(args) != 1: + print('unlock requires 1 args') + sys.exit(1) + pp.pprint(client.unlock(eval(args[0]),)) + +elif cmd == 'show_locks': + if len(args) != 1: + print('show_locks requires 1 args') + sys.exit(1) + pp.pprint(client.show_locks(eval(args[0]),)) + +elif cmd == 'heartbeat': + if len(args) != 1: + print('heartbeat requires 1 args') + sys.exit(1) + pp.pprint(client.heartbeat(eval(args[0]),)) + +elif cmd == 'heartbeat_txn_range': + if len(args) != 1: + print('heartbeat_txn_range requires 1 args') + sys.exit(1) + pp.pprint(client.heartbeat_txn_range(eval(args[0]),)) + +elif cmd == 'compact': + if len(args) != 1: + print('compact requires 1 args') + sys.exit(1) + pp.pprint(client.compact(eval(args[0]),)) + +elif cmd == 'show_compact': + if len(args) != 1: + print('show_compact requires 1 args') + sys.exit(1) + pp.pprint(client.show_compact(eval(args[0]),)) + +elif cmd == 'add_dynamic_partitions': + if len(args) != 1: + print('add_dynamic_partitions requires 1 args') + sys.exit(1) + pp.pprint(client.add_dynamic_partitions(eval(args[0]),)) + +elif cmd == 'get_next_notification': + if len(args) != 1: + print('get_next_notification requires 1 args') + sys.exit(1) + pp.pprint(client.get_next_notification(eval(args[0]),)) + +elif cmd == 'get_current_notificationEventId': + if len(args) != 0: + print('get_current_notificationEventId requires 0 args') + sys.exit(1) + pp.pprint(client.get_current_notificationEventId()) + +elif cmd == 'fire_listener_event': + if len(args) != 1: + print('fire_listener_event requires 1 args') + sys.exit(1) + pp.pprint(client.fire_listener_event(eval(args[0]),)) + +elif cmd == 'flushCache': + if len(args) != 0: + print('flushCache requires 0 args') + sys.exit(1) + pp.pprint(client.flushCache()) + +elif cmd == 'get_file_metadata_by_expr': + if len(args) != 1: + print('get_file_metadata_by_expr requires 1 args') + sys.exit(1) + pp.pprint(client.get_file_metadata_by_expr(eval(args[0]),)) + +elif cmd == 'get_file_metadata': + if len(args) != 1: + 
print('get_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.get_file_metadata(eval(args[0]),)) + +elif cmd == 'put_file_metadata': + if len(args) != 1: + print('put_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.put_file_metadata(eval(args[0]),)) + +elif cmd == 'clear_file_metadata': + if len(args) != 1: + print('clear_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.clear_file_metadata(eval(args[0]),)) + +elif cmd == 'cache_file_metadata': + if len(args) != 1: + print('cache_file_metadata requires 1 args') + sys.exit(1) + pp.pprint(client.cache_file_metadata(eval(args[0]),)) + +elif cmd == 'get_change_version': + if len(args) != 1: + print('get_change_version requires 1 args') + sys.exit(1) + pp.pprint(client.get_change_version(eval(args[0]),)) + +elif cmd == 'getName': + if len(args) != 0: + print('getName requires 0 args') + sys.exit(1) + pp.pprint(client.getName()) + +elif cmd == 'getVersion': + if len(args) != 0: + print('getVersion requires 0 args') + sys.exit(1) + pp.pprint(client.getVersion()) + +elif cmd == 'getStatus': + if len(args) != 0: + print('getStatus requires 0 args') + sys.exit(1) + pp.pprint(client.getStatus()) + +elif cmd == 'getStatusDetails': + if len(args) != 0: + print('getStatusDetails requires 0 args') + sys.exit(1) + pp.pprint(client.getStatusDetails()) + +elif cmd == 'getCounters': + if len(args) != 0: + print('getCounters requires 0 args') + sys.exit(1) + pp.pprint(client.getCounters()) + +elif cmd == 'getCounter': + if len(args) != 1: + print('getCounter requires 1 args') + sys.exit(1) + pp.pprint(client.getCounter(args[0],)) + +elif cmd == 'setOption': + if len(args) != 2: + print('setOption requires 2 args') + sys.exit(1) + pp.pprint(client.setOption(args[0],args[1],)) + +elif cmd == 'getOption': + if len(args) != 1: + print('getOption requires 1 args') + sys.exit(1) + pp.pprint(client.getOption(args[0],)) + +elif cmd == 'getOptions': + if len(args) != 0: + print('getOptions requires 0 args') + sys.exit(1) + pp.pprint(client.getOptions()) + +elif cmd == 'getCpuProfile': + if len(args) != 1: + print('getCpuProfile requires 1 args') + sys.exit(1) + pp.pprint(client.getCpuProfile(eval(args[0]),)) + +elif cmd == 'aliveSince': + if len(args) != 0: + print('aliveSince requires 0 args') + sys.exit(1) + pp.pprint(client.aliveSince()) + +elif cmd == 'reinitialize': + if len(args) != 0: + print('reinitialize requires 0 args') + sys.exit(1) + pp.pprint(client.reinitialize()) + +elif cmd == 'shutdown': + if len(args) != 0: + print('shutdown requires 0 args') + sys.exit(1) + pp.pprint(client.shutdown()) + +else: + print('Unrecognized method %s' % cmd) + sys.exit(1) + +transport.close() diff --git a/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py new file mode 100644 index 0000000..978c2a3 --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/ThriftHive.py @@ -0,0 +1,1674 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +from thrift.Thrift import TType, TMessageType, TException, TApplicationException +import hive_metastore.ThriftHiveMetastore +import logging +from ttypes import * +from thrift.Thrift import TProcessor +from thrift.transport import TTransport +from thrift.protocol import TBinaryProtocol, TProtocol +try: + from thrift.protocol import fastbinary +except: + fastbinary = None + + +class Iface(hive_metastore.ThriftHiveMetastore.Iface): 
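+ # This service interface extends ThriftHiveMetastore.Iface, so a concrete handler must implement every metastore method in addition to the query-execution methods declared below.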
+ def execute(self, query): + """ + Parameters: + - query + """ + pass + + def fetchOne(self): + pass + + def fetchN(self, numRows): + """ + Parameters: + - numRows + """ + pass + + def fetchAll(self): + pass + + def getSchema(self): + pass + + def getThriftSchema(self): + pass + + def getClusterStatus(self): + pass + + def getQueryPlan(self): + pass + + def clean(self): + pass + + +class Client(hive_metastore.ThriftHiveMetastore.Client, Iface): + def __init__(self, iprot, oprot=None): + hive_metastore.ThriftHiveMetastore.Client.__init__(self, iprot, oprot) + + def execute(self, query): + """ + Parameters: + - query + """ + self.send_execute(query) + self.recv_execute() + + def send_execute(self, query): + self._oprot.writeMessageBegin('execute', TMessageType.CALL, self._seqid) + args = execute_args() + args.query = query + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_execute(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = execute_result() + result.read(iprot) + iprot.readMessageEnd() + if result.ex is not None: + raise result.ex + return + + def fetchOne(self): + self.send_fetchOne() + return self.recv_fetchOne() + + def send_fetchOne(self): + self._oprot.writeMessageBegin('fetchOne', TMessageType.CALL, self._seqid) + args = fetchOne_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_fetchOne(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = fetchOne_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchOne failed: unknown result") + + def fetchN(self, numRows): + """ + Parameters: + - numRows + """ + self.send_fetchN(numRows) + return self.recv_fetchN() + + def send_fetchN(self, numRows): + self._oprot.writeMessageBegin('fetchN', TMessageType.CALL, self._seqid) + args = fetchN_args() + args.numRows = numRows + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_fetchN(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = fetchN_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchN failed: unknown result") + + def fetchAll(self): + self.send_fetchAll() + return self.recv_fetchAll() + + def send_fetchAll(self): + self._oprot.writeMessageBegin('fetchAll', TMessageType.CALL, self._seqid) + args = fetchAll_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_fetchAll(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = fetchAll_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: 
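+ # The checks here run in priority order: a populated success field wins, then any declared service exception; only if neither was set is the reply treated as malformed (MISSING_RESULT below).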
+ return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "fetchAll failed: unknown result") + + def getSchema(self): + self.send_getSchema() + return self.recv_getSchema() + + def send_getSchema(self): + self._oprot.writeMessageBegin('getSchema', TMessageType.CALL, self._seqid) + args = getSchema_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getSchema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getSchema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "getSchema failed: unknown result") + + def getThriftSchema(self): + self.send_getThriftSchema() + return self.recv_getThriftSchema() + + def send_getThriftSchema(self): + self._oprot.writeMessageBegin('getThriftSchema', TMessageType.CALL, self._seqid) + args = getThriftSchema_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getThriftSchema(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getThriftSchema_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "getThriftSchema failed: unknown result") + + def getClusterStatus(self): + self.send_getClusterStatus() + return self.recv_getClusterStatus() + + def send_getClusterStatus(self): + self._oprot.writeMessageBegin('getClusterStatus', TMessageType.CALL, self._seqid) + args = getClusterStatus_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getClusterStatus(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getClusterStatus_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "getClusterStatus failed: unknown result") + + def getQueryPlan(self): + self.send_getQueryPlan() + return self.recv_getQueryPlan() + + def send_getQueryPlan(self): + self._oprot.writeMessageBegin('getQueryPlan', TMessageType.CALL, self._seqid) + args = getQueryPlan_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_getQueryPlan(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = getQueryPlan_result() + result.read(iprot) + iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.ex is not None: + raise result.ex + raise TApplicationException(TApplicationException.MISSING_RESULT, "getQueryPlan failed: unknown result") + + def 
clean(self): + self.send_clean() + self.recv_clean() + + def send_clean(self): + self._oprot.writeMessageBegin('clean', TMessageType.CALL, self._seqid) + args = clean_args() + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_clean(self): + iprot = self._iprot + (fname, mtype, rseqid) = iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(iprot) + iprot.readMessageEnd() + raise x + result = clean_result() + result.read(iprot) + iprot.readMessageEnd() + return + + +class Processor(hive_metastore.ThriftHiveMetastore.Processor, Iface, TProcessor): + def __init__(self, handler): + hive_metastore.ThriftHiveMetastore.Processor.__init__(self, handler) + self._processMap["execute"] = Processor.process_execute + self._processMap["fetchOne"] = Processor.process_fetchOne + self._processMap["fetchN"] = Processor.process_fetchN + self._processMap["fetchAll"] = Processor.process_fetchAll + self._processMap["getSchema"] = Processor.process_getSchema + self._processMap["getThriftSchema"] = Processor.process_getThriftSchema + self._processMap["getClusterStatus"] = Processor.process_getClusterStatus + self._processMap["getQueryPlan"] = Processor.process_getQueryPlan + self._processMap["clean"] = Processor.process_clean + + def process(self, iprot, oprot): + (name, type, seqid) = iprot.readMessageBegin() + if name not in self._processMap: + iprot.skip(TType.STRUCT) + iprot.readMessageEnd() + x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name)) + oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid) + x.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + return + else: + self._processMap[name](self, seqid, iprot, oprot) + return True + + def process_execute(self, seqid, iprot, oprot): + args = execute_args() + args.read(iprot) + iprot.readMessageEnd() + result = execute_result() + try: + self._handler.execute(args.query) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("execute", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_fetchOne(self, seqid, iprot, oprot): + args = fetchOne_args() + args.read(iprot) + iprot.readMessageEnd() + result = fetchOne_result() + try: + result.success = self._handler.fetchOne() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("fetchOne", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_fetchN(self, seqid, iprot, oprot): + args = fetchN_args() + args.read(iprot) + iprot.readMessageEnd() + result = fetchN_result() + try: + result.success = self._handler.fetchN(args.numRows) + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + 
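+ # A declared service exception still produces a REPLY message: it is serialized into the result struct's ex field rather than sent as a protocol-level exception.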
msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("fetchN", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_fetchAll(self, seqid, iprot, oprot): + args = fetchAll_args() + args.read(iprot) + iprot.readMessageEnd() + result = fetchAll_result() + try: + result.success = self._handler.fetchAll() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("fetchAll", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getSchema(self, seqid, iprot, oprot): + args = getSchema_args() + args.read(iprot) + iprot.readMessageEnd() + result = getSchema_result() + try: + result.success = self._handler.getSchema() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("getSchema", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getThriftSchema(self, seqid, iprot, oprot): + args = getThriftSchema_args() + args.read(iprot) + iprot.readMessageEnd() + result = getThriftSchema_result() + try: + result.success = self._handler.getThriftSchema() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("getThriftSchema", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getClusterStatus(self, seqid, iprot, oprot): + args = getClusterStatus_args() + args.read(iprot) + iprot.readMessageEnd() + result = getClusterStatus_result() + try: + result.success = self._handler.getClusterStatus() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("getClusterStatus", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_getQueryPlan(self, seqid, iprot, oprot): + args = getQueryPlan_args() + args.read(iprot) + iprot.readMessageEnd() + result = getQueryPlan_result() + try: + result.success = self._handler.getQueryPlan() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + 
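+ # Transport failures and interpreter shutdown are re-raised untouched so the server loop can tear down the connection instead of answering with an application error.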
raise + except HiveServerException as ex: + msg_type = TMessageType.REPLY + result.ex = ex + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("getQueryPlan", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + def process_clean(self, seqid, iprot, oprot): + args = clean_args() + args.read(iprot) + iprot.readMessageEnd() + result = clean_result() + try: + self._handler.clean() + msg_type = TMessageType.REPLY + except (TTransport.TTransportException, KeyboardInterrupt, SystemExit): + raise + except Exception as ex: + msg_type = TMessageType.EXCEPTION + logging.exception(ex) + result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error') + oprot.writeMessageBegin("clean", msg_type, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + + +# HELPER FUNCTIONS AND STRUCTURES + +class execute_args: + """ + Attributes: + - query + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'query', None, None, ), # 1 + ) + + def __init__(self, query=None,): + self.query = query + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.query = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('execute_args') + if self.query is not None: + oprot.writeFieldBegin('query', TType.STRING, 1) + oprot.writeString(self.query) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.query) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class execute_result: + """ + Attributes: + - ex + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, ex=None,): + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + 
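+ # Unknown or mistyped fields are skipped rather than rejected, which is what lets older readers tolerate structs written by newer schema versions.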
else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('execute_result') + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fetchOne_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fetchOne_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fetchOne_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.STRING, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = success + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRING: + self.success = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + 
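+ # Fast path: when the C extension is importable, the whole struct is encoded in a single fastbinary call driven by thrift_spec.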
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fetchOne_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRING, 0) + oprot.writeString(self.success) + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fetchN_args: + """ + Attributes: + - numRows + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 'numRows', None, None, ), # 1 + ) + + def __init__(self, numRows=None,): + self.numRows = numRows + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.numRows = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fetchN_args') + if self.numRows is not None: + oprot.writeFieldBegin('numRows', TType.I32, 1) + oprot.writeI32(self.numRows) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.numRows) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fetchN_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = success + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype3, _size0) = 
iprot.readListBegin() + for _i4 in xrange(_size0): + _elem5 = iprot.readString() + self.success.append(_elem5) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fetchN_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter6 in self.success: + oprot.writeString(iter6) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fetchAll_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fetchAll_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class fetchAll_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.LIST, 'success', (TType.STRING,None), None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = success + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while 
True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.LIST: + self.success = [] + (_etype10, _size7) = iprot.readListBegin() + for _i11 in xrange(_size7): + _elem12 = iprot.readString() + self.success.append(_elem12) + iprot.readListEnd() + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('fetchAll_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.LIST, 0) + oprot.writeListBegin(TType.STRING, len(self.success)) + for iter13 in self.success: + oprot.writeString(iter13) + oprot.writeListEnd() + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getSchema_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getSchema_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getSchema_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (hive_metastore.ttypes.Schema, hive_metastore.ttypes.Schema.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = success + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and 
isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = hive_metastore.ttypes.Schema() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getSchema_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getThriftSchema_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getThriftSchema_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getThriftSchema_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (hive_metastore.ttypes.Schema, hive_metastore.ttypes.Schema.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = success + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == 
TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = hive_metastore.ttypes.Schema() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getThriftSchema_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getClusterStatus_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getClusterStatus_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getClusterStatus_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (HiveClusterStatus, HiveClusterStatus.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = success + self.ex = ex + + def 
read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = HiveClusterStatus() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getClusterStatus_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getQueryPlan_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getQueryPlan_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class getQueryPlan_result: + """ + Attributes: + - success + - ex + """ + + thrift_spec = ( + (0, TType.STRUCT, 'success', (queryplan.ttypes.QueryPlan, queryplan.ttypes.QueryPlan.thrift_spec), None, ), # 0 + (1, TType.STRUCT, 'ex', (HiveServerException, HiveServerException.thrift_spec), None, ), # 1 + ) + + def __init__(self, success=None, ex=None,): + self.success = 
success + self.ex = ex + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.STRUCT: + self.success = queryplan.ttypes.QueryPlan() + self.success.read(iprot) + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.ex = HiveServerException() + self.ex.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('getQueryPlan_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.STRUCT, 0) + self.success.write(oprot) + oprot.writeFieldEnd() + if self.ex is not None: + oprot.writeFieldBegin('ex', TType.STRUCT, 1) + self.ex.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.success) + value = (value * 31) ^ hash(self.ex) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class clean_args: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('clean_args') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class clean_result: + + thrift_spec = ( + ) + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return 
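+ # Slow path: without fastbinary, fields are decoded one at a time through the generic protocol interface.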
+ iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('clean_result') + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) diff --git a/service/src/gen/thrift/gen-py/hive_service/__init__.py b/service/src/gen/thrift/gen-py/hive_service/__init__.py new file mode 100644 index 0000000..6a0dcc9 --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/__init__.py @@ -0,0 +1 @@ +__all__ = ['ttypes', 'constants', 'ThriftHive'] diff --git a/service/src/gen/thrift/gen-py/hive_service/constants.py b/service/src/gen/thrift/gen-py/hive_service/constants.py new file mode 100644 index 0000000..4a6492b --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/constants.py @@ -0,0 +1,11 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +from thrift.Thrift import TType, TMessageType, TException, TApplicationException +from ttypes import * + diff --git a/service/src/gen/thrift/gen-py/hive_service/ttypes.py b/service/src/gen/thrift/gen-py/hive_service/ttypes.py new file mode 100644 index 0000000..e19fe2a --- /dev/null +++ b/service/src/gen/thrift/gen-py/hive_service/ttypes.py @@ -0,0 +1,260 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# +# options string: py +# + +from thrift.Thrift import TType, TMessageType, TException, TApplicationException +import fb303.ttypes +import hive_metastore.ttypes +import queryplan.ttypes + + +from thrift.transport import TTransport +from thrift.protocol import TBinaryProtocol, TProtocol +try: + from thrift.protocol import fastbinary +except: + fastbinary = None + + +class JobTrackerState: + INITIALIZING = 1 + RUNNING = 2 + + _VALUES_TO_NAMES = { + 1: "INITIALIZING", + 2: "RUNNING", + } + + _NAMES_TO_VALUES = { + "INITIALIZING": 1, + "RUNNING": 2, + } + + +class HiveClusterStatus: + """ + Attributes: + - taskTrackers + - mapTasks + - reduceTasks + - maxMapTasks + - maxReduceTasks + - state + """ + + thrift_spec = ( + None, # 0 + (1, TType.I32, 'taskTrackers', None, None, ), # 1 + (2, TType.I32, 'mapTasks', None, None, ), # 2 + (3, TType.I32, 'reduceTasks', None, None, ), # 3 + (4, TType.I32, 'maxMapTasks', None, None, ), # 4 + (5, TType.I32, 'maxReduceTasks', None, None, ), # 5 + (6, TType.I32, 'state', None, None, ), # 6 + ) + + def __init__(self, taskTrackers=None, mapTasks=None, reduceTasks=None, maxMapTasks=None, maxReduceTasks=None, state=None,): + self.taskTrackers = taskTrackers + self.mapTasks = mapTasks + self.reduceTasks = reduceTasks + self.maxMapTasks = maxMapTasks + self.maxReduceTasks = maxReduceTasks + self.state = state + + def read(self, iprot): + if 
iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.I32: + self.taskTrackers = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.mapTasks = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.I32: + self.reduceTasks = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.I32: + self.maxMapTasks = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 5: + if ftype == TType.I32: + self.maxReduceTasks = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 6: + if ftype == TType.I32: + self.state = iprot.readI32() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('HiveClusterStatus') + if self.taskTrackers is not None: + oprot.writeFieldBegin('taskTrackers', TType.I32, 1) + oprot.writeI32(self.taskTrackers) + oprot.writeFieldEnd() + if self.mapTasks is not None: + oprot.writeFieldBegin('mapTasks', TType.I32, 2) + oprot.writeI32(self.mapTasks) + oprot.writeFieldEnd() + if self.reduceTasks is not None: + oprot.writeFieldBegin('reduceTasks', TType.I32, 3) + oprot.writeI32(self.reduceTasks) + oprot.writeFieldEnd() + if self.maxMapTasks is not None: + oprot.writeFieldBegin('maxMapTasks', TType.I32, 4) + oprot.writeI32(self.maxMapTasks) + oprot.writeFieldEnd() + if self.maxReduceTasks is not None: + oprot.writeFieldBegin('maxReduceTasks', TType.I32, 5) + oprot.writeI32(self.maxReduceTasks) + oprot.writeFieldEnd() + if self.state is not None: + oprot.writeFieldBegin('state', TType.I32, 6) + oprot.writeI32(self.state) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.taskTrackers) + value = (value * 31) ^ hash(self.mapTasks) + value = (value * 31) ^ hash(self.reduceTasks) + value = (value * 31) ^ hash(self.maxMapTasks) + value = (value * 31) ^ hash(self.maxReduceTasks) + value = (value * 31) ^ hash(self.state) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class HiveServerException(TException): + """ + Attributes: + - message + - errorCode + - SQLState + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRING, 'message', None, None, ), # 1 + (2, TType.I32, 'errorCode', None, None, ), # 2 + (3, TType.STRING, 'SQLState', None, None, ), # 3 + ) + + def __init__(self, message=None, errorCode=None, SQLState=None,): + self.message = message + self.errorCode = errorCode + self.SQLState = SQLState + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated 
and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRING: + self.message = iprot.readString() + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.I32: + self.errorCode = iprot.readI32() + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRING: + self.SQLState = iprot.readString() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('HiveServerException') + if self.message is not None: + oprot.writeFieldBegin('message', TType.STRING, 1) + oprot.writeString(self.message) + oprot.writeFieldEnd() + if self.errorCode is not None: + oprot.writeFieldBegin('errorCode', TType.I32, 2) + oprot.writeI32(self.errorCode) + oprot.writeFieldEnd() + if self.SQLState is not None: + oprot.writeFieldBegin('SQLState', TType.STRING, 3) + oprot.writeString(self.SQLState) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __str__(self): + return repr(self) + + def __hash__(self): + value = 17 + value = (value * 31) ^ hash(self.message) + value = (value * 31) ^ hash(self.errorCode) + value = (value * 31) ^ hash(self.SQLState) + return value + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) diff --git a/service/src/gen/thrift/gen-rb/hive_service_constants.rb b/service/src/gen/thrift/gen-rb/hive_service_constants.rb new file mode 100644 index 0000000..ed7d2d7 --- /dev/null +++ b/service/src/gen/thrift/gen-rb/hive_service_constants.rb @@ -0,0 +1,9 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# + +require 'thrift' +require 'hive_service_types' + diff --git a/service/src/gen/thrift/gen-rb/hive_service_types.rb b/service/src/gen/thrift/gen-rb/hive_service_types.rb new file mode 100644 index 0000000..9191767 --- /dev/null +++ b/service/src/gen/thrift/gen-rb/hive_service_types.rb @@ -0,0 +1,68 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# + +require 'thrift' +require 'fb303_types' +require 'hive_metastore_types' +require 'queryplan_types' + + +module JobTrackerState + INITIALIZING = 1 + RUNNING = 2 + VALUE_MAP = {1 => "INITIALIZING", 2 => "RUNNING"} + VALID_VALUES = Set.new([INITIALIZING, RUNNING]).freeze +end + +class HiveClusterStatus + include ::Thrift::Struct, ::Thrift::Struct_Union + TASKTRACKERS = 1 + MAPTASKS = 2 + REDUCETASKS = 3 + MAXMAPTASKS = 4 + MAXREDUCETASKS = 5 + STATE = 6 + + FIELDS = { + TASKTRACKERS => {:type => ::Thrift::Types::I32, :name => 'taskTrackers'}, + MAPTASKS => {:type => ::Thrift::Types::I32, :name => 'mapTasks'}, + REDUCETASKS => {:type => 
::Thrift::Types::I32, :name => 'reduceTasks'}, + MAXMAPTASKS => {:type => ::Thrift::Types::I32, :name => 'maxMapTasks'}, + MAXREDUCETASKS => {:type => ::Thrift::Types::I32, :name => 'maxReduceTasks'}, + STATE => {:type => ::Thrift::Types::I32, :name => 'state', :enum_class => ::JobTrackerState} + } + + def struct_fields; FIELDS; end + + def validate + unless @state.nil? || ::JobTrackerState::VALID_VALUES.include?(@state) + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Invalid value of field state!') + end + end + + ::Thrift::Struct.generate_accessors self +end + +class HiveServerException < ::Thrift::Exception + include ::Thrift::Struct, ::Thrift::Struct_Union + MESSAGE = 1 + ERRORCODE = 2 + SQLSTATE = 3 + + FIELDS = { + MESSAGE => {:type => ::Thrift::Types::STRING, :name => 'message'}, + ERRORCODE => {:type => ::Thrift::Types::I32, :name => 'errorCode'}, + SQLSTATE => {:type => ::Thrift::Types::STRING, :name => 'SQLState'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self +end + diff --git a/service/src/gen/thrift/gen-rb/thrift_hive.rb b/service/src/gen/thrift/gen-rb/thrift_hive.rb new file mode 100644 index 0000000..2b57cd9 --- /dev/null +++ b/service/src/gen/thrift/gen-rb/thrift_hive.rb @@ -0,0 +1,555 @@ +# +# Autogenerated by Thrift Compiler (0.9.3) +# +# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING +# + +require 'thrift' +require 'thrift_hive_metastore' +require 'hive_service_types' + +module ThriftHive + class Client < ::ThriftHiveMetastore::Client + include ::Thrift::Client + + def execute(query) + send_execute(query) + recv_execute() + end + + def send_execute(query) + send_message('execute', Execute_args, :query => query) + end + + def recv_execute() + result = receive_message(Execute_result) + raise result.ex unless result.ex.nil? + return + end + + def fetchOne() + send_fetchOne() + return recv_fetchOne() + end + + def send_fetchOne() + send_message('fetchOne', FetchOne_args) + end + + def recv_fetchOne() + result = receive_message(FetchOne_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'fetchOne failed: unknown result') + end + + def fetchN(numRows) + send_fetchN(numRows) + return recv_fetchN() + end + + def send_fetchN(numRows) + send_message('fetchN', FetchN_args, :numRows => numRows) + end + + def recv_fetchN() + result = receive_message(FetchN_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'fetchN failed: unknown result') + end + + def fetchAll() + send_fetchAll() + return recv_fetchAll() + end + + def send_fetchAll() + send_message('fetchAll', FetchAll_args) + end + + def recv_fetchAll() + result = receive_message(FetchAll_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'fetchAll failed: unknown result') + end + + def getSchema() + send_getSchema() + return recv_getSchema() + end + + def send_getSchema() + send_message('getSchema', GetSchema_args) + end + + def recv_getSchema() + result = receive_message(GetSchema_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? 
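+      # Neither a success value nor a declared exception came back; report a protocol-level MISSING_RESULT.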
+ raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'getSchema failed: unknown result') + end + + def getThriftSchema() + send_getThriftSchema() + return recv_getThriftSchema() + end + + def send_getThriftSchema() + send_message('getThriftSchema', GetThriftSchema_args) + end + + def recv_getThriftSchema() + result = receive_message(GetThriftSchema_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'getThriftSchema failed: unknown result') + end + + def getClusterStatus() + send_getClusterStatus() + return recv_getClusterStatus() + end + + def send_getClusterStatus() + send_message('getClusterStatus', GetClusterStatus_args) + end + + def recv_getClusterStatus() + result = receive_message(GetClusterStatus_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'getClusterStatus failed: unknown result') + end + + def getQueryPlan() + send_getQueryPlan() + return recv_getQueryPlan() + end + + def send_getQueryPlan() + send_message('getQueryPlan', GetQueryPlan_args) + end + + def recv_getQueryPlan() + result = receive_message(GetQueryPlan_result) + return result.success unless result.success.nil? + raise result.ex unless result.ex.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'getQueryPlan failed: unknown result') + end + + def clean() + send_clean() + recv_clean() + end + + def send_clean() + send_message('clean', Clean_args) + end + + def recv_clean() + result = receive_message(Clean_result) + return + end + + end + + class Processor < ::ThriftHiveMetastore::Processor + include ::Thrift::Processor + + def process_execute(seqid, iprot, oprot) + args = read_args(iprot, Execute_args) + result = Execute_result.new() + begin + @handler.execute(args.query) + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'execute', seqid) + end + + def process_fetchOne(seqid, iprot, oprot) + args = read_args(iprot, FetchOne_args) + result = FetchOne_result.new() + begin + result.success = @handler.fetchOne() + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'fetchOne', seqid) + end + + def process_fetchN(seqid, iprot, oprot) + args = read_args(iprot, FetchN_args) + result = FetchN_result.new() + begin + result.success = @handler.fetchN(args.numRows) + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'fetchN', seqid) + end + + def process_fetchAll(seqid, iprot, oprot) + args = read_args(iprot, FetchAll_args) + result = FetchAll_result.new() + begin + result.success = @handler.fetchAll() + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'fetchAll', seqid) + end + + def process_getSchema(seqid, iprot, oprot) + args = read_args(iprot, GetSchema_args) + result = GetSchema_result.new() + begin + result.success = @handler.getSchema() + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'getSchema', seqid) + end + + def process_getThriftSchema(seqid, iprot, oprot) + args = read_args(iprot, GetThriftSchema_args) + result = GetThriftSchema_result.new() + begin + result.success = @handler.getThriftSchema() + rescue ::HiveServerException => ex + result.ex = ex + end + 
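+      # Whichever of result.success or result.ex the handler populated is serialized back to the caller.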
write_result(result, oprot, 'getThriftSchema', seqid) + end + + def process_getClusterStatus(seqid, iprot, oprot) + args = read_args(iprot, GetClusterStatus_args) + result = GetClusterStatus_result.new() + begin + result.success = @handler.getClusterStatus() + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'getClusterStatus', seqid) + end + + def process_getQueryPlan(seqid, iprot, oprot) + args = read_args(iprot, GetQueryPlan_args) + result = GetQueryPlan_result.new() + begin + result.success = @handler.getQueryPlan() + rescue ::HiveServerException => ex + result.ex = ex + end + write_result(result, oprot, 'getQueryPlan', seqid) + end + + def process_clean(seqid, iprot, oprot) + args = read_args(iprot, Clean_args) + result = Clean_result.new() + @handler.clean() + write_result(result, oprot, 'clean', seqid) + end + + end + + # HELPER FUNCTIONS AND STRUCTURES + + class Execute_args + include ::Thrift::Struct, ::Thrift::Struct_Union + QUERY = 1 + + FIELDS = { + QUERY => {:type => ::Thrift::Types::STRING, :name => 'query'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Execute_result + include ::Thrift::Struct, ::Thrift::Struct_Union + EX = 1 + + FIELDS = { + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FetchOne_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FetchOne_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRING, :name => 'success'}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FetchN_args + include ::Thrift::Struct, ::Thrift::Struct_Union + NUMROWS = 1 + + FIELDS = { + NUMROWS => {:type => ::Thrift::Types::I32, :name => 'numRows'} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FetchN_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRING}}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FetchAll_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class FetchAll_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::LIST, :name => 'success', :element => {:type => ::Thrift::Types::STRING}}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetSchema_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def 
struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetSchema_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::Schema}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetThriftSchema_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetThriftSchema_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::Schema}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetClusterStatus_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetClusterStatus_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::HiveClusterStatus}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetQueryPlan_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class GetQueryPlan_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + EX = 1 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::STRUCT, :name => 'success', :class => ::QueryPlan}, + EX => {:type => ::Thrift::Types::STRUCT, :name => 'ex', :class => ::HiveServerException} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Clean_args + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Clean_result + include ::Thrift::Struct, ::Thrift::Struct_Union + + FIELDS = { + + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + +end + diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java b/service/src/java/org/apache/hive/service/cli/CLIService.java index ab30ae2..4a83e38 100644 --- a/service/src/java/org/apache/hive/service/cli/CLIService.java +++ b/service/src/java/org/apache/hive/service/cli/CLIService.java @@ -365,6 +365,36 @@ public OperationHandle getFunctions(SessionHandle sessionHandle, } /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getPrimaryKeys(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getPrimaryKeys(SessionHandle sessionHandle, + String catalog, String schema, String table) + throws HiveSQLException { + OperationHandle opHandle = 
sessionManager.getSession(sessionHandle) + .getPrimaryKeys(catalog, schema, table); + LOG.debug(sessionHandle + ": getPrimaryKeys()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getCrossReference(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getCrossReference(SessionHandle sessionHandle, + String primaryCatalog, + String primarySchema, String primaryTable, String foreignCatalog, + String foreignSchema, String foreignTable) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getCrossReference(primaryCatalog, primarySchema, primaryTable, + foreignCatalog, + foreignSchema, foreignTable); + LOG.debug(sessionHandle + ": getCrossReference()"); + return opHandle; + } + + /* (non-Javadoc) * @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle) */ @Override diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java.orig b/service/src/java/org/apache/hive/service/cli/CLIService.java.orig new file mode 100644 index 0000000..ab30ae2 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/CLIService.java.orig @@ -0,0 +1,506 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service.cli; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.concurrent.CancellationException; +import java.util.concurrent.ExecutionException; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.TimeoutException; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.Utils; +import org.apache.hadoop.security.UserGroupInformation; +import org.apache.hive.service.CompositeService; +import org.apache.hive.service.ServiceException; +import org.apache.hive.service.auth.HiveAuthFactory; +import org.apache.hive.service.cli.operation.Operation; +import org.apache.hive.service.cli.session.SessionManager; +import org.apache.hive.service.rpc.thrift.TProtocolVersion; +import org.apache.hive.service.server.HiveServer2; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * CLIService. 
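+ * Thin facade that resolves each handle through the SessionManager and delegates the call to the owning session.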
+ * + */ +public class CLIService extends CompositeService implements ICLIService { + + public static final TProtocolVersion SERVER_VERSION; + + static { + TProtocolVersion[] protocols = TProtocolVersion.values(); + SERVER_VERSION = protocols[protocols.length - 1]; + } + + private final Logger LOG = LoggerFactory.getLogger(CLIService.class.getName()); + + private HiveConf hiveConf; + private SessionManager sessionManager; + private UserGroupInformation serviceUGI; + private UserGroupInformation httpUGI; + // The HiveServer2 instance running this service + private final HiveServer2 hiveServer2; + + public CLIService(HiveServer2 hiveServer2) { + super(CLIService.class.getSimpleName()); + this.hiveServer2 = hiveServer2; + } + + @Override + public synchronized void init(HiveConf hiveConf) { + this.hiveConf = hiveConf; + sessionManager = new SessionManager(hiveServer2); + addService(sessionManager); + // If the hadoop cluster is secure, do a kerberos login for the service from the keytab + if (UserGroupInformation.isSecurityEnabled()) { + try { + HiveAuthFactory.loginFromKeytab(hiveConf); + this.serviceUGI = Utils.getUGI(); + } catch (IOException e) { + throw new ServiceException("Unable to login to kerberos with given principal/keytab", e); + } catch (LoginException e) { + throw new ServiceException("Unable to login to kerberos with given principal/keytab", e); + } + + // Also try creating a UGI object for the SPNego principal + String principal = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_PRINCIPAL); + String keyTabFile = hiveConf.getVar(ConfVars.HIVE_SERVER2_SPNEGO_KEYTAB); + if (principal.isEmpty() || keyTabFile.isEmpty()) { + LOG.info("SPNego httpUGI not created, spNegoPrincipal: " + principal + + ", keytabFile: " + keyTabFile); + } else { + try { + this.httpUGI = HiveAuthFactory.loginFromSpnegoKeytabAndReturnUGI(hiveConf); + LOG.info("SPNego httpUGI successfully created."); + } catch (IOException e) { + LOG.warn("SPNego httpUGI creation failed: ", e); + } + } + } + // creates connection to HMS and thus *must* occur after kerberos login above + try { + applyAuthorizationConfigPolicy(hiveConf); + } catch (Exception e) { + throw new RuntimeException("Error applying authorization policy on hive configuration: " + + e.getMessage(), e); + } + setupBlockedUdfs(); + super.init(hiveConf); + } + + private void applyAuthorizationConfigPolicy(HiveConf newHiveConf) throws HiveException, + MetaException { + // authorization setup using SessionState should be revisited eventually, as + // authorization and authentication are not session specific settings + SessionState ss = new SessionState(newHiveConf); + ss.setIsHiveServerQuery(true); + SessionState.start(ss); + ss.applyAuthorizationPolicy(); + } + + private void setupBlockedUdfs() { + FunctionRegistry.setupPermissionsForBuiltinUDFs( + hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_WHITELIST), + hiveConf.getVar(ConfVars.HIVE_SERVER2_BUILTIN_UDF_BLACKLIST)); + } + + public UserGroupInformation getServiceUGI() { + return this.serviceUGI; + } + + public UserGroupInformation getHttpUGI() { + return this.httpUGI; + } + + @Override + public synchronized void start() { + super.start(); + } + + @Override + public synchronized void stop() { + super.stop(); + // Release the HMS connection for this service thread + Hive.closeCurrent(); + } + + /** + * @deprecated Use {@link #openSession(TProtocolVersion, String, String, String, Map)} + */ + @Deprecated + public SessionHandle openSession(TProtocolVersion protocol, String username, String password, + Map<String, String>
configuration) throws HiveSQLException { + SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, null, configuration, false, null); + LOG.debug(sessionHandle + ": openSession()"); + return sessionHandle; + } + + /** + * @deprecated Use {@link #openSessionWithImpersonation(TProtocolVersion, String, String, String, Map, String)} + */ + @Deprecated + public SessionHandle openSessionWithImpersonation(TProtocolVersion protocol, String username, + String password, Map configuration, String delegationToken) + throws HiveSQLException { + SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, null, configuration, + true, delegationToken); + LOG.debug(sessionHandle + ": openSessionWithImpersonation()"); + return sessionHandle; + } + + public SessionHandle openSession(TProtocolVersion protocol, String username, String password, String ipAddress, + Map configuration) throws HiveSQLException { + SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, ipAddress, configuration, false, null); + LOG.debug(sessionHandle + ": openSession()"); + return sessionHandle; + } + + public SessionHandle openSessionWithImpersonation(TProtocolVersion protocol, String username, + String password, String ipAddress, Map configuration, String delegationToken) + throws HiveSQLException { + SessionHandle sessionHandle = sessionManager.openSession(protocol, username, password, ipAddress, configuration, + true, delegationToken); + LOG.debug(sessionHandle + ": openSession()"); + return sessionHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map) + */ + @Override + public SessionHandle openSession(String username, String password, Map configuration) + throws HiveSQLException { + SessionHandle sessionHandle = sessionManager.openSession(SERVER_VERSION, username, password, null, configuration, false, null); + LOG.debug(sessionHandle + ": openSession()"); + return sessionHandle; + } + + /** + * Used to restore session + */ + public void createSessionWithSessionHandle(SessionHandle sessionHandle, String username, String password, + Map configuration) + throws HiveSQLException { + sessionManager.createSession(sessionHandle, SERVER_VERSION, username, password, null, + configuration, false, null); + LOG.debug(sessionHandle + ": createSessionWithSessionHandle()"); + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map) + */ + @Override + public SessionHandle openSessionWithImpersonation(String username, String password, Map configuration, + String delegationToken) throws HiveSQLException { + SessionHandle sessionHandle = sessionManager.openSession(SERVER_VERSION, username, password, null, configuration, + true, delegationToken); + LOG.debug(sessionHandle + ": openSession()"); + return sessionHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#closeSession(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public void closeSession(SessionHandle sessionHandle) + throws HiveSQLException { + sessionManager.closeSession(sessionHandle); + LOG.debug(sessionHandle + ": closeSession()"); + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getInfo(org.apache.hive.service.cli.SessionHandle, java.util.List) + */ + @Override + public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType getInfoType) + throws 
HiveSQLException { + GetInfoValue infoValue = sessionManager.getSession(sessionHandle) + .getInfo(getInfoType); + LOG.debug(sessionHandle + ": getInfo()"); + return infoValue; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle, + * java.lang.String, java.util.Map) + */ + @Override + public OperationHandle executeStatement(SessionHandle sessionHandle, String statement, + Map confOverlay) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .executeStatement(statement, confOverlay); + LOG.debug(sessionHandle + ": executeStatement()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, + * java.lang.String, java.util.Map) + */ + @Override + public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement, + Map confOverlay) throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .executeStatementAsync(statement, confOverlay); + LOG.debug(sessionHandle + ": executeStatementAsync()"); + return opHandle; + } + + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getTypeInfo(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getTypeInfo(SessionHandle sessionHandle) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getTypeInfo(); + LOG.debug(sessionHandle + ": getTypeInfo()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getCatalogs(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getCatalogs(SessionHandle sessionHandle) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getCatalogs(); + LOG.debug(sessionHandle + ": getCatalogs()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getSchemas(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String) + */ + @Override + public OperationHandle getSchemas(SessionHandle sessionHandle, + String catalogName, String schemaName) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getSchemas(catalogName, schemaName); + LOG.debug(sessionHandle + ": getSchemas()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getTables(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.util.List) + */ + @Override + public OperationHandle getTables(SessionHandle sessionHandle, + String catalogName, String schemaName, String tableName, List tableTypes) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getTables(catalogName, schemaName, tableName, tableTypes); + LOG.debug(sessionHandle + ": getTables()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getTableTypes(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getTableTypes(SessionHandle sessionHandle) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getTableTypes(); + LOG.debug(sessionHandle + ": getTableTypes()"); + return opHandle; + } + + /* (non-Javadoc) + * @see 
org.apache.hive.service.cli.ICLIService#getColumns(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getColumns(SessionHandle sessionHandle, + String catalogName, String schemaName, String tableName, String columnName) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getColumns(catalogName, schemaName, tableName, columnName); + LOG.debug(sessionHandle + ": getColumns()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getFunctions(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getFunctions(SessionHandle sessionHandle, + String catalogName, String schemaName, String functionName) + throws HiveSQLException { + OperationHandle opHandle = sessionManager.getSession(sessionHandle) + .getFunctions(catalogName, schemaName, functionName); + LOG.debug(sessionHandle + ": getFunctions()"); + return opHandle; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public OperationStatus getOperationStatus(OperationHandle opHandle) + throws HiveSQLException { + Operation operation = sessionManager.getOperationManager().getOperation(opHandle); + /** + * If this is a background operation run asynchronously, + * we block for a configured duration, before we return + * (duration: HIVE_SERVER2_LONG_POLLING_TIMEOUT). + * However, if the background operation is complete, we return immediately. + */ + if (operation.shouldRunAsync()) { + HiveConf conf = operation.getParentSession().getHiveConf(); + long timeout = HiveConf.getTimeVar(conf, + HiveConf.ConfVars.HIVE_SERVER2_LONG_POLLING_TIMEOUT, TimeUnit.MILLISECONDS); + try { + operation.getBackgroundHandle().get(timeout, TimeUnit.MILLISECONDS); + } catch (TimeoutException e) { + // No Op, return to the caller since long polling timeout has expired + LOG.trace(opHandle + ": Long polling timed out"); + } catch (CancellationException e) { + // The background operation thread was cancelled + LOG.trace(opHandle + ": The background operation was cancelled", e); + } catch (ExecutionException e) { + // The background operation thread was aborted + LOG.warn(opHandle + ": The background operation was aborted", e); + } catch (InterruptedException e) { + // No op, this thread was interrupted + // In this case, the call might return sooner than long polling timeout + } + } + OperationStatus opStatus = operation.getStatus(); + LOG.debug(opHandle + ": getOperationStatus()"); + return opStatus; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#cancelOperation(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public void cancelOperation(OperationHandle opHandle) + throws HiveSQLException { + sessionManager.getOperationManager().getOperation(opHandle) + .getParentSession().cancelOperation(opHandle); + LOG.debug(opHandle + ": cancelOperation()"); + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#closeOperation(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public void closeOperation(OperationHandle opHandle) + throws HiveSQLException { + sessionManager.getOperationManager().getOperation(opHandle) + .getParentSession().closeOperation(opHandle); + LOG.debug(opHandle + ": closeOperation"); + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getResultSetMetadata(org.apache.hive.service.cli.OperationHandle) + */ + 
@Override + public TableSchema getResultSetMetadata(OperationHandle opHandle) + throws HiveSQLException { + TableSchema tableSchema = sessionManager.getOperationManager() + .getOperation(opHandle).getParentSession().getResultSetMetadata(opHandle); + LOG.debug(opHandle + ": getResultSetMetadata()"); + return tableSchema; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public RowSet fetchResults(OperationHandle opHandle) + throws HiveSQLException { + return fetchResults(opHandle, Operation.DEFAULT_FETCH_ORIENTATION, + Operation.DEFAULT_FETCH_MAX_ROWS, FetchType.QUERY_OUTPUT); + } + + @Override + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException { + RowSet rowSet = sessionManager.getOperationManager().getOperation(opHandle) + .getParentSession().fetchResults(opHandle, orientation, maxRows, fetchType); + LOG.debug(opHandle + ": fetchResults()"); + return rowSet; + } + + // obtain delegation token for the given user from metastore + public synchronized String getDelegationTokenFromMetaStore(String owner) + throws HiveSQLException, UnsupportedOperationException, LoginException, IOException { + if (!hiveConf.getBoolVar(HiveConf.ConfVars.METASTORE_USE_THRIFT_SASL) || + !hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS)) { + throw new UnsupportedOperationException( + "a delegation token can only be obtained for a secure remote metastore"); + } + + try { + Hive.closeCurrent(); + return Hive.get(hiveConf).getDelegationToken(owner, owner); + } catch (HiveException e) { + if (e.getCause() instanceof UnsupportedOperationException) { + throw (UnsupportedOperationException)e.getCause(); + } else { + throw new HiveSQLException("Error connecting to metastore to set up impersonation", e); + } + } + } + + @Override + public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String owner, String renewer) throws HiveSQLException { + String delegationToken = sessionManager.getSession(sessionHandle). + getDelegationToken(authFactory, owner, renewer); + LOG.info(sessionHandle + ": getDelegationToken()" + " owner: " + owner + ", renewer: " + renewer); + return delegationToken; + } + + @Override + public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException { + sessionManager.getSession(sessionHandle).
+ cancelDelegationToken(authFactory, tokenStr); + LOG.info(sessionHandle + ": cancelDelegationToken()"); + } + + @Override + public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException { + sessionManager.getSession(sessionHandle).renewDelegationToken(authFactory, tokenStr); + LOG.info(sessionHandle + ": renewDelegationToken()"); + } + + public SessionManager getSessionManager() { + return sessionManager; + } +} diff --git a/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java index 9cad5be..79e0024 100644 --- a/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java +++ b/service/src/java/org/apache/hive/service/cli/EmbeddedCLIServiceClient.java @@ -205,4 +205,19 @@ public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory au String tokenStr) throws HiveSQLException { cliService.renewDelegationToken(sessionHandle, authFactory, tokenStr); } + + @Override + public OperationHandle getPrimaryKeys(SessionHandle sessionHandle, + String catalog, String schema, String table) throws HiveSQLException { + return cliService.getPrimaryKeys(sessionHandle, catalog, schema, table); + } + + @Override + public OperationHandle getCrossReference(SessionHandle sessionHandle, + String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) + throws HiveSQLException { + return cliService.getCrossReference(sessionHandle, primaryCatalog, primarySchema, + primaryTable, foreignCatalog, foreignSchema, foreignTable); + } } diff --git a/service/src/java/org/apache/hive/service/cli/ICLIService.java b/service/src/java/org/apache/hive/service/cli/ICLIService.java index 0a54bdd..e4aef96 100644 --- a/service/src/java/org/apache/hive/service/cli/ICLIService.java +++ b/service/src/java/org/apache/hive/service/cli/ICLIService.java @@ -98,5 +98,13 @@ void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFact void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException; + OperationHandle getPrimaryKeys(SessionHandle sessionHandle, String catalog, + String schema, String table) throws HiveSQLException; + + OperationHandle getCrossReference(SessionHandle sessionHandle, + String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) + throws HiveSQLException; + } diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java new file mode 100644 index 0000000..42ed040 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/operation/GetCrossReferenceOperation.java @@ -0,0 +1,168 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service.cli.operation; + +import java.util.List; + +import org.apache.commons.lang.NumberUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.SQLForeignKey; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationState; +import org.apache.hive.service.cli.OperationType; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.RowSetFactory; +import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.Type; +import org.apache.hive.service.cli.session.HiveSession; + +/** + * GetCrossReferenceOperation. + * + */ +public class GetCrossReferenceOperation extends MetadataOperation { +/** +PKTABLE_CAT String => parent key table catalog (may be null) +PKTABLE_SCHEM String => parent key table schema (may be null) +PKTABLE_NAME String => parent key table name +PKCOLUMN_NAME String => parent key column name +FKTABLE_CAT String => foreign key table catalog being exported (may be null) +FKTABLE_SCHEM String => foreign key table schema being exported (may be null) +FKTABLE_NAME String => foreign key table name being exported +FKCOLUMN_NAME String => foreign key column name being exported +KEY_SEQ short => sequence number within foreign key (a value of 1 represents the first column of the foreign key, a value of 2 would represent the second column within the foreign key). +UPDATE_RULE short => What happens to foreign key when parent key is updated: +importedKeyNoAction - do not allow update of parent key if it has been imported +importedKeyCascade - change imported key to agree with parent key update +importedKeySetNull - change imported key to NULL if its parent key has been updated +importedKeySetDefault - change imported key to default values if its parent key has been updated +importedKeyRestrict - same as importedKeyNoAction (for ODBC 2.x compatibility) +DELETE_RULE short => What happens to the foreign key when parent key is deleted. +importedKeyNoAction - do not allow delete of parent key if it has been imported +importedKeyCascade - delete rows that import a deleted key +importedKeySetNull - change imported key to NULL if its primary key has been deleted +importedKeyRestrict - same as importedKeyNoAction (for ODBC 2.x compatibility) +importedKeySetDefault - change imported key to default if its parent key has been deleted +FK_NAME String => foreign key name (may be null) +PK_NAME String => parent key name (may be null) +DEFERRABILITY short => can the evaluation of foreign key constraints be deferred until commit +importedKeyInitiallyDeferred - see SQL92 for definition +importedKeyInitiallyImmediate - see SQL92 for definition +importedKeyNotDeferrable - see SQL92 for definition + */ + private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() + .addPrimitiveColumn("PKTABLE_CAT", Type.STRING_TYPE, + "Parent key table catalog (may be null)") + .addPrimitiveColumn("PKTABLE_SCHEM", Type.STRING_TYPE, + "Parent key table schema (may be null)") + .addPrimitiveColumn("PKTABLE_NAME", Type.STRING_TYPE, + "Parent Key table name") + .addPrimitiveColumn("PKCOLUMN_NAME", Type.STRING_TYPE, + "Parent Key column name") + .addPrimitiveColumn("FKTABLE_CAT", Type.STRING_TYPE, + "Foreign key table catalog (may be null)") + .addPrimitiveColumn("FKTABLE_SCHEM", Type.STRING_TYPE, + "Foreign key table schema (may be null)") + .addPrimitiveColumn("FKTABLE_NAME", Type.STRING_TYPE, + "Foreign Key table name") + .addPrimitiveColumn("FKCOLUMN_NAME", Type.STRING_TYPE, + "Foreign Key column name") + .addPrimitiveColumn("KEY_SEQ", Type.INT_TYPE, + "Sequence number within foreign key") + .addPrimitiveColumn("UPDATE_RULE", Type.INT_TYPE, + "What happens to foreign key when parent key is updated") + .addPrimitiveColumn("DELETE_RULE", Type.INT_TYPE, + "What happens to foreign key when parent key is deleted") + .addPrimitiveColumn("FK_NAME", Type.STRING_TYPE, + "Foreign key name (may be null)") + .addPrimitiveColumn("PK_NAME", Type.STRING_TYPE, + "Primary key name (may be null)") + .addPrimitiveColumn("DEFERRABILITY", Type.INT_TYPE, + "Can the evaluation of foreign key constraints be deferred until commit"); + private final String parentCatalogName; + private final String parentSchemaName; + private final String parentTableName; + private final String foreignCatalogName; + private final String foreignSchemaName; + private final String foreignTableName; + private final RowSet rowSet; + + public GetCrossReferenceOperation(HiveSession parentSession, + String parentCatalogName, String parentSchemaName, String parentTableName, + String foreignCatalog, String foreignSchema, String foreignTable) { + super(parentSession, OperationType.GET_FUNCTIONS); + this.parentCatalogName = parentCatalogName; + this.parentSchemaName = parentSchemaName; + this.parentTableName = parentTableName; + this.foreignCatalogName = foreignCatalog; + this.foreignSchemaName = foreignSchema; + this.foreignTableName = foreignTable; + this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); + } + + @Override + public void runInternal() throws HiveSQLException { + setState(OperationState.RUNNING); + try { + IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); + List<SQLForeignKey> fks = metastoreClient.getForeignKeys(parentSchemaName, parentTableName, foreignSchemaName, foreignTableName); + if (fks == null) { + return; + } + for (SQLForeignKey fk : fks) { + rowSet.addRow(new Object[] {parentCatalogName, + fk.getPktable_schem(), fk.getPktable_name(),
fk.getPkcolumn_name(), + foreignCatalogName, + fk.getFktable_schem(), fk.getFktable_name(), fk.getFkcolumn_name(), + fk.getKey_seq(), fk.getUpdate_rule(), fk.getDelete_rule(), fk.getFk_name(), + fk.getPk_name(), 0}); + } + setState(OperationState.FINISHED); + } catch (Exception e) { + setState(OperationState.ERROR); + throw new HiveSQLException(e); + } + } + + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.Operation#getResultSetSchema() + */ + @Override + public TableSchema getResultSetSchema() throws HiveSQLException { + assertState(OperationState.FINISHED); + return RESULT_SET_SCHEMA; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) + */ + @Override + public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { + assertState(OperationState.FINISHED); + validateDefaultFetchOrientation(orientation); + if (orientation.equals(FetchOrientation.FETCH_FIRST)) { + rowSet.setStartOffset(0); + } + return rowSet.extractSubset((int)maxRows); + } +} diff --git a/service/src/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java b/service/src/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java new file mode 100644 index 0000000..520b65a --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/operation/GetPrimaryKeysOperation.java @@ -0,0 +1,122 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service.cli.operation; + +import java.util.List; + +import org.apache.commons.lang.NumberUtils; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.SQLPrimaryKey; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationState; +import org.apache.hive.service.cli.OperationType; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.RowSetFactory; +import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.Type; +import org.apache.hive.service.cli.session.HiveSession; + +/** + * GetPrimaryKeysOperation. + * + */ +public class GetPrimaryKeysOperation extends MetadataOperation { +/** +TABLE_CAT String => table catalog (may be null) +TABLE_SCHEM String => table schema (may be null) +TABLE_NAME String => table name +COLUMN_NAME String => column name +KEY_SEQ short => sequence number within primary key( a value of 1 represents the first column of the primary key, a value of 2 would represent the second column within the primary key). 
+PK_NAME String => primary key name (may be null) + */ + private static final TableSchema RESULT_SET_SCHEMA = new TableSchema() + .addPrimitiveColumn("TABLE_CAT", Type.STRING_TYPE, + "Table catalog (may be null)") + .addPrimitiveColumn("TABLE_SCHEM", Type.STRING_TYPE, + "Table schema (may be null)") + .addPrimitiveColumn("TABLE_NAME", Type.STRING_TYPE, + "Table name") + .addPrimitiveColumn("COLUMN_NAME", Type.STRING_TYPE, + "Column name") + .addPrimitiveColumn("KEY_SEQ", Type.INT_TYPE, + "Sequence number within primary key") + .addPrimitiveColumn("PK_NAME", Type.STRING_TYPE, + "Primary key name (may be null)"); + + private final String catalogName; + private final String schemaName; + private final String tableName; + + private final RowSet rowSet; + + public GetPrimaryKeysOperation(HiveSession parentSession, + String catalogName, String schemaName, String tableName) { + super(parentSession, OperationType.GET_FUNCTIONS); + this.catalogName = catalogName; + this.schemaName = schemaName; + this.tableName = tableName; + this.rowSet = RowSetFactory.create(RESULT_SET_SCHEMA, getProtocolVersion()); + } + + @Override + public void runInternal() throws HiveSQLException { + setState(OperationState.RUNNING); + try { + IMetaStoreClient metastoreClient = getParentSession().getMetaStoreClient(); + List<SQLPrimaryKey> pks = metastoreClient.getPrimaryKeys(schemaName, tableName); + if (pks == null) { + return; + } + for (SQLPrimaryKey pk : pks) { + rowSet.addRow(new Object[] {catalogName, pk.getTable_schem(), + pk.getTable_name(), pk.getColumn_name(), pk.getKey_seq(), pk.getPk_name()}); + } + setState(OperationState.FINISHED); + } catch (Exception e) { + setState(OperationState.ERROR); + throw new HiveSQLException(e); + } + } + + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.Operation#getResultSetSchema() + */ + @Override + public TableSchema getResultSetSchema() throws HiveSQLException { + assertState(OperationState.FINISHED); + return RESULT_SET_SCHEMA; + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.Operation#getNextRowSet(org.apache.hive.service.cli.FetchOrientation, long) + */ + @Override + public RowSet getNextRowSet(FetchOrientation orientation, long maxRows) throws HiveSQLException { + assertState(OperationState.FINISHED); + validateDefaultFetchOrientation(orientation); + if (orientation.equals(FetchOrientation.FETCH_FIRST)) { + rowSet.setStartOffset(0); + } + return rowSet.extractSubset((int)maxRows); + } +} diff --git a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java index 56a9c18..8bb7c3b 100644 --- a/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java +++ b/service/src/java/org/apache/hive/service/cli/operation/OperationManager.java @@ -169,6 +169,25 @@ public GetFunctionsOperation newGetFunctionsOperation(HiveSession parentSession, return operation; } + public GetPrimaryKeysOperation newGetPrimaryKeysOperation(HiveSession parentSession, + String catalogName, String schemaName, String tableName) { + GetPrimaryKeysOperation operation = new GetPrimaryKeysOperation(parentSession, + catalogName, schemaName, tableName); + addOperation(operation); + return operation; + } + + public GetCrossReferenceOperation newGetCrossReferenceOperation( + HiveSession session, String primaryCatalog, String primarySchema, + String primaryTable, String foreignCatalog, String foreignSchema, + String foreignTable) { + GetCrossReferenceOperation operation = new
GetCrossReferenceOperation(session,
+ primaryCatalog, primarySchema, primaryTable, foreignCatalog, foreignSchema,
+ foreignTable);
+ addOperation(operation);
+ return operation;
+ }
+
 public Operation getOperation(OperationHandle operationHandle) throws HiveSQLException {
 Operation operation = getOperationInternal(operationHandle);
 if (operation == null) {
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSession.java b/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
index 4f4e92d..9ea643b 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSession.java
@@ -136,6 +136,34 @@ OperationHandle getFunctions(String catalogName, String schemaName,
 String functionName) throws HiveSQLException;
 /**
+ * getPrimaryKeys operation handler.
+ * @param catalog catalog name (may be null)
+ * @param schema schema (database) name
+ * @param table table name
+ * @return handle of the spawned metadata operation
+ * @throws HiveSQLException
+ */
+ OperationHandle getPrimaryKeys(String catalog, String schema, String table)
+ throws HiveSQLException;
+
+
+ /**
+ * getCrossReference operation handler.
+ * @param primaryCatalog catalog of the primary-key table (may be null)
+ * @param primarySchema schema of the primary-key table
+ * @param primaryTable primary-key table name
+ * @param foreignCatalog catalog of the foreign-key table (may be null)
+ * @param foreignSchema schema of the foreign-key table
+ * @param foreignTable foreign-key table name
+ * @return handle of the spawned metadata operation
+ * @throws HiveSQLException
+ */
+ OperationHandle getCrossReference(String primaryCatalog,
+ String primarySchema, String primaryTable, String foreignCatalog,
+ String foreignSchema, String foreignTable)
+ throws HiveSQLException;
+
 /**
 * close the session
 * @throws HiveSQLException
 */
diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
index 80a1844..f7c12dc 100644
--- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
+++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java
@@ -62,7 +62,9 @@ import org.apache.hive.service.cli.operation.ExecuteStatementOperation;
 import org.apache.hive.service.cli.operation.GetCatalogsOperation;
 import org.apache.hive.service.cli.operation.GetColumnsOperation;
+import org.apache.hive.service.cli.operation.GetCrossReferenceOperation;
 import org.apache.hive.service.cli.operation.GetFunctionsOperation;
+import org.apache.hive.service.cli.operation.GetPrimaryKeysOperation;
 import org.apache.hive.service.cli.operation.GetSchemasOperation;
 import org.apache.hive.service.cli.operation.GetTableTypesOperation;
 import org.apache.hive.service.cli.operation.GetTypeInfoOperation;
@@ -836,4 +838,49 @@ public void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr)
 private String getUserFromToken(HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException {
 return authFactory.getUserFromToken(tokenStr);
 }
+
+ @Override
+ public OperationHandle getPrimaryKeys(String catalog, String schema,
+ String table) throws HiveSQLException {
+ acquire(true);
+
+ OperationManager operationManager = getOperationManager();
+ GetPrimaryKeysOperation operation = operationManager
+ .newGetPrimaryKeysOperation(getSession(), catalog, schema, table);
+ OperationHandle opHandle = operation.getHandle();
+ try {
+ operation.run();
+ addOpHandle(opHandle);
+ return opHandle;
+ } catch (HiveSQLException e) {
+ operationManager.closeOperation(opHandle);
+ throw e;
+ } finally {
+ release(true);
+ }
+ }
+
+ @Override
+ public OperationHandle getCrossReference(String primaryCatalog,
+ String primarySchema, String primaryTable, String foreignCatalog,
+
String foreignSchema, String foreignTable) throws HiveSQLException { + acquire(true); + + OperationManager operationManager = getOperationManager(); + GetCrossReferenceOperation operation = operationManager + .newGetCrossReferenceOperation(getSession(), primaryCatalog, + primarySchema, primaryTable, foreignCatalog, + foreignSchema, foreignTable); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } +} } diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java.orig b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java.orig new file mode 100644 index 0000000..80a1844 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java.orig @@ -0,0 +1,839 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service.cli.session; + +import java.io.BufferedReader; +import java.io.File; +import java.io.FileInputStream; +import java.io.IOException; +import java.io.InputStreamReader; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.commons.io.FileUtils; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.hive.common.cli.HiveFileProcessor; +import org.apache.hadoop.hive.common.cli.IHiveFileProcessor; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.IMetaStoreClient; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.ql.QueryPlan; +import org.apache.hadoop.hive.ql.exec.FetchFormatter; +import org.apache.hadoop.hive.ql.exec.ListSinkOperator; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.history.HiveHistory; +import org.apache.hadoop.hive.ql.metadata.Hive; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.processors.SetProcessor; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hive.common.util.HiveVersionInfo; +import org.apache.hive.service.auth.HiveAuthFactory; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.FetchType; +import org.apache.hive.service.cli.GetInfoType; +import org.apache.hive.service.cli.GetInfoValue; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.RowSet; +import 
org.apache.hive.service.cli.SessionHandle; +import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.operation.ExecuteStatementOperation; +import org.apache.hive.service.cli.operation.GetCatalogsOperation; +import org.apache.hive.service.cli.operation.GetColumnsOperation; +import org.apache.hive.service.cli.operation.GetFunctionsOperation; +import org.apache.hive.service.cli.operation.GetSchemasOperation; +import org.apache.hive.service.cli.operation.GetTableTypesOperation; +import org.apache.hive.service.cli.operation.GetTypeInfoOperation; +import org.apache.hive.service.cli.operation.MetadataOperation; +import org.apache.hive.service.cli.operation.Operation; +import org.apache.hive.service.cli.operation.OperationManager; +import org.apache.hive.service.rpc.thrift.TProtocolVersion; +import org.apache.hive.service.server.ThreadWithGarbageCleanup; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * HiveSession + * + */ +public class HiveSessionImpl implements HiveSession { + + // Shared between threads (including SessionState!) + private final SessionHandle sessionHandle; + private String username; + private final String password; + private final HiveConf hiveConf; + private final long creationTime; + // TODO: some SessionState internals are not thread safe. The compile-time internals are synced + // via session-scope or global compile lock. The run-time internals work by magic! + // They probably work because races are relatively unlikely and few tools run parallel + // queries from the same session. + // 1) OperationState should be refactored out of SessionState, and made thread-local. + // 2) Some parts of session state, like mrStats and vars, need proper synchronization. + private SessionState sessionState; + private String ipAddress; + + private static final String FETCH_WORK_SERDE_CLASS = + "org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe"; + private static final Logger LOG = LoggerFactory.getLogger(HiveSessionImpl.class); + + private SessionManager sessionManager; + private OperationManager operationManager; + // Synchronized by locking on itself. + private final Set opHandleSet = new HashSet(); + private boolean isOperationLogEnabled; + private File sessionLogDir; + // TODO: the control flow for this needs to be defined. Hive is supposed to be thread-local. + private Hive sessionHive; + + private volatile long lastAccessTime; + private volatile long lastIdleTime; + + + public HiveSessionImpl(SessionHandle sessionHandle, TProtocolVersion protocol, String username, String password, + HiveConf serverhiveConf, String ipAddress) { + this.username = username; + this.password = password; + creationTime = System.currentTimeMillis(); + this.sessionHandle = sessionHandle != null ? sessionHandle : new SessionHandle(protocol); + this.hiveConf = new HiveConf(serverhiveConf); + this.ipAddress = ipAddress; + try { + // In non-impersonation mode, map scheduler queue to current user + // if fair scheduler is configured. + if (! 
hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) && + hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_MAP_FAIR_SCHEDULER_QUEUE)) { + ShimLoader.getHadoopShims().refreshDefaultQueue(hiveConf, username); + } + } catch (IOException e) { + LOG.warn("Error setting scheduler queue: " + e, e); + } + // Set an explicit session name to control the download directory name + hiveConf.set(ConfVars.HIVESESSIONID.varname, + this.sessionHandle.getHandleIdentifier().toString()); + // Use thrift transportable formatter + hiveConf.set(ListSinkOperator.OUTPUT_FORMATTER, + FetchFormatter.ThriftFormatter.class.getName()); + hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue()); + } + + public HiveSessionImpl(TProtocolVersion protocol, String username, String password, + HiveConf serverhiveConf, String ipAddress) { + this(null, protocol, username, password, serverhiveConf, ipAddress); + } + + + @Override + /** + * Opens a new HiveServer2 session for the client connection. + * Creates a new SessionState object that will be associated with this HiveServer2 session. + * When the server executes multiple queries in the same session, + * this SessionState object is reused across multiple queries. + * Note that if doAs is true, this call goes through a proxy object, + * which wraps the method logic in a UserGroupInformation#doAs. + * That's why it is important to create SessionState here rather than in the constructor. + */ + public void open(Map sessionConfMap) throws HiveSQLException { + sessionState = new SessionState(hiveConf, username); + sessionState.setUserIpAddress(ipAddress); + sessionState.setIsHiveServerQuery(true); + SessionState.start(sessionState); + try { + sessionState.reloadAuxJars(); + } catch (IOException e) { + String msg = "Failed to load reloadable jar file path: " + e; + LOG.error(msg, e); + throw new HiveSQLException(msg, e); + } + try { + sessionHive = Hive.get(getHiveConf()); + } catch (HiveException e) { + throw new HiveSQLException("Failed to get metastore connection", e); + } + // Process global init file: .hiverc + processGlobalInitFile(); + if (sessionConfMap != null) { + configureSession(sessionConfMap); + } + lastAccessTime = System.currentTimeMillis(); + lastIdleTime = lastAccessTime; + } + + /** + * It is used for processing hiverc file from HiveServer2 side. 
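+ * Each command found in the file is executed synchronously through
+ * executeStatementInternal(); a failing command makes processCmd() return
+ * -1, which processGlobalInitFile() then reports in the server log.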
+ */ + private class GlobalHivercFileProcessor extends HiveFileProcessor { + @Override + protected BufferedReader loadFile(String fileName) throws IOException { + FileInputStream initStream = null; + BufferedReader bufferedReader = null; + initStream = new FileInputStream(fileName); + bufferedReader = new BufferedReader(new InputStreamReader(initStream)); + return bufferedReader; + } + + @Override + protected int processCmd(String cmd) { + int rc = 0; + String cmd_trimed = cmd.trim(); + OperationHandle opHandle = null; + try { + //execute in sync mode + opHandle = executeStatementInternal(cmd_trimed, null, false); + } catch (HiveSQLException e) { + LOG.warn("Failed to execute command in global .hiverc file.", e); + return -1; + } + if (opHandle != null) { + try { + closeOperation(opHandle); + } catch (HiveSQLException e) { + LOG.warn("Failed to close operation for command in .hiverc file.", e); + } + } + return rc; + } + } + + private void processGlobalInitFile() { + IHiveFileProcessor processor = new GlobalHivercFileProcessor(); + + try { + String hiverc = hiveConf.getVar(ConfVars.HIVE_SERVER2_GLOBAL_INIT_FILE_LOCATION); + if (hiverc != null) { + File hivercFile = new File(hiverc); + if (hivercFile.isDirectory()) { + hivercFile = new File(hivercFile, SessionManager.HIVERCFILE); + } + if (hivercFile.isFile()) { + LOG.info("Running global init file: " + hivercFile); + int rc = processor.processFile(hivercFile.getAbsolutePath()); + if (rc != 0) { + LOG.error("Failed on initializing global .hiverc file"); + } + } else { + LOG.debug("Global init file " + hivercFile + " does not exist"); + } + } + } catch (IOException e) { + LOG.warn("Failed on initializing global .hiverc file", e); + } + } + + private void configureSession(Map sessionConfMap) throws HiveSQLException { + SessionState.setCurrentSessionState(sessionState); + for (Map.Entry entry : sessionConfMap.entrySet()) { + String key = entry.getKey(); + if (key.startsWith("set:")) { + try { + SetProcessor.setVariable(key.substring(4), entry.getValue()); + } catch (Exception e) { + throw new HiveSQLException(e); + } + } else if (key.startsWith("use:")) { + SessionState.get().setCurrentDatabase(entry.getValue()); + } else { + hiveConf.verifyAndSet(key, entry.getValue()); + } + } + } + + @Override + public void setOperationLogSessionDir(File operationLogRootDir) { + if (!operationLogRootDir.exists()) { + LOG.warn("The operation log root directory is removed, recreating:" + + operationLogRootDir.getAbsolutePath()); + if (!operationLogRootDir.mkdirs()) { + LOG.warn("Unable to create operation log root directory: " + + operationLogRootDir.getAbsolutePath()); + } + } + if (!operationLogRootDir.canWrite()) { + LOG.warn("The operation log root directory is not writable: " + + operationLogRootDir.getAbsolutePath()); + } + sessionLogDir = new File(operationLogRootDir, sessionHandle.getHandleIdentifier().toString()); + isOperationLogEnabled = true; + if (!sessionLogDir.exists()) { + if (!sessionLogDir.mkdir()) { + LOG.warn("Unable to create operation log session directory: " + + sessionLogDir.getAbsolutePath()); + isOperationLogEnabled = false; + } + } + if (isOperationLogEnabled) { + LOG.info("Operation log session directory is created: " + sessionLogDir.getAbsolutePath()); + } + } + + @Override + public boolean isOperationLogEnabled() { + return isOperationLogEnabled; + } + + @Override + public File getOperationLogSessionDir() { + return sessionLogDir; + } + + @Override + public TProtocolVersion getProtocolVersion() { + return 
sessionHandle.getProtocolVersion(); + } + + @Override + public SessionManager getSessionManager() { + return sessionManager; + } + + @Override + public void setSessionManager(SessionManager sessionManager) { + this.sessionManager = sessionManager; + } + + private OperationManager getOperationManager() { + return operationManager; + } + + @Override + public void setOperationManager(OperationManager operationManager) { + this.operationManager = operationManager; + } + + protected synchronized void acquire(boolean userAccess) { + // Need to make sure that the this HiveServer2's session's SessionState is + // stored in the thread local for the handler thread. + SessionState.setCurrentSessionState(sessionState); + if (userAccess) { + lastAccessTime = System.currentTimeMillis(); + } + // set the thread name with the logging prefix. + sessionState.updateThreadName(); + Hive.set(sessionHive); + } + + /** + * 1. We'll remove the ThreadLocal SessionState as this thread might now serve + * other requests. + * 2. We'll cache the ThreadLocal RawStore object for this background thread for an orderly cleanup + * when this thread is garbage collected later. + * @see org.apache.hive.service.server.ThreadWithGarbageCleanup#finalize() + */ + protected synchronized void release(boolean userAccess) { + if (sessionState != null) { + // can be null in-case of junit tests. skip reset. + // reset thread name at release time. + sessionState.resetThreadName(); + } + + SessionState.detachSession(); + if (ThreadWithGarbageCleanup.currentThread() instanceof ThreadWithGarbageCleanup) { + ThreadWithGarbageCleanup currentThread = + (ThreadWithGarbageCleanup) ThreadWithGarbageCleanup.currentThread(); + currentThread.cacheThreadLocalRawStore(); + } + if (userAccess) { + lastAccessTime = System.currentTimeMillis(); + } + if (opHandleSet.isEmpty()) { + lastIdleTime = System.currentTimeMillis(); + } else { + lastIdleTime = 0; + } + } + + @Override + public SessionHandle getSessionHandle() { + return sessionHandle; + } + + @Override + public String getPassword() { + return password; + } + + @Override + public HiveConf getHiveConf() { + hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, FETCH_WORK_SERDE_CLASS); + return hiveConf; + } + + @Override + public Hive getSessionHive() { + return sessionHive; + } + + @Override + public IMetaStoreClient getMetaStoreClient() throws HiveSQLException { + try { + return getSessionHive().getMSC(); + } catch (MetaException e) { + throw new HiveSQLException("Failed to get metastore connection: " + e, e); + } + } + + @Override + public GetInfoValue getInfo(GetInfoType getInfoType) + throws HiveSQLException { + acquire(true); + try { + switch (getInfoType) { + case CLI_SERVER_NAME: + return new GetInfoValue("Hive"); + case CLI_DBMS_NAME: + return new GetInfoValue("Apache Hive"); + case CLI_DBMS_VER: + return new GetInfoValue(HiveVersionInfo.getVersion()); + case CLI_MAX_COLUMN_NAME_LEN: + return new GetInfoValue(128); + case CLI_MAX_SCHEMA_NAME_LEN: + return new GetInfoValue(128); + case CLI_MAX_TABLE_NAME_LEN: + return new GetInfoValue(128); + case CLI_TXN_CAPABLE: + default: + throw new HiveSQLException("Unrecognized GetInfoType value: " + getInfoType.toString()); + } + } finally { + release(true); + } + } + + @Override + public OperationHandle executeStatement(String statement, Map confOverlay) + throws HiveSQLException { + return executeStatementInternal(statement, confOverlay, false); + } + + @Override + public OperationHandle executeStatementAsync(String statement, Map confOverlay) + 
throws HiveSQLException { + return executeStatementInternal(statement, confOverlay, true); + } + + private OperationHandle executeStatementInternal(String statement, Map confOverlay, + boolean runAsync) + throws HiveSQLException { + acquire(true); + + // Make a copy of confOverlay + if (confOverlay == null) { + confOverlay = new HashMap(); + } else { + Map conf = new HashMap(); + conf.putAll(confOverlay); + confOverlay = conf; + } + confOverlay.put(HiveConf.ConfVars.HIVEQUERYID.varname, QueryPlan.makeQueryId()); + + OperationManager operationManager = getOperationManager(); + ExecuteStatementOperation operation = operationManager + .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + // Refering to SQLOperation.java,there is no chance that a HiveSQLException throws and the asyn + // background operation submits to thread pool successfully at the same time. So, Cleanup + // opHandle directly when got HiveSQLException + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public OperationHandle getTypeInfo() + throws HiveSQLException { + acquire(true); + + OperationManager operationManager = getOperationManager(); + GetTypeInfoOperation operation = operationManager.newGetTypeInfoOperation(getSession()); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public OperationHandle getCatalogs() + throws HiveSQLException { + acquire(true); + + OperationManager operationManager = getOperationManager(); + GetCatalogsOperation operation = operationManager.newGetCatalogsOperation(getSession()); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public OperationHandle getSchemas(String catalogName, String schemaName) + throws HiveSQLException { + acquire(true); + + OperationManager operationManager = getOperationManager(); + GetSchemasOperation operation = + operationManager.newGetSchemasOperation(getSession(), catalogName, schemaName); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public OperationHandle getTables(String catalogName, String schemaName, String tableName, + List tableTypes) + throws HiveSQLException { + acquire(true); + + OperationManager operationManager = getOperationManager(); + MetadataOperation operation = + operationManager.newGetTablesOperation(getSession(), catalogName, schemaName, tableName, tableTypes); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public OperationHandle getTableTypes() + throws HiveSQLException { + acquire(true); + + OperationManager operationManager = 
getOperationManager(); + GetTableTypesOperation operation = operationManager.newGetTableTypesOperation(getSession()); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public OperationHandle getColumns(String catalogName, String schemaName, + String tableName, String columnName) throws HiveSQLException { + acquire(true); + String addedJars = Utilities.getResourceFiles(hiveConf, SessionState.ResourceType.JAR); + if (StringUtils.isNotBlank(addedJars)) { + IMetaStoreClient metastoreClient = getSession().getMetaStoreClient(); + metastoreClient.setHiveAddedJars(addedJars); + } + OperationManager operationManager = getOperationManager(); + GetColumnsOperation operation = operationManager.newGetColumnsOperation(getSession(), + catalogName, schemaName, tableName, columnName); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + private void addOpHandle(OperationHandle opHandle) { + synchronized (opHandleSet) { + opHandleSet.add(opHandle); + } + } + + @Override + public OperationHandle getFunctions(String catalogName, String schemaName, String functionName) + throws HiveSQLException { + acquire(true); + + OperationManager operationManager = getOperationManager(); + GetFunctionsOperation operation = operationManager + .newGetFunctionsOperation(getSession(), catalogName, schemaName, functionName); + OperationHandle opHandle = operation.getHandle(); + try { + operation.run(); + addOpHandle(opHandle); + return opHandle; + } catch (HiveSQLException e) { + operationManager.closeOperation(opHandle); + throw e; + } finally { + release(true); + } + } + + @Override + public void close() throws HiveSQLException { + try { + acquire(true); + // Iterate through the opHandles and close their operations + List ops = null; + synchronized (opHandleSet) { + ops = new ArrayList<>(opHandleSet); + opHandleSet.clear(); + } + for (OperationHandle opHandle : ops) { + operationManager.closeOperation(opHandle); + } + // Cleanup session log directory. 
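+ // cleanupSessionLogDir() force-deletes the per-session operation log
+ // directory; any failure there is logged and swallowed, so it cannot
+ // abort the remaining teardown of HiveHistory and SessionState below.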
+ cleanupSessionLogDir(); + HiveHistory hiveHist = sessionState.getHiveHistory(); + if (null != hiveHist) { + hiveHist.closeStream(); + } + try { + sessionState.close(); + } finally { + sessionState = null; + } + } catch (IOException ioe) { + throw new HiveSQLException("Failure to close", ioe); + } finally { + if (sessionState != null) { + try { + sessionState.close(); + } catch (Throwable t) { + LOG.warn("Error closing session", t); + } + sessionState = null; + } + if (sessionHive != null) { + try { + Hive.closeCurrent(); + } catch (Throwable t) { + LOG.warn("Error closing sessionHive", t); + } + sessionHive = null; + } + release(true); + } + } + + private void cleanupSessionLogDir() { + if (isOperationLogEnabled) { + try { + FileUtils.forceDelete(sessionLogDir); + LOG.info("Operation log session directory is deleted: " + + sessionLogDir.getAbsolutePath()); + } catch (Exception e) { + LOG.error("Failed to cleanup session log dir: " + sessionHandle, e); + } + } + } + + @Override + public SessionState getSessionState() { + return sessionState; + } + + @Override + public String getUserName() { + return username; + } + + @Override + public void setUserName(String userName) { + this.username = userName; + } + + @Override + public long getLastAccessTime() { + return lastAccessTime; + } + + @Override + public long getCreationTime() { + return creationTime; + } + + @Override + public void closeExpiredOperations() { + OperationHandle[] handles; + synchronized (opHandleSet) { + handles = opHandleSet.toArray(new OperationHandle[opHandleSet.size()]); + } + if (handles.length > 0) { + List operations = operationManager.removeExpiredOperations(handles); + if (!operations.isEmpty()) { + closeTimedOutOperations(operations); + } + } + } + + @Override + public long getNoOperationTime() { + return lastIdleTime > 0 ? 
System.currentTimeMillis() - lastIdleTime : 0; + } + + private void closeTimedOutOperations(List operations) { + acquire(false); + try { + for (Operation operation : operations) { + synchronized (opHandleSet) { + opHandleSet.remove(operation.getHandle()); + } + try { + operation.close(); + } catch (Exception e) { + LOG.warn("Exception is thrown closing timed-out operation " + operation.getHandle(), e); + } + } + } finally { + release(false); + } + } + + @Override + public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { + acquire(true); + try { + sessionManager.getOperationManager().cancelOperation(opHandle); + } finally { + release(true); + } + } + + @Override + public void closeOperation(OperationHandle opHandle) throws HiveSQLException { + acquire(true); + try { + operationManager.closeOperation(opHandle); + synchronized (opHandleSet) { + opHandleSet.remove(opHandle); + } + } finally { + release(true); + } + } + + @Override + public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException { + acquire(true); + try { + return sessionManager.getOperationManager().getOperationResultSetSchema(opHandle); + } finally { + release(true); + } + } + + @Override + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, + long maxRows, FetchType fetchType) throws HiveSQLException { + acquire(true); + try { + if (fetchType == FetchType.QUERY_OUTPUT) { + return operationManager.getOperationNextRowSet(opHandle, orientation, maxRows); + } + return operationManager.getOperationLogRowSet(opHandle, orientation, maxRows, hiveConf); + } finally { + release(true); + } + } + + protected HiveSession getSession() { + return this; + } + + @Override + public int getOpenOperationCount() { + return opHandleSet.size(); + } + + @Override + public String getIpAddress() { + return ipAddress; + } + + @Override + public void setIpAddress(String ipAddress) { + this.ipAddress = ipAddress; + } + + @Override + public String getDelegationToken(HiveAuthFactory authFactory, String owner, String renewer) + throws HiveSQLException { + HiveAuthFactory.verifyProxyAccess(getUserName(), owner, getIpAddress(), getHiveConf()); + return authFactory.getDelegationToken(owner, renewer, getIpAddress()); + } + + @Override + public void cancelDelegationToken(HiveAuthFactory authFactory, String tokenStr) + throws HiveSQLException { + HiveAuthFactory.verifyProxyAccess(getUserName(), getUserFromToken(authFactory, tokenStr), + getIpAddress(), getHiveConf()); + authFactory.cancelDelegationToken(tokenStr); + } + + @Override + public void renewDelegationToken(HiveAuthFactory authFactory, String tokenStr) + throws HiveSQLException { + HiveAuthFactory.verifyProxyAccess(getUserName(), getUserFromToken(authFactory, tokenStr), + getIpAddress(), getHiveConf()); + authFactory.renewDelegationToken(tokenStr); + } + + // extract the real user from the given token string + private String getUserFromToken(HiveAuthFactory authFactory, String tokenStr) throws HiveSQLException { + return authFactory.getUserFromToken(tokenStr); + } +} diff --git a/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java index 14191e5..0867ff5 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java @@ -207,6 +207,22 @@ public RowSet 
fetchResults(OperationHandle opHandle, FetchOrientation orientatio public void closeTransport() { tTransport.close(); } + + @Override + public OperationHandle getPrimaryKeys(SessionHandle sessionHandle, + String catalog, String schema, String table) + throws HiveSQLException { + return cliService.getPrimaryKeys(sessionHandle, catalog, schema, table); + } + + @Override + public OperationHandle getCrossReference(SessionHandle sessionHandle, + String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) + throws HiveSQLException { + return cliService.getCrossReference(sessionHandle, primaryCatalog, primarySchema, + primaryTable, foreignCatalog, foreignSchema, foreignTable); + } } protected RetryingThriftCLIServiceClient(HiveConf conf) { diff --git a/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java.orig b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java.orig new file mode 100644 index 0000000..14191e5 --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/thrift/RetryingThriftCLIServiceClient.java.orig @@ -0,0 +1,350 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hive.service.cli.thrift; + +import java.lang.reflect.InvocationHandler; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.lang.reflect.Proxy; +import java.lang.reflect.UndeclaredThrowableException; +import java.net.SocketException; +import java.util.Collections; +import java.util.List; +import java.util.Map; +import java.util.concurrent.TimeUnit; + +import javax.security.sasl.SaslException; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hive.service.auth.HiveAuthFactory; +import org.apache.hive.service.auth.PlainSaslHelper; +import org.apache.hive.service.cli.CLIServiceClient; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.FetchType; +import org.apache.hive.service.cli.GetInfoType; +import org.apache.hive.service.cli.GetInfoValue; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.ICLIService; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.OperationStatus; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.SessionHandle; +import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.rpc.thrift.TCLIService; +import org.apache.thrift.TApplicationException; +import org.apache.thrift.protocol.TBinaryProtocol; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.transport.TSocket; +import org.apache.thrift.transport.TTransport; +import org.apache.thrift.transport.TTransportException; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * RetryingThriftCLIServiceClient. Creates a proxy for a CLIServiceClient + * implementation and retries calls to it on failure. 
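+ *
+ * <p>A minimal usage sketch (the user name, password and statement are
+ * placeholders, and a reachable HiveServer2 is assumed to be configured in
+ * the HiveConf):</p>
+ * <pre>{@code
+ * HiveConf conf = new HiveConf();
+ * CLIServiceClient client =
+ *     RetryingThriftCLIServiceClient.newRetryingCLIServiceClient(conf);
+ * SessionHandle session = client.openSession("user", "password");
+ * try {
+ *   OperationHandle op = client.executeStatement(session, "SHOW TABLES",
+ *       Collections.<String, String>emptyMap());
+ *   RowSet rows = client.fetchResults(op, FetchOrientation.FETCH_NEXT,
+ *       100, FetchType.QUERY_OUTPUT);
+ * } finally {
+ *   client.closeSession(session);
+ * }
+ * }</pre>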
+ */ +public class RetryingThriftCLIServiceClient implements InvocationHandler { + public static final Logger LOG = LoggerFactory.getLogger(RetryingThriftCLIServiceClient.class); + private ThriftCLIServiceClient base; + private final int retryLimit; + private final int retryDelaySeconds; + private HiveConf conf; + private TTransport transport; + + public static class CLIServiceClientWrapper extends CLIServiceClient { + private final ICLIService cliService; + private TTransport tTransport; + + public CLIServiceClientWrapper(ICLIService icliService, TTransport tTransport) { + cliService = icliService; + this.tTransport = tTransport; + } + + @Override + public SessionHandle openSession(String username, String password) throws HiveSQLException { + return cliService.openSession(username, password, Collections.emptyMap()); + } + + @Override + public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, String owner, + String renewer) throws HiveSQLException { + return cliService.getDelegationToken(sessionHandle, authFactory, owner, renewer); + } + + @Override + public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException { + cliService.cancelDelegationToken(sessionHandle, authFactory, tokenStr); + } + + @Override + public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException { + cliService.renewDelegationToken(sessionHandle, authFactory, tokenStr); + } + + @Override + public SessionHandle openSession(String username, String password, Map configuration) + throws HiveSQLException { + return cliService.openSession(username, password, configuration); + } + + @Override + public SessionHandle openSessionWithImpersonation(String username, + String password, + Map configuration, + String delegationToken) throws HiveSQLException { + return cliService.openSessionWithImpersonation(username, password, configuration, delegationToken); + } + + @Override + public void closeSession(SessionHandle sessionHandle) throws HiveSQLException { + cliService.closeSession(sessionHandle); + } + + @Override + public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType getInfoType) throws HiveSQLException { + return cliService.getInfo(sessionHandle, getInfoType); + } + + @Override + public OperationHandle executeStatement(SessionHandle sessionHandle, + String statement, + Map confOverlay) throws HiveSQLException { + return cliService.executeStatement(sessionHandle, statement, confOverlay); + } + + @Override + public OperationHandle executeStatementAsync(SessionHandle sessionHandle, + String statement, + Map confOverlay) throws HiveSQLException { + return cliService.executeStatementAsync(sessionHandle, statement, confOverlay); + } + + @Override + public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException { + return cliService.getTypeInfo(sessionHandle); + } + + @Override + public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException { + return cliService.getCatalogs(sessionHandle); + } + + @Override + public OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName, String schemaName) + throws HiveSQLException { + return cliService.getSchemas(sessionHandle, catalogName, schemaName); + } + + @Override + public OperationHandle getTables(SessionHandle sessionHandle, String catalogName, String schemaName, + String tableName, List tableTypes) throws HiveSQLException { + return 
cliService.getTables(sessionHandle, catalogName, schemaName, tableName, tableTypes); + } + + @Override + public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException { + return null; + } + + @Override + public OperationHandle getColumns(SessionHandle sessionHandle, String catalogName, String schemaName, + String tableName, String columnName) throws HiveSQLException { + return cliService.getColumns(sessionHandle, catalogName, schemaName, tableName, columnName); + } + + @Override + public OperationHandle getFunctions(SessionHandle sessionHandle, String catalogName, String schemaName, + String functionName) throws HiveSQLException { + return cliService.getFunctions(sessionHandle, catalogName, schemaName, functionName); + } + + @Override + public OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException { + return cliService.getOperationStatus(opHandle); + } + + @Override + public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { + cliService.cancelOperation(opHandle); + } + + @Override + public void closeOperation(OperationHandle opHandle) throws HiveSQLException { + cliService.closeOperation(opHandle); + } + + @Override + public TableSchema getResultSetMetadata(OperationHandle opHandle) throws HiveSQLException { + return cliService.getResultSetMetadata(opHandle); + } + + @Override + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows, + FetchType fetchType) throws HiveSQLException { + return cliService.fetchResults(opHandle, orientation, maxRows, fetchType); + } + + public void closeTransport() { + tTransport.close(); + } + } + + protected RetryingThriftCLIServiceClient(HiveConf conf) { + this.conf = conf; + retryLimit = conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_LIMIT); + retryDelaySeconds = (int) conf.getTimeVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_RETRY_DELAY_SECONDS, + TimeUnit.SECONDS); + } + + public static CLIServiceClientWrapper newRetryingCLIServiceClient(HiveConf conf) throws HiveSQLException { + RetryingThriftCLIServiceClient retryClient = new RetryingThriftCLIServiceClient(conf); + TTransport tTransport = retryClient + .connectWithRetry(conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT)); + ICLIService cliService = + (ICLIService) Proxy.newProxyInstance(RetryingThriftCLIServiceClient.class.getClassLoader(), + CLIServiceClient.class.getInterfaces(), retryClient); + return new CLIServiceClientWrapper(cliService, tTransport); + } + + protected TTransport connectWithRetry(int retries) throws HiveSQLException { + TTransportException exception = null; + for (int i = 0 ; i < retries; i++) { + try { + return connect(conf); + } catch (TTransportException e) { + exception = e; + LOG.warn("Connection attempt " + i, e); + } + try { + Thread.sleep(retryDelaySeconds * 1000); + } catch (InterruptedException e) { + LOG.warn("Interrupted", e); + } + } + throw new HiveSQLException("Unable to connect after " + retries + " retries", exception); + } + + protected synchronized TTransport connect(HiveConf conf) throws HiveSQLException, TTransportException { + if (transport != null && transport.isOpen()) { + transport.close(); + } + + String host = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST); + int port = conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT); + LOG.info("Connecting to " + host + ":" + port); + + transport = new TSocket(host, port); + ((TSocket) transport).setTimeout((int) 
conf.getTimeVar(HiveConf.ConfVars.SERVER_READ_SOCKET_TIMEOUT, + TimeUnit.SECONDS) * 1000); + try { + ((TSocket) transport).getSocket().setKeepAlive(conf.getBoolVar(HiveConf.ConfVars.SERVER_TCP_KEEP_ALIVE)); + } catch (SocketException e) { + LOG.error("Error setting keep alive to " + conf.getBoolVar(HiveConf.ConfVars.SERVER_TCP_KEEP_ALIVE), e); + } + + String userName = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_USER); + String passwd = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_PASSWORD); + + try { + transport = PlainSaslHelper.getPlainTransport(userName, passwd, transport); + } catch (SaslException e) { + LOG.error("Error creating plain SASL transport", e); + } + + TProtocol protocol = new TBinaryProtocol(transport); + transport.open(); + base = new ThriftCLIServiceClient(new TCLIService.Client(protocol)); + LOG.info("Connected!"); + return transport; + } + + protected class InvocationResult { + final boolean success; + final Object result; + final Throwable exception; + + InvocationResult(boolean success, Object result, Throwable exception) { + this.success = success; + this.result = result; + this.exception = exception; + } + } + + protected InvocationResult invokeInternal(Method method, Object[] args) throws Throwable { + InvocationResult result; + try { + Object methodResult = method.invoke(base, args); + result = new InvocationResult(true, methodResult, null); + } catch (UndeclaredThrowableException e) { + throw e.getCause(); + } catch (InvocationTargetException e) { + if (e.getCause() instanceof HiveSQLException) { + HiveSQLException hiveExc = (HiveSQLException) e.getCause(); + Throwable cause = hiveExc.getCause(); + if ((cause instanceof TApplicationException) || + (cause instanceof TProtocolException) || + (cause instanceof TTransportException)) { + result = new InvocationResult(false, null, hiveExc); + } else { + throw hiveExc; + } + } else { + throw e.getCause(); + } + } + return result; + } + + @Override + public Object invoke(Object o, Method method, Object[] args) throws Throwable { + int attempts = 0; + + while (true) { + attempts++; + InvocationResult invokeResult = invokeInternal(method, args); + if (invokeResult.success) { + return invokeResult.result; + } + + // Error because of thrift client, we have to recreate base object + connectWithRetry(conf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_CLIENT_CONNECTION_RETRY_LIMIT)); + + if (attempts >= retryLimit) { + LOG.error(method.getName() + " failed after " + attempts + " retries.", invokeResult.exception); + throw invokeResult.exception; + } + + LOG.warn("Last call ThriftCLIServiceClient." 
+ method.getName() + " failed, attempts = " + attempts,
+ invokeResult.exception);
+ Thread.sleep(retryDelaySeconds * 1000);
+ }
+ }
+
+ public int getRetryLimit() {
+ return retryLimit;
+ }
+
+ public int getRetryDelaySeconds() {
+ return retryDelaySeconds;
+ }
+}
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
index 62fcde5..19a84d8 100644
--- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java
@@ -70,6 +70,8 @@ import org.apache.hive.service.rpc.thrift.TGetCatalogsResp;
 import org.apache.hive.service.rpc.thrift.TGetColumnsReq;
 import org.apache.hive.service.rpc.thrift.TGetColumnsResp;
+import org.apache.hive.service.rpc.thrift.TGetCrossReferenceReq;
+import org.apache.hive.service.rpc.thrift.TGetCrossReferenceResp;
 import org.apache.hive.service.rpc.thrift.TGetDelegationTokenReq;
 import org.apache.hive.service.rpc.thrift.TGetDelegationTokenResp;
 import org.apache.hive.service.rpc.thrift.TGetFunctionsReq;
@@ -78,6 +80,8 @@ import org.apache.hive.service.rpc.thrift.TGetInfoResp;
 import org.apache.hive.service.rpc.thrift.TGetOperationStatusReq;
 import org.apache.hive.service.rpc.thrift.TGetOperationStatusResp;
+import org.apache.hive.service.rpc.thrift.TGetPrimaryKeysReq;
+import org.apache.hive.service.rpc.thrift.TGetPrimaryKeysResp;
 import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataReq;
 import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataResp;
 import org.apache.hive.service.rpc.thrift.TGetSchemasReq;
@@ -756,6 +760,41 @@ public TFetchResultsResp FetchResults(TFetchResultsReq req) throws TException {
 }
 @Override
+ public TGetPrimaryKeysResp GetPrimaryKeys(TGetPrimaryKeysReq req)
+ throws TException {
+ TGetPrimaryKeysResp resp = new TGetPrimaryKeysResp();
+ try {
+ OperationHandle opHandle = cliService.getPrimaryKeys(
+ new SessionHandle(req.getSessionHandle()), req.getCatalogName(),
+ req.getSchemaName(), req.getTableName());
+ resp.setOperationHandle(opHandle.toTOperationHandle());
+ resp.setStatus(OK_STATUS);
+ } catch (Exception e) {
+ LOG.warn("Error getting primary keys: ", e);
+ resp.setStatus(HiveSQLException.toTStatus(e));
+ }
+ return resp;
+ }
+
+ @Override
+ public TGetCrossReferenceResp GetCrossReference(TGetCrossReferenceReq req)
+ throws TException {
+ TGetCrossReferenceResp resp = new TGetCrossReferenceResp();
+ try {
+ OperationHandle opHandle = cliService.getCrossReference(
+ new SessionHandle(req.getSessionHandle()), req.getParentCatalogName(),
+ req.getParentSchemaName(), req.getParentTableName(),
+ req.getForeignCatalogName(), req.getForeignSchemaName(), req.getForeignTableName());
+ resp.setOperationHandle(opHandle.toTOperationHandle());
+ resp.setStatus(OK_STATUS);
+ } catch (Exception e) {
+ LOG.warn("Error getting cross reference: ", e);
+ resp.setStatus(HiveSQLException.toTStatus(e));
+ }
+ return resp;
+ }
+
+ @Override
 public abstract void run();
 /**
diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java.orig b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java.orig
new file mode 100644
index 0000000..62fcde5
--- /dev/null
+++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java.orig
@@ -0,0 +1,811 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hive.service.cli.thrift; + +import java.io.IOException; +import java.net.InetAddress; +import java.net.UnknownHostException; +import java.util.HashMap; +import java.util.Map; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; + +import javax.security.auth.login.LoginException; + +import org.apache.hadoop.hive.common.metrics.common.Metrics; +import org.apache.hadoop.hive.common.metrics.common.MetricsConstant; +import org.apache.hadoop.hive.common.metrics.common.MetricsFactory; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.common.ServerUtils; +import org.apache.hadoop.hive.shims.HadoopShims.KerberosNameShim; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hive.service.AbstractService; +import org.apache.hive.service.ServiceException; +import org.apache.hive.service.ServiceUtils; +import org.apache.hive.service.auth.HiveAuthFactory; +import org.apache.hive.service.auth.TSetIpAddressProcessor; +import org.apache.hive.service.cli.CLIService; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.FetchType; +import org.apache.hive.service.cli.GetInfoType; +import org.apache.hive.service.cli.GetInfoValue; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.OperationStatus; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.SessionHandle; +import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.cli.session.SessionManager; +import org.apache.hive.service.rpc.thrift.TCLIService; +import org.apache.hive.service.rpc.thrift.TCancelDelegationTokenReq; +import org.apache.hive.service.rpc.thrift.TCancelDelegationTokenResp; +import org.apache.hive.service.rpc.thrift.TCancelOperationReq; +import org.apache.hive.service.rpc.thrift.TCancelOperationResp; +import org.apache.hive.service.rpc.thrift.TCloseOperationReq; +import org.apache.hive.service.rpc.thrift.TCloseOperationResp; +import org.apache.hive.service.rpc.thrift.TCloseSessionReq; +import org.apache.hive.service.rpc.thrift.TCloseSessionResp; +import org.apache.hive.service.rpc.thrift.TExecuteStatementReq; +import org.apache.hive.service.rpc.thrift.TExecuteStatementResp; +import org.apache.hive.service.rpc.thrift.TFetchResultsReq; +import org.apache.hive.service.rpc.thrift.TFetchResultsResp; +import org.apache.hive.service.rpc.thrift.TGetCatalogsReq; +import org.apache.hive.service.rpc.thrift.TGetCatalogsResp; +import org.apache.hive.service.rpc.thrift.TGetColumnsReq; +import org.apache.hive.service.rpc.thrift.TGetColumnsResp; +import org.apache.hive.service.rpc.thrift.TGetDelegationTokenReq; +import 
org.apache.hive.service.rpc.thrift.TGetDelegationTokenResp; +import org.apache.hive.service.rpc.thrift.TGetFunctionsReq; +import org.apache.hive.service.rpc.thrift.TGetFunctionsResp; +import org.apache.hive.service.rpc.thrift.TGetInfoReq; +import org.apache.hive.service.rpc.thrift.TGetInfoResp; +import org.apache.hive.service.rpc.thrift.TGetOperationStatusReq; +import org.apache.hive.service.rpc.thrift.TGetOperationStatusResp; +import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataReq; +import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataResp; +import org.apache.hive.service.rpc.thrift.TGetSchemasReq; +import org.apache.hive.service.rpc.thrift.TGetSchemasResp; +import org.apache.hive.service.rpc.thrift.TGetTableTypesReq; +import org.apache.hive.service.rpc.thrift.TGetTableTypesResp; +import org.apache.hive.service.rpc.thrift.TGetTablesReq; +import org.apache.hive.service.rpc.thrift.TGetTablesResp; +import org.apache.hive.service.rpc.thrift.TGetTypeInfoReq; +import org.apache.hive.service.rpc.thrift.TGetTypeInfoResp; +import org.apache.hive.service.rpc.thrift.TOpenSessionReq; +import org.apache.hive.service.rpc.thrift.TOpenSessionResp; +import org.apache.hive.service.rpc.thrift.TProtocolVersion; +import org.apache.hive.service.rpc.thrift.TRenewDelegationTokenReq; +import org.apache.hive.service.rpc.thrift.TRenewDelegationTokenResp; +import org.apache.hive.service.rpc.thrift.TStatus; +import org.apache.hive.service.rpc.thrift.TStatusCode; +import org.apache.hive.service.server.HiveServer2; +import org.apache.thrift.TException; +import org.apache.thrift.protocol.TProtocol; +import org.apache.thrift.server.ServerContext; +import org.apache.thrift.server.TServer; +import org.apache.thrift.server.TServerEventHandler; +import org.apache.thrift.transport.TTransport; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + + +/** + * ThriftCLIService. 
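+ * Abstract base class shared by the binary and HTTP Thrift transports of
+ * HiveServer2: it maps TCLIService RPCs onto the embedded CLIService, while
+ * concrete subclasses implement run() to start the transport-specific
+ * server loop. The listening port may come from the
+ * HIVE_SERVER2_THRIFT_PORT / HIVE_SERVER2_THRIFT_HTTP_PORT environment
+ * variables or from the corresponding HiveConf variables (see init()).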
+ * + */ +public abstract class ThriftCLIService extends AbstractService implements TCLIService.Iface, Runnable { + + public static final Logger LOG = LoggerFactory.getLogger(ThriftCLIService.class.getName()); + + protected CLIService cliService; + private static final TStatus OK_STATUS = new TStatus(TStatusCode.SUCCESS_STATUS); + protected static HiveAuthFactory hiveAuthFactory; + private static final AtomicInteger sessionCount = new AtomicInteger(); + + protected int portNum; + protected InetAddress serverIPAddress; + protected String hiveHost; + protected TServer server; + protected org.eclipse.jetty.server.Server httpServer; + + private boolean isStarted = false; + protected boolean isEmbedded = false; + + protected HiveConf hiveConf; + + protected int minWorkerThreads; + protected int maxWorkerThreads; + protected long workerKeepAliveTime; + + protected TServerEventHandler serverEventHandler; + protected ThreadLocal currentServerContext; + + static class ThriftCLIServerContext implements ServerContext { + private SessionHandle sessionHandle = null; + + public void setSessionHandle(SessionHandle sessionHandle) { + this.sessionHandle = sessionHandle; + } + + public SessionHandle getSessionHandle() { + return sessionHandle; + } + } + + public ThriftCLIService(CLIService service, String serviceName) { + super(serviceName); + this.cliService = service; + currentServerContext = new ThreadLocal(); + serverEventHandler = new TServerEventHandler() { + @Override + public ServerContext createContext( + TProtocol input, TProtocol output) { + Metrics metrics = MetricsFactory.getInstance(); + if (metrics != null) { + try { + metrics.incrementCounter(MetricsConstant.OPEN_CONNECTIONS); + metrics.incrementCounter(MetricsConstant.CUMULATIVE_CONNECTION_COUNT); + } catch (Exception e) { + LOG.warn("Error Reporting JDO operation to Metrics system", e); + } + } + return new ThriftCLIServerContext(); + } + + @Override + public void deleteContext(ServerContext serverContext, + TProtocol input, TProtocol output) { + Metrics metrics = MetricsFactory.getInstance(); + if (metrics != null) { + try { + metrics.decrementCounter(MetricsConstant.OPEN_CONNECTIONS); + } catch (Exception e) { + LOG.warn("Error Reporting JDO operation to Metrics system", e); + } + } + ThriftCLIServerContext context = (ThriftCLIServerContext) serverContext; + SessionHandle sessionHandle = context.getSessionHandle(); + if (sessionHandle != null) { + LOG.info("Session disconnected without closing properly, close it now"); + try { + cliService.closeSession(sessionHandle); + } catch (HiveSQLException e) { + LOG.warn("Failed to close session: " + e, e); + } + } + } + + @Override + public void preServe() { + } + + @Override + public void processContext(ServerContext serverContext, + TTransport input, TTransport output) { + currentServerContext.set(serverContext); + } + }; + } + + @Override + public synchronized void init(HiveConf hiveConf) { + this.hiveConf = hiveConf; + + String hiveHost = System.getenv("HIVE_SERVER2_THRIFT_BIND_HOST"); + if (hiveHost == null) { + hiveHost = hiveConf.getVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST); + } + try { + serverIPAddress = ServerUtils.getHostAddress(hiveHost); + } catch (UnknownHostException e) { + throw new ServiceException(e); + } + + // Initialize common server configs needed in both binary & http modes + String portString; + // HTTP mode + if (HiveServer2.isHTTPTransportMode(hiveConf)) { + workerKeepAliveTime = + hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_WORKER_KEEPALIVE_TIME, + 
+              TimeUnit.SECONDS);
+      portString = System.getenv("HIVE_SERVER2_THRIFT_HTTP_PORT");
+      if (portString != null) {
+        portNum = Integer.parseInt(portString);
+      } else {
+        portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_HTTP_PORT);
+      }
+    }
+    // Binary mode
+    else {
+      workerKeepAliveTime =
+          hiveConf.getTimeVar(ConfVars.HIVE_SERVER2_THRIFT_WORKER_KEEPALIVE_TIME, TimeUnit.SECONDS);
+      portString = System.getenv("HIVE_SERVER2_THRIFT_PORT");
+      if (portString != null) {
+        portNum = Integer.parseInt(portString);
+      } else {
+        portNum = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT);
+      }
+    }
+    minWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MIN_WORKER_THREADS);
+    maxWorkerThreads = hiveConf.getIntVar(ConfVars.HIVE_SERVER2_THRIFT_MAX_WORKER_THREADS);
+    super.init(hiveConf);
+  }
+
+  @Override
+  public synchronized void start() {
+    super.start();
+    if (!isStarted && !isEmbedded) {
+      new Thread(this).start();
+      isStarted = true;
+    }
+  }
+
+  @Override
+  public synchronized void stop() {
+    if (isStarted && !isEmbedded) {
+      if (server != null) {
+        server.stop();
+        LOG.info("Thrift server has stopped");
+      }
+      if ((httpServer != null) && httpServer.isStarted()) {
+        try {
+          httpServer.stop();
+          LOG.info("Http server has stopped");
+        } catch (Exception e) {
+          LOG.error("Error stopping Http server: ", e);
+        }
+      }
+      isStarted = false;
+    }
+    super.stop();
+  }
+
+  public int getPortNumber() {
+    return portNum;
+  }
+
+  public InetAddress getServerIPAddress() {
+    return serverIPAddress;
+  }
+
+  @Override
+  public TGetDelegationTokenResp GetDelegationToken(TGetDelegationTokenReq req)
+      throws TException {
+    TGetDelegationTokenResp resp = new TGetDelegationTokenResp();
+
+    if (hiveAuthFactory == null) {
+      resp.setStatus(unsecureTokenErrorStatus());
+    } else {
+      try {
+        String token = cliService.getDelegationToken(
+            new SessionHandle(req.getSessionHandle()),
+            hiveAuthFactory, req.getOwner(), req.getRenewer());
+        resp.setDelegationToken(token);
+        resp.setStatus(OK_STATUS);
+      } catch (HiveSQLException e) {
+        LOG.error("Error obtaining delegation token", e);
+        TStatus tokenErrorStatus = HiveSQLException.toTStatus(e);
+        tokenErrorStatus.setSqlState("42000");
+        resp.setStatus(tokenErrorStatus);
+      }
+    }
+    return resp;
+  }
+
+  @Override
+  public TCancelDelegationTokenResp CancelDelegationToken(TCancelDelegationTokenReq req)
+      throws TException {
+    TCancelDelegationTokenResp resp = new TCancelDelegationTokenResp();
+
+    if (hiveAuthFactory == null) {
+      resp.setStatus(unsecureTokenErrorStatus());
+    } else {
+      try {
+        cliService.cancelDelegationToken(new SessionHandle(req.getSessionHandle()),
+            hiveAuthFactory, req.getDelegationToken());
+        resp.setStatus(OK_STATUS);
+      } catch (HiveSQLException e) {
+        LOG.error("Error canceling delegation token", e);
+        resp.setStatus(HiveSQLException.toTStatus(e));
+      }
+    }
+    return resp;
+  }
+
+  @Override
+  public TRenewDelegationTokenResp RenewDelegationToken(TRenewDelegationTokenReq req)
+      throws TException {
+    TRenewDelegationTokenResp resp = new TRenewDelegationTokenResp();
+    if (hiveAuthFactory == null) {
+      resp.setStatus(unsecureTokenErrorStatus());
+    } else {
+      try {
+        cliService.renewDelegationToken(new SessionHandle(req.getSessionHandle()),
+            hiveAuthFactory, req.getDelegationToken());
+        resp.setStatus(OK_STATUS);
+      } catch (HiveSQLException e) {
+        LOG.error("Error renewing delegation token", e);
+        resp.setStatus(HiveSQLException.toTStatus(e));
+      }
+    }
+    return resp;
+  }
+
+  private TStatus unsecureTokenErrorStatus() {
+    TStatus errorStatus = new TStatus(TStatusCode.ERROR_STATUS);
+    errorStatus.setErrorMessage("Delegation token is only supported over a remote "
+        + "client with Kerberos authentication");
+    return errorStatus;
+  }
+
+  @Override
+  public TOpenSessionResp OpenSession(TOpenSessionReq req) throws TException {
+    LOG.info("Client protocol version: " + req.getClient_protocol());
+    TOpenSessionResp resp = new TOpenSessionResp();
+    try {
+      SessionHandle sessionHandle = getSessionHandle(req, resp);
+      resp.setSessionHandle(sessionHandle.toTSessionHandle());
+      // TODO: set real configuration map
+      resp.setConfiguration(new HashMap<String, String>());
+      resp.setStatus(OK_STATUS);
+      ThriftCLIServerContext context =
+          (ThriftCLIServerContext) currentServerContext.get();
+      if (context != null) {
+        context.setSessionHandle(sessionHandle);
+      }
+      LOG.info("Opened a session, current sessions: " + sessionCount.incrementAndGet());
+    } catch (Exception e) {
+      LOG.warn("Error opening session: ", e);
+      resp.setStatus(HiveSQLException.toTStatus(e));
+    }
+    return resp;
+  }
+
+  private String getIpAddress() {
+    String clientIpAddress;
+    // Http transport mode.
+    // We set the thread local ip address, in ThriftHttpServlet.
+    if (cliService.getHiveConf().getVar(
+        ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) {
+      clientIpAddress = SessionManager.getIpAddress();
+    } else {
+      // Kerberos
+      if (isKerberosAuthMode()) {
+        clientIpAddress = hiveAuthFactory.getIpAddress();
+      } else {
+        // Except kerberos, NOSASL
+        clientIpAddress = TSetIpAddressProcessor.getUserIpAddress();
+      }
+    }
+    LOG.debug("Client's IP Address: " + clientIpAddress);
+    return clientIpAddress;
+  }
+
+  /**
+   * Returns the effective username.
+   * 1. If hive.server2.allow.user.substitution = false: the username of the connecting user.
+   * 2. If hive.server2.allow.user.substitution = true: the username of the end user that
+   * the connecting user is trying to proxy for.
+   * This includes a check whether the connecting user is allowed to proxy for the end user.
+   * @param req the Thrift open-session request
+   * @return the effective username
+   * @throws HiveSQLException
+   * @throws IOException
+   */
+  private String getUserName(TOpenSessionReq req) throws HiveSQLException, IOException {
+    String userName = null;
+    // Kerberos
+    if (isKerberosAuthMode()) {
+      userName = hiveAuthFactory.getRemoteUser();
+    }
+    // Except kerberos, NOSASL
+    if (userName == null) {
+      userName = TSetIpAddressProcessor.getUserName();
+    }
+    // Http transport mode.
+    // We set the thread local username, in ThriftHttpServlet.
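+    // In HTTP mode the servlet-populated thread local takes precedence over any
+    // SASL-derived name; if it is unset, the Thrift request body below is the fallback.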
+ if (cliService.getHiveConf().getVar( + ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) { + userName = SessionManager.getUserName(); + } + if (userName == null) { + userName = req.getUsername(); + } + + userName = getShortName(userName); + String effectiveClientUser = getProxyUser(userName, req.getConfiguration(), getIpAddress()); + LOG.debug("Client's username: " + effectiveClientUser); + return effectiveClientUser; + } + + private String getShortName(String userName) throws IOException { + String ret = null; + + if (userName != null) { + KerberosNameShim fullKerberosName = ShimLoader.getHadoopShims().getKerberosNameShim(userName); + ret = fullKerberosName.getShortName(); + } + + return ret; + } + + /** + * Create a session handle + * @param req + * @param res + * @return + * @throws HiveSQLException + * @throws LoginException + * @throws IOException + */ + SessionHandle getSessionHandle(TOpenSessionReq req, TOpenSessionResp res) + throws HiveSQLException, LoginException, IOException { + String userName = getUserName(req); + String ipAddress = getIpAddress(); + TProtocolVersion protocol = getMinVersion(CLIService.SERVER_VERSION, + req.getClient_protocol()); + SessionHandle sessionHandle; + if (cliService.getHiveConf().getBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS) && + (userName != null)) { + String delegationTokenStr = getDelegationToken(userName); + sessionHandle = cliService.openSessionWithImpersonation(protocol, userName, + req.getPassword(), ipAddress, req.getConfiguration(), delegationTokenStr); + } else { + sessionHandle = cliService.openSession(protocol, userName, req.getPassword(), + ipAddress, req.getConfiguration()); + } + res.setServerProtocolVersion(protocol); + return sessionHandle; + } + + + private String getDelegationToken(String userName) + throws HiveSQLException, LoginException, IOException { + try { + return cliService.getDelegationTokenFromMetaStore(userName); + } catch (UnsupportedOperationException e) { + // The delegation token is not applicable in the given deployment mode + // such as HMS is not kerberos secured + } + return null; + } + + private TProtocolVersion getMinVersion(TProtocolVersion... 
versions) { + TProtocolVersion[] values = TProtocolVersion.values(); + int current = values[values.length - 1].getValue(); + for (TProtocolVersion version : versions) { + if (current > version.getValue()) { + current = version.getValue(); + } + } + for (TProtocolVersion version : values) { + if (version.getValue() == current) { + return version; + } + } + throw new IllegalArgumentException("never"); + } + + @Override + public TCloseSessionResp CloseSession(TCloseSessionReq req) throws TException { + TCloseSessionResp resp = new TCloseSessionResp(); + try { + SessionHandle sessionHandle = new SessionHandle(req.getSessionHandle()); + cliService.closeSession(sessionHandle); + LOG.info("Closed a session, current sessions: " + sessionCount.decrementAndGet()); + resp.setStatus(OK_STATUS); + ThriftCLIServerContext context = + (ThriftCLIServerContext)currentServerContext.get(); + if (context != null) { + context.setSessionHandle(null); + } + } catch (Exception e) { + LOG.warn("Error closing session: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetInfoResp GetInfo(TGetInfoReq req) throws TException { + TGetInfoResp resp = new TGetInfoResp(); + try { + GetInfoValue getInfoValue = + cliService.getInfo(new SessionHandle(req.getSessionHandle()), + GetInfoType.getGetInfoType(req.getInfoType())); + resp.setInfoValue(getInfoValue.toTGetInfoValue()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting info: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TExecuteStatementResp ExecuteStatement(TExecuteStatementReq req) throws TException { + TExecuteStatementResp resp = new TExecuteStatementResp(); + try { + SessionHandle sessionHandle = new SessionHandle(req.getSessionHandle()); + String statement = req.getStatement(); + Map confOverlay = req.getConfOverlay(); + Boolean runAsync = req.isRunAsync(); + OperationHandle operationHandle = runAsync ? + cliService.executeStatementAsync(sessionHandle, statement, confOverlay) + : cliService.executeStatement(sessionHandle, statement, confOverlay); + resp.setOperationHandle(operationHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + // Note: it's rather important that this (and other methods) catch Exception, not Throwable; + // in combination with HiveSessionProxy.invoke code, perhaps unintentionally, it used + // to also catch all errors; and now it allows OOMs only to propagate. 
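+      // The exception is converted into a TStatus on the response, so a failed statement
+      // is reported back to the client without tearing down the transport or the session.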
+ LOG.warn("Error executing statement: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetTypeInfoResp GetTypeInfo(TGetTypeInfoReq req) throws TException { + TGetTypeInfoResp resp = new TGetTypeInfoResp(); + try { + OperationHandle operationHandle = cliService.getTypeInfo(new SessionHandle(req.getSessionHandle())); + resp.setOperationHandle(operationHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting type info: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetCatalogsResp GetCatalogs(TGetCatalogsReq req) throws TException { + TGetCatalogsResp resp = new TGetCatalogsResp(); + try { + OperationHandle opHandle = cliService.getCatalogs(new SessionHandle(req.getSessionHandle())); + resp.setOperationHandle(opHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting catalogs: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetSchemasResp GetSchemas(TGetSchemasReq req) throws TException { + TGetSchemasResp resp = new TGetSchemasResp(); + try { + OperationHandle opHandle = cliService.getSchemas( + new SessionHandle(req.getSessionHandle()), req.getCatalogName(), req.getSchemaName()); + resp.setOperationHandle(opHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting schemas: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetTablesResp GetTables(TGetTablesReq req) throws TException { + TGetTablesResp resp = new TGetTablesResp(); + try { + OperationHandle opHandle = cliService + .getTables(new SessionHandle(req.getSessionHandle()), req.getCatalogName(), + req.getSchemaName(), req.getTableName(), req.getTableTypes()); + resp.setOperationHandle(opHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting tables: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetTableTypesResp GetTableTypes(TGetTableTypesReq req) throws TException { + TGetTableTypesResp resp = new TGetTableTypesResp(); + try { + OperationHandle opHandle = cliService.getTableTypes(new SessionHandle(req.getSessionHandle())); + resp.setOperationHandle(opHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting table types: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetColumnsResp GetColumns(TGetColumnsReq req) throws TException { + TGetColumnsResp resp = new TGetColumnsResp(); + try { + OperationHandle opHandle = cliService.getColumns( + new SessionHandle(req.getSessionHandle()), + req.getCatalogName(), + req.getSchemaName(), + req.getTableName(), + req.getColumnName()); + resp.setOperationHandle(opHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting columns: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetFunctionsResp GetFunctions(TGetFunctionsReq req) throws TException { + TGetFunctionsResp resp = new TGetFunctionsResp(); + try { + OperationHandle opHandle = cliService.getFunctions( + new SessionHandle(req.getSessionHandle()), req.getCatalogName(), + req.getSchemaName(), req.getFunctionName()); + 
resp.setOperationHandle(opHandle.toTOperationHandle()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting functions: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetOperationStatusResp GetOperationStatus(TGetOperationStatusReq req) throws TException { + TGetOperationStatusResp resp = new TGetOperationStatusResp(); + try { + OperationStatus operationStatus = cliService.getOperationStatus( + new OperationHandle(req.getOperationHandle())); + resp.setOperationState(operationStatus.getState().toTOperationState()); + HiveSQLException opException = operationStatus.getOperationException(); + resp.setTaskStatus(operationStatus.getTaskStatus()); + resp.setOperationStarted(operationStatus.getOperationStarted()); + resp.setOperationCompleted(operationStatus.getOperationCompleted()); + if (opException != null) { + resp.setSqlState(opException.getSQLState()); + resp.setErrorCode(opException.getErrorCode()); + resp.setErrorMessage(opException.getMessage()); + } + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting operation status: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TCancelOperationResp CancelOperation(TCancelOperationReq req) throws TException { + TCancelOperationResp resp = new TCancelOperationResp(); + try { + cliService.cancelOperation(new OperationHandle(req.getOperationHandle())); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error cancelling operation: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TCloseOperationResp CloseOperation(TCloseOperationReq req) throws TException { + TCloseOperationResp resp = new TCloseOperationResp(); + try { + cliService.closeOperation(new OperationHandle(req.getOperationHandle())); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error closing operation: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TGetResultSetMetadataResp GetResultSetMetadata(TGetResultSetMetadataReq req) + throws TException { + TGetResultSetMetadataResp resp = new TGetResultSetMetadataResp(); + try { + TableSchema schema = cliService.getResultSetMetadata(new OperationHandle(req.getOperationHandle())); + resp.setSchema(schema.toTTableSchema()); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error getting result set metadata: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public TFetchResultsResp FetchResults(TFetchResultsReq req) throws TException { + TFetchResultsResp resp = new TFetchResultsResp(); + try { + RowSet rowSet = cliService.fetchResults( + new OperationHandle(req.getOperationHandle()), + FetchOrientation.getFetchOrientation(req.getOrientation()), + req.getMaxRows(), + FetchType.getFetchType(req.getFetchType())); + resp.setResults(rowSet.toTRowSet()); + resp.setHasMoreRows(false); + resp.setStatus(OK_STATUS); + } catch (Exception e) { + LOG.warn("Error fetching results: ", e); + resp.setStatus(HiveSQLException.toTStatus(e)); + } + return resp; + } + + @Override + public abstract void run(); + + /** + * If the proxy user name is provided then check privileges to substitute the user. 
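+   * The proxy name is taken from the HTTP query string (set by ThriftHttpServlet) in HTTP
+   * mode, falling back to the HiveAuthFactory.HS2_PROXY_USER entry of the session
+   * configuration. Substitution requires hive.server2.allow.user.substitution=true and,
+   * unless authentication is NONE, a successful Hadoop proxy-user privilege check.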
+ * @param realUser + * @param sessionConf + * @param ipAddress + * @return + * @throws HiveSQLException + */ + private String getProxyUser(String realUser, Map sessionConf, + String ipAddress) throws HiveSQLException { + String proxyUser = null; + // Http transport mode. + // We set the thread local proxy username, in ThriftHttpServlet. + if (cliService.getHiveConf().getVar( + ConfVars.HIVE_SERVER2_TRANSPORT_MODE).equalsIgnoreCase("http")) { + proxyUser = SessionManager.getProxyUserName(); + LOG.debug("Proxy user from query string: " + proxyUser); + } + + if (proxyUser == null && sessionConf != null && sessionConf.containsKey(HiveAuthFactory.HS2_PROXY_USER)) { + String proxyUserFromThriftBody = sessionConf.get(HiveAuthFactory.HS2_PROXY_USER); + LOG.debug("Proxy user from thrift body: " + proxyUserFromThriftBody); + proxyUser = proxyUserFromThriftBody; + } + + if (proxyUser == null) { + return realUser; + } + + // check whether substitution is allowed + if (!hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ALLOW_USER_SUBSTITUTION)) { + throw new HiveSQLException("Proxy user substitution is not allowed"); + } + + // If there's no authentication, then directly substitute the user + if (HiveAuthFactory.AuthTypes.NONE.toString(). + equalsIgnoreCase(hiveConf.getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION))) { + return proxyUser; + } + + // Verify proxy user privilege of the realUser for the proxyUser + HiveAuthFactory.verifyProxyAccess(realUser, proxyUser, ipAddress, hiveConf); + LOG.debug("Verified proxy user: " + proxyUser); + return proxyUser; + } + + private boolean isKerberosAuthMode() { + return cliService.getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION) + .equalsIgnoreCase(HiveAuthFactory.AuthTypes.KERBEROS.toString()); + } +} diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java index ccce6dc..098aea6 100644 --- a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java @@ -52,6 +52,8 @@ import org.apache.hive.service.rpc.thrift.TGetCatalogsResp; import org.apache.hive.service.rpc.thrift.TGetColumnsReq; import org.apache.hive.service.rpc.thrift.TGetColumnsResp; +import org.apache.hive.service.rpc.thrift.TGetCrossReferenceReq; +import org.apache.hive.service.rpc.thrift.TGetCrossReferenceResp; import org.apache.hive.service.rpc.thrift.TGetDelegationTokenReq; import org.apache.hive.service.rpc.thrift.TGetDelegationTokenResp; import org.apache.hive.service.rpc.thrift.TGetFunctionsReq; @@ -60,6 +62,8 @@ import org.apache.hive.service.rpc.thrift.TGetInfoResp; import org.apache.hive.service.rpc.thrift.TGetOperationStatusReq; import org.apache.hive.service.rpc.thrift.TGetOperationStatusResp; +import org.apache.hive.service.rpc.thrift.TGetPrimaryKeysReq; +import org.apache.hive.service.rpc.thrift.TGetPrimaryKeysResp; import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataReq; import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataResp; import org.apache.hive.service.rpc.thrift.TGetSchemasReq; @@ -492,4 +496,47 @@ public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory au throw new HiveSQLException(e); } } + + @Override + public OperationHandle getPrimaryKeys(SessionHandle sessionHandle, + String catalog, String schema, String table) throws HiveSQLException { + try { + TGetPrimaryKeysReq req = new 
TGetPrimaryKeysReq(sessionHandle.toTSessionHandle()); + req.setCatalogName(catalog); + req.setSchemaName(schema); + req.setTableName(table); + TGetPrimaryKeysResp resp = cliService.GetPrimaryKeys(req); + checkStatus(resp.getStatus()); + TProtocolVersion protocol = sessionHandle.getProtocolVersion(); + return new OperationHandle(resp.getOperationHandle(), protocol); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + @Override + public OperationHandle getCrossReference(SessionHandle sessionHandle, + String primaryCatalog, String primarySchema, String primaryTable, + String foreignCatalog, String foreignSchema, String foreignTable) + throws HiveSQLException { + try { + TGetCrossReferenceReq req = new TGetCrossReferenceReq(sessionHandle.toTSessionHandle()); + req.setParentCatalogName(primaryCatalog); + req.setParentSchemaName(primarySchema); + req.setParentTableName(primaryTable); + req.setForeignCatalogName(foreignCatalog); + req.setForeignSchemaName(foreignSchema); + req.setForeignTableName(foreignTable); + TGetCrossReferenceResp resp = cliService.GetCrossReference(req); + checkStatus(resp.getStatus()); + TProtocolVersion protocol = sessionHandle.getProtocolVersion(); + return new OperationHandle(resp.getOperationHandle(), protocol); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } } diff --git a/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java.orig b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java.orig new file mode 100644 index 0000000..ccce6dc --- /dev/null +++ b/service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIServiceClient.java.orig @@ -0,0 +1,495 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hive.service.cli.thrift; + +import java.util.List; +import java.util.Map; + +import org.apache.hive.service.auth.HiveAuthFactory; +import org.apache.hive.service.cli.CLIServiceClient; +import org.apache.hive.service.cli.FetchOrientation; +import org.apache.hive.service.cli.FetchType; +import org.apache.hive.service.cli.GetInfoType; +import org.apache.hive.service.cli.GetInfoValue; +import org.apache.hive.service.cli.HiveSQLException; +import org.apache.hive.service.cli.OperationHandle; +import org.apache.hive.service.cli.OperationState; +import org.apache.hive.service.cli.OperationStatus; +import org.apache.hive.service.cli.RowSet; +import org.apache.hive.service.cli.RowSetFactory; +import org.apache.hive.service.cli.SessionHandle; +import org.apache.hive.service.cli.TableSchema; +import org.apache.hive.service.rpc.thrift.TCLIService; +import org.apache.hive.service.rpc.thrift.TCancelDelegationTokenReq; +import org.apache.hive.service.rpc.thrift.TCancelDelegationTokenResp; +import org.apache.hive.service.rpc.thrift.TCancelOperationReq; +import org.apache.hive.service.rpc.thrift.TCancelOperationResp; +import org.apache.hive.service.rpc.thrift.TCloseOperationReq; +import org.apache.hive.service.rpc.thrift.TCloseOperationResp; +import org.apache.hive.service.rpc.thrift.TCloseSessionReq; +import org.apache.hive.service.rpc.thrift.TCloseSessionResp; +import org.apache.hive.service.rpc.thrift.TExecuteStatementReq; +import org.apache.hive.service.rpc.thrift.TExecuteStatementResp; +import org.apache.hive.service.rpc.thrift.TFetchResultsReq; +import org.apache.hive.service.rpc.thrift.TFetchResultsResp; +import org.apache.hive.service.rpc.thrift.TGetCatalogsReq; +import org.apache.hive.service.rpc.thrift.TGetCatalogsResp; +import org.apache.hive.service.rpc.thrift.TGetColumnsReq; +import org.apache.hive.service.rpc.thrift.TGetColumnsResp; +import org.apache.hive.service.rpc.thrift.TGetDelegationTokenReq; +import org.apache.hive.service.rpc.thrift.TGetDelegationTokenResp; +import org.apache.hive.service.rpc.thrift.TGetFunctionsReq; +import org.apache.hive.service.rpc.thrift.TGetFunctionsResp; +import org.apache.hive.service.rpc.thrift.TGetInfoReq; +import org.apache.hive.service.rpc.thrift.TGetInfoResp; +import org.apache.hive.service.rpc.thrift.TGetOperationStatusReq; +import org.apache.hive.service.rpc.thrift.TGetOperationStatusResp; +import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataReq; +import org.apache.hive.service.rpc.thrift.TGetResultSetMetadataResp; +import org.apache.hive.service.rpc.thrift.TGetSchemasReq; +import org.apache.hive.service.rpc.thrift.TGetSchemasResp; +import org.apache.hive.service.rpc.thrift.TGetTableTypesReq; +import org.apache.hive.service.rpc.thrift.TGetTableTypesResp; +import org.apache.hive.service.rpc.thrift.TGetTablesReq; +import org.apache.hive.service.rpc.thrift.TGetTablesResp; +import org.apache.hive.service.rpc.thrift.TGetTypeInfoReq; +import org.apache.hive.service.rpc.thrift.TGetTypeInfoResp; +import org.apache.hive.service.rpc.thrift.TOpenSessionReq; +import org.apache.hive.service.rpc.thrift.TOpenSessionResp; +import org.apache.hive.service.rpc.thrift.TProtocolVersion; +import org.apache.hive.service.rpc.thrift.TRenewDelegationTokenReq; +import org.apache.hive.service.rpc.thrift.TRenewDelegationTokenResp; +import org.apache.hive.service.rpc.thrift.TStatus; +import org.apache.hive.service.rpc.thrift.TStatusCode; +import org.apache.thrift.TException; + +/** + * ThriftCLIServiceClient. 
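+ * A client-side CLIServiceClient implementation that forwards each call as a TCLIService
+ * Thrift RPC and converts an error TStatus in the response into a HiveSQLException.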
+ *
+ */
+public class ThriftCLIServiceClient extends CLIServiceClient {
+  private final TCLIService.Iface cliService;
+
+  public ThriftCLIServiceClient(TCLIService.Iface cliService) {
+    this.cliService = cliService;
+  }
+
+  public void checkStatus(TStatus status) throws HiveSQLException {
+    if (TStatusCode.ERROR_STATUS.equals(status.getStatusCode())) {
+      throw new HiveSQLException(status);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#openSession(java.lang.String, java.lang.String, java.util.Map)
+   */
+  @Override
+  public SessionHandle openSession(String username, String password,
+      Map<String, String> configuration)
+          throws HiveSQLException {
+    try {
+      TOpenSessionReq req = new TOpenSessionReq();
+      req.setUsername(username);
+      req.setPassword(password);
+      req.setConfiguration(configuration);
+      TOpenSessionResp resp = cliService.OpenSession(req);
+      checkStatus(resp.getStatus());
+      return new SessionHandle(resp.getSessionHandle(), resp.getServerProtocolVersion());
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#openSessionWithImpersonation(java.lang.String, java.lang.String, java.util.Map, java.lang.String)
+   */
+  @Override
+  public SessionHandle openSessionWithImpersonation(String username, String password,
+      Map<String, String> configuration, String delegationToken) throws HiveSQLException {
+    throw new HiveSQLException("open with impersonation operation is not supported in the client");
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#closeSession(org.apache.hive.service.cli.SessionHandle)
+   */
+  @Override
+  public void closeSession(SessionHandle sessionHandle) throws HiveSQLException {
+    try {
+      TCloseSessionReq req = new TCloseSessionReq(sessionHandle.toTSessionHandle());
+      TCloseSessionResp resp = cliService.CloseSession(req);
+      checkStatus(resp.getStatus());
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#getInfo(org.apache.hive.service.cli.SessionHandle, org.apache.hive.service.cli.GetInfoType)
+   */
+  @Override
+  public GetInfoValue getInfo(SessionHandle sessionHandle, GetInfoType infoType)
+      throws HiveSQLException {
+    try {
+      // FIXME extract the right info type
+      TGetInfoReq req = new TGetInfoReq(sessionHandle.toTSessionHandle(), infoType.toTGetInfoType());
+      TGetInfoResp resp = cliService.GetInfo(req);
+      checkStatus(resp.getStatus());
+      return new GetInfoValue(resp.getInfoValue());
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#executeStatement(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map)
+   */
+  @Override
+  public OperationHandle executeStatement(SessionHandle sessionHandle, String statement,
+      Map<String, String> confOverlay)
+          throws HiveSQLException {
+    return executeStatementInternal(sessionHandle, statement, confOverlay, false);
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#executeStatementAsync(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.util.Map)
+   */
+  @Override
+  public OperationHandle executeStatementAsync(SessionHandle sessionHandle, String statement,
+      Map<String, String> confOverlay)
+          throws HiveSQLException {
+    return executeStatementInternal(sessionHandle, statement, confOverlay, true);
+  }
+
+  private OperationHandle executeStatementInternal(SessionHandle sessionHandle, String statement,
+      Map<String, String> confOverlay, boolean isAsync)
+          throws HiveSQLException {
+    try {
+      TExecuteStatementReq req =
+          new TExecuteStatementReq(sessionHandle.toTSessionHandle(), statement);
+      req.setConfOverlay(confOverlay);
+      req.setRunAsync(isAsync);
+      TExecuteStatementResp resp = cliService.ExecuteStatement(req);
+      checkStatus(resp.getStatus());
+      TProtocolVersion protocol = sessionHandle.getProtocolVersion();
+      return new OperationHandle(resp.getOperationHandle(), protocol);
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#getTypeInfo(org.apache.hive.service.cli.SessionHandle)
+   */
+  @Override
+  public OperationHandle getTypeInfo(SessionHandle sessionHandle) throws HiveSQLException {
+    try {
+      TGetTypeInfoReq req = new TGetTypeInfoReq(sessionHandle.toTSessionHandle());
+      TGetTypeInfoResp resp = cliService.GetTypeInfo(req);
+      checkStatus(resp.getStatus());
+      TProtocolVersion protocol = sessionHandle.getProtocolVersion();
+      return new OperationHandle(resp.getOperationHandle(), protocol);
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#getCatalogs(org.apache.hive.service.cli.SessionHandle)
+   */
+  @Override
+  public OperationHandle getCatalogs(SessionHandle sessionHandle) throws HiveSQLException {
+    try {
+      TGetCatalogsReq req = new TGetCatalogsReq(sessionHandle.toTSessionHandle());
+      TGetCatalogsResp resp = cliService.GetCatalogs(req);
+      checkStatus(resp.getStatus());
+      TProtocolVersion protocol = sessionHandle.getProtocolVersion();
+      return new OperationHandle(resp.getOperationHandle(), protocol);
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#getSchemas(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String)
+   */
+  @Override
+  public OperationHandle getSchemas(SessionHandle sessionHandle, String catalogName,
+      String schemaName)
+          throws HiveSQLException {
+    try {
+      TGetSchemasReq req = new TGetSchemasReq(sessionHandle.toTSessionHandle());
+      req.setCatalogName(catalogName);
+      req.setSchemaName(schemaName);
+      TGetSchemasResp resp = cliService.GetSchemas(req);
+      checkStatus(resp.getStatus());
+      TProtocolVersion protocol = sessionHandle.getProtocolVersion();
+      return new OperationHandle(resp.getOperationHandle(), protocol);
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  /* (non-Javadoc)
+   * @see org.apache.hive.service.cli.ICLIService#getTables(org.apache.hive.service.cli.SessionHandle, java.lang.String, java.lang.String, java.lang.String, java.util.List)
+   */
+  @Override
+  public OperationHandle getTables(SessionHandle sessionHandle, String catalogName,
+      String schemaName, String tableName, List<String> tableTypes)
+          throws HiveSQLException {
+    try {
+      TGetTablesReq req = new TGetTablesReq(sessionHandle.toTSessionHandle());
+      req.setCatalogName(catalogName);
+      req.setSchemaName(schemaName);
+      req.setTableName(tableName);
+      req.setTableTypes(tableTypes);
+      TGetTablesResp resp = cliService.GetTables(req);
+      checkStatus(resp.getStatus());
+      TProtocolVersion protocol = sessionHandle.getProtocolVersion();
+      return new OperationHandle(resp.getOperationHandle(), protocol);
+    } catch (HiveSQLException e) {
+      throw e;
+    } catch (Exception e)
{ + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getTableTypes(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getTableTypes(SessionHandle sessionHandle) throws HiveSQLException { + try { + TGetTableTypesReq req = new TGetTableTypesReq(sessionHandle.toTSessionHandle()); + TGetTableTypesResp resp = cliService.GetTableTypes(req); + checkStatus(resp.getStatus()); + TProtocolVersion protocol = sessionHandle.getProtocolVersion(); + return new OperationHandle(resp.getOperationHandle(), protocol); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getColumns(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getColumns(SessionHandle sessionHandle, + String catalogName, String schemaName, String tableName, String columnName) + throws HiveSQLException { + try { + TGetColumnsReq req = new TGetColumnsReq(); + req.setSessionHandle(sessionHandle.toTSessionHandle()); + req.setCatalogName(catalogName); + req.setSchemaName(schemaName); + req.setTableName(tableName); + req.setColumnName(columnName); + TGetColumnsResp resp = cliService.GetColumns(req); + checkStatus(resp.getStatus()); + TProtocolVersion protocol = sessionHandle.getProtocolVersion(); + return new OperationHandle(resp.getOperationHandle(), protocol); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getFunctions(org.apache.hive.service.cli.SessionHandle) + */ + @Override + public OperationHandle getFunctions(SessionHandle sessionHandle, + String catalogName, String schemaName, String functionName) throws HiveSQLException { + try { + TGetFunctionsReq req = new TGetFunctionsReq(sessionHandle.toTSessionHandle(), functionName); + req.setCatalogName(catalogName); + req.setSchemaName(schemaName); + TGetFunctionsResp resp = cliService.GetFunctions(req); + checkStatus(resp.getStatus()); + TProtocolVersion protocol = sessionHandle.getProtocolVersion(); + return new OperationHandle(resp.getOperationHandle(), protocol); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getOperationStatus(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public OperationStatus getOperationStatus(OperationHandle opHandle) throws HiveSQLException { + try { + TGetOperationStatusReq req = new TGetOperationStatusReq(opHandle.toTOperationHandle()); + TGetOperationStatusResp resp = cliService.GetOperationStatus(req); + // Checks the status of the RPC call, throws an exception in case of error + checkStatus(resp.getStatus()); + OperationState opState = OperationState.getOperationState(resp.getOperationState()); + HiveSQLException opException = null; + if (opState == OperationState.ERROR) { + opException = new HiveSQLException(resp.getErrorMessage(), resp.getSqlState(), resp.getErrorCode()); + } + return new OperationStatus(opState, resp.getTaskStatus(), resp.getOperationStarted(), + resp.getOperationCompleted(), opException); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see 
org.apache.hive.service.cli.ICLIService#cancelOperation(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public void cancelOperation(OperationHandle opHandle) throws HiveSQLException { + try { + TCancelOperationReq req = new TCancelOperationReq(opHandle.toTOperationHandle()); + TCancelOperationResp resp = cliService.CancelOperation(req); + checkStatus(resp.getStatus()); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#closeOperation(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public void closeOperation(OperationHandle opHandle) + throws HiveSQLException { + try { + TCloseOperationReq req = new TCloseOperationReq(opHandle.toTOperationHandle()); + TCloseOperationResp resp = cliService.CloseOperation(req); + checkStatus(resp.getStatus()); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#getResultSetMetadata(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public TableSchema getResultSetMetadata(OperationHandle opHandle) + throws HiveSQLException { + try { + TGetResultSetMetadataReq req = new TGetResultSetMetadataReq(opHandle.toTOperationHandle()); + TGetResultSetMetadataResp resp = cliService.GetResultSetMetadata(req); + checkStatus(resp.getStatus()); + return new TableSchema(resp.getSchema()); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + @Override + public RowSet fetchResults(OperationHandle opHandle, FetchOrientation orientation, long maxRows, + FetchType fetchType) throws HiveSQLException { + try { + TFetchResultsReq req = new TFetchResultsReq(); + req.setOperationHandle(opHandle.toTOperationHandle()); + req.setOrientation(orientation.toTFetchOrientation()); + req.setMaxRows(maxRows); + req.setFetchType(fetchType.toTFetchType()); + TFetchResultsResp resp = cliService.FetchResults(req); + checkStatus(resp.getStatus()); + return RowSetFactory.create(resp.getResults(), opHandle.getProtocolVersion()); + } catch (HiveSQLException e) { + throw e; + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + /* (non-Javadoc) + * @see org.apache.hive.service.cli.ICLIService#fetchResults(org.apache.hive.service.cli.OperationHandle) + */ + @Override + public RowSet fetchResults(OperationHandle opHandle) throws HiveSQLException { + // TODO: set the correct default fetch size + return fetchResults(opHandle, FetchOrientation.FETCH_NEXT, 10000, FetchType.QUERY_OUTPUT); + } + + @Override + public String getDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String owner, String renewer) throws HiveSQLException { + TGetDelegationTokenReq req = new TGetDelegationTokenReq( + sessionHandle.toTSessionHandle(), owner, renewer); + try { + TGetDelegationTokenResp tokenResp = cliService.GetDelegationToken(req); + checkStatus(tokenResp.getStatus()); + return tokenResp.getDelegationToken(); + } catch (Exception e) { + throw new HiveSQLException(e); + } + } + + @Override + public void cancelDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory, + String tokenStr) throws HiveSQLException { + TCancelDelegationTokenReq cancelReq = new TCancelDelegationTokenReq( + sessionHandle.toTSessionHandle(), tokenStr); + try { + TCancelDelegationTokenResp cancelResp = + 
cliService.CancelDelegationToken(cancelReq);
+      checkStatus(cancelResp.getStatus());
+    } catch (TException e) {
+      throw new HiveSQLException(e);
+    }
+  }
+
+  @Override
+  public void renewDelegationToken(SessionHandle sessionHandle, HiveAuthFactory authFactory,
+      String tokenStr) throws HiveSQLException {
+    TRenewDelegationTokenReq renewReq = new TRenewDelegationTokenReq(
+        sessionHandle.toTSessionHandle(), tokenStr);
+    try {
+      TRenewDelegationTokenResp renewResp =
+          cliService.RenewDelegationToken(renewReq);
+      checkStatus(renewResp.getStatus());
+    } catch (Exception e) {
+      throw new HiveSQLException(e);
+    }
+  }
+}
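For orientation, the sketch below shows one way the client above might be driven end to end. It is illustrative only; everything beyond the patched classes is an assumption: the endpoint (localhost:10000), the sample user and query, and a server running with NOSASL authentication so a bare TSocket works (a real deployment normally needs the SASL/Kerberos transport produced by HiveAuthFactory).

import java.util.HashMap;
import java.util.Map;

import org.apache.hive.service.cli.OperationHandle;
import org.apache.hive.service.cli.RowSet;
import org.apache.hive.service.cli.SessionHandle;
import org.apache.hive.service.cli.thrift.ThriftCLIServiceClient;
import org.apache.hive.service.rpc.thrift.TCLIService;
import org.apache.thrift.protocol.TBinaryProtocol;
import org.apache.thrift.transport.TSocket;
import org.apache.thrift.transport.TTransport;

public class ThriftCLIServiceClientExample {
  public static void main(String[] args) throws Exception {
    // Assumed endpoint; a plain socket only works against a NOSASL server.
    TTransport transport = new TSocket("localhost", 10000);
    transport.open();
    try {
      // Wrap the generated Thrift stub in the patched client.
      TCLIService.Iface stub = new TCLIService.Client(new TBinaryProtocol(transport));
      ThriftCLIServiceClient client = new ThriftCLIServiceClient(stub);

      Map<String, String> conf = new HashMap<String, String>();
      SessionHandle session = client.openSession("hive", "", conf);
      try {
        OperationHandle op = client.executeStatement(session, "SELECT 1", conf);
        // Uses the client's FETCH_NEXT default (10000 rows, QUERY_OUTPUT).
        RowSet rows = client.fetchResults(op);
        System.out.println("Fetched " + rows.numRows() + " row(s)");
        client.closeOperation(op);
      } finally {
        client.closeSession(session);
      }
    } finally {
      transport.close();
    }
  }
}

The nested try/finally mirrors the server side of this patch: sessions left open on disconnect are reclaimed by deleteContext, but closing them explicitly avoids the "Session disconnected without closing properly" path.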