diff --git a/accumulo-handler/pom.xml b/accumulo-handler/pom.xml new file mode 100644 index 0000000..c3efd0a --- /dev/null +++ b/accumulo-handler/pom.xml @@ -0,0 +1,158 @@ + + + + 4.0.0 + + org.apache.hive + hive + 0.14.0-SNAPSHOT + ../pom.xml + + + hive-accumulo-handler + jar + Hive Accumulo Handler + + + .. + + + + + commons-lang + commons-lang + + + commons-logging + commons-logging + + + org.apache.accumulo + accumulo-core + + + org.apache.accumulo + accumulo-fate + + + org.apache.accumulo + accumulo-start + + + org.apache.accumulo + accumulo-trace + + + org.apache.hive + hive-common + ${project.version} + + + org.apache.hive + hive-metastore + ${project.version} + + + org.apache.hive + hive-serde + ${project.version} + + + org.apache.hive + hive-service + ${project.version} + + + org.apache.hive + hive-exec + ${project.version} + + + org.apache.hive + hive-shims + ${project.version} + + + org.slf4j + slf4j-api + + + org.slf4j + slf4j-log4j12 + + + junit + junit + test + + + org.mockito + mockito-all + test + + + + + + hadoop-1 + + + org.apache.hadoop + hadoop-core + ${hadoop-20S.version} + true + + + + + hadoop-2 + + + org.apache.hadoop + hadoop-common + ${hadoop-23.version} + true + + + org.apache.hadoop + hadoop-mapreduce-client-core + ${hadoop-23.version} + true + + + + + + + ${basedir}/src/java + ${basedir}/src/test + + + org.apache.maven.plugins + maven-jar-plugin + + + + test-jar + + + + + + + + diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloConnectionParameters.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloConnectionParameters.java new file mode 100644 index 0000000..2b11f84 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloConnectionParameters.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.ZooKeeperInstance; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.hadoop.conf.Configuration; + +import com.google.common.base.Preconditions; + +/** + * + */ +public class AccumuloConnectionParameters { + public static final String USER_NAME = "accumulo.user.name"; + public static final String USER_PASS = "accumulo.user.pass"; + public static final String ZOOKEEPERS = "accumulo.zookeepers"; + public static final String INSTANCE_NAME = "accumulo.instance.name"; + public static final String TABLE_NAME = "accumulo.table.name"; + + public static final String USE_MOCK_INSTANCE = "accumulo.mock.instance"; + + protected Configuration conf; + protected boolean useMockInstance = false; + + public AccumuloConnectionParameters(Configuration conf) { + // TableDesc#getDeserializer will ultimately instantiate the AccumuloSerDe with a null + // Configuration + // We have to accept this and just fail late if data is attempted to be pulled from the + // Configuration + this.conf = conf; + } + + public Configuration getConf() { + return conf; + } + + public String getAccumuloUserName() { + Preconditions.checkNotNull(conf); + return conf.get(USER_NAME); + } + + public String getAccumuloPassword() { + Preconditions.checkNotNull(conf); + return conf.get(USER_PASS); + } + + public String getAccumuloInstanceName() { + Preconditions.checkNotNull(conf); + return conf.get(INSTANCE_NAME); + } + + public String getZooKeepers() { + Preconditions.checkNotNull(conf); + return conf.get(ZOOKEEPERS); + } + + public String getAccumuloTableName() { + Preconditions.checkNotNull(conf); + return conf.get(TABLE_NAME); + } + + public boolean useMockInstance() { + Preconditions.checkNotNull(conf); + return conf.getBoolean(USE_MOCK_INSTANCE, false); + } + + public Instance getInstance() { + String instanceName = getAccumuloInstanceName(); + + // Fail with a good message + if (null == instanceName) { + throw new IllegalArgumentException("Accumulo instance name must be provided in hiveconf using " + INSTANCE_NAME); + } + + if (useMockInstance()) { + return new MockInstance(instanceName); + } + + String zookeepers = getZooKeepers(); + + // Fail with a good message + if (null == zookeepers) { + throw new IllegalArgumentException("ZooKeeper quorum string must be provided in hiveconf using " + ZOOKEEPERS); + } + + return new ZooKeeperInstance(instanceName, zookeepers); + } + + public Connector getConnector() throws AccumuloException, AccumuloSecurityException { + Instance inst = getInstance(); + return getConnector(inst); + } + + public Connector getConnector(Instance inst) throws AccumuloException, AccumuloSecurityException { + String username = getAccumuloUserName(), password = getAccumuloPassword(); + + // Fail with a good message + if (null == username) { + throw new IllegalArgumentException("Accumulo user name must be provided in hiveconf using " + USER_NAME); + } + if (null == password) { + throw new IllegalArgumentException("Accumulo password must be provided in hiveconf using " + USER_PASS); + } + + return inst.getConnector(username, new PasswordToken(password)); + } +} diff --git 
a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveConstants.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveConstants.java
new file mode 100644
index 0000000..6cdfe1b
--- /dev/null
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveConstants.java
@@ -0,0 +1,42 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.accumulo;
+
+import java.nio.charset.Charset;
+
+/**
+ *
+ */
+public class AccumuloHiveConstants {
+  public static final String ROWID = ":rowID";
+  public static final char COLON = ':', COMMA = ',', ESCAPE = '\\', POUND = '#', ASTERISK = '*';
+
+  public static final String ESCAPED_COLON = Character.toString(ESCAPE) + Character.toString(COLON);
+
+  // Escape the escape
+  public static final String ESCAPED_COLON_REGEX = Character.toString(ESCAPE)
+      + Character.toString(ESCAPE) + Character.toString(COLON);
+
+  public static final String ESCAPED_ASTERISK = Character.toString(ESCAPE)
+      + Character.toString(ASTERISK);
+
+  // Escape the escape, and escape the asterisk
+  public static final String ESCAPED_ASERTISK_REGEX = Character.toString(ESCAPE)
+      + Character.toString(ESCAPE) + Character.toString(ESCAPE) + Character.toString(ASTERISK);
+
+  public static final Charset UTF_8 = Charset.forName("UTF-8");
+}
diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java
new file mode 100644
index 0000000..c6ee5c4
--- /dev/null
+++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloHiveRow.java
@@ -0,0 +1,230 @@
+package org.apache.hadoop.hive.accumulo;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.List;
+
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+
+import com.google.common.base.Preconditions;
+
+/**
+ * Holds column tuples for rowID. Each tuple contains column family label, qualifier label, and byte
+ * array value.
+ */
+public class AccumuloHiveRow implements Writable {
+
+  private String rowId;
+  private List<ColumnTuple> tuples = new ArrayList<ColumnTuple>();
+
+  public AccumuloHiveRow() {}
+
+  public AccumuloHiveRow(String rowId) {
+    this.rowId = rowId;
+  }
+
+  public void setRowId(String rowId) {
+    this.rowId = rowId;
+  }
+
+  public List<ColumnTuple> getTuples() {
+    return Collections.unmodifiableList(tuples);
+  }
+
+  /**
+   * @return true if this instance has a tuple containing fam and qual, false otherwise.
+ */ + public boolean hasFamAndQual(Text fam, Text qual) { + for (ColumnTuple tuple : tuples) { + if (tuple.getCf().equals(fam) && tuple.getCq().equals(qual)) { + return true; + } + } + return false; + } + + /** + * @return byte [] value for first tuple containing fam and qual or null if no match. + */ + public byte[] getValue(Text fam, Text qual) { + for (ColumnTuple tuple : tuples) { + if (tuple.getCf().equals(fam) && tuple.getCq().equals(qual)) { + return tuple.getValue(); + } + } + return null; + } + + public String getRowId() { + return rowId; + } + + public void clear() { + this.rowId = null; + this.tuples = new ArrayList(); + } + + @Override + public String toString() { + StringBuilder builder = new StringBuilder("AccumuloHiveRow{"); + builder.append("rowId='").append(rowId).append("', tuples: "); + for (ColumnTuple tuple : tuples) { + builder.append(tuple.toString()); + builder.append("\n"); + } + return builder.toString(); + } + + @Override + public boolean equals(Object o) { + if (o instanceof AccumuloHiveRow) { + AccumuloHiveRow other = (AccumuloHiveRow) o; + if (null == rowId) { + if (null != other.rowId) { + return false; + } + } else if (!rowId.equals(other.rowId)) { + return false; + } + + return tuples.equals(other.tuples); + } + + return false; + } + + @Override + public void write(DataOutput dataOutput) throws IOException { + if (null != rowId) { + dataOutput.writeBoolean(true); + dataOutput.writeUTF(rowId); + } else { + dataOutput.writeBoolean(false); + } + int size = tuples.size(); + dataOutput.writeInt(size); + for (ColumnTuple tuple : tuples) { + Text cf = tuple.getCf(), cq = tuple.getCq(); + dataOutput.writeInt(cf.getLength()); + dataOutput.write(cf.getBytes(), 0, cf.getLength()); + dataOutput.writeInt(cq.getLength()); + dataOutput.write(cq.getBytes(), 0, cq.getLength()); + byte[] value = tuple.getValue(); + dataOutput.writeInt(value.length); + dataOutput.write(value); + } + } + + @Override + public void readFields(DataInput dataInput) throws IOException { + if (dataInput.readBoolean()) { + rowId = dataInput.readUTF(); + } + int size = dataInput.readInt(); + for (int i = 0; i < size; i++) { + int cfLength = dataInput.readInt(); + byte[] cfData = new byte[cfLength]; + dataInput.readFully(cfData, 0, cfLength); + Text cf = new Text(cfData); + int cqLength = dataInput.readInt(); + byte[] cqData = new byte[cqLength]; + dataInput.readFully(cqData, 0, cqLength); + Text cq = new Text(cqData); + int valSize = dataInput.readInt(); + byte[] val = new byte[valSize]; + for (int j = 0; j < valSize; j++) { + val[j] = dataInput.readByte(); + } + tuples.add(new ColumnTuple(cf, cq, val)); + } + } + + public void add(String cf, String qual, byte[] val) { + Preconditions.checkNotNull(cf); + Preconditions.checkNotNull(qual); + Preconditions.checkNotNull(val); + + add(new Text(cf), new Text(qual), val); + } + + public void add(Text cf, Text qual, byte[] val) { + Preconditions.checkNotNull(cf); + Preconditions.checkNotNull(qual); + Preconditions.checkNotNull(val); + + tuples.add(new ColumnTuple(cf, qual, val)); + } + + public static class ColumnTuple { + private final Text cf; + private final Text cq; + private final byte[] value; + + public ColumnTuple(Text cf, Text cq, byte[] value) { + this.value = value; + this.cf = cf; + this.cq = cq; + } + + public byte[] getValue() { + return value; + } + + public Text getCf() { + return cf; + } + + public Text getCq() { + return cq; + } + + @Override + public int hashCode() { + HashCodeBuilder hcb = new HashCodeBuilder(9683, 68783); + return 
hcb.append(cf).append(cq).append(value).toHashCode(); + } + + @Override + public boolean equals(Object o) { + if (o instanceof ColumnTuple) { + ColumnTuple other = (ColumnTuple) o; + if (null == cf) { + if (null != other.cf) { + return false; + } + } else if (!cf.equals(other.cf)) { + return false; + } + + if (null == cq) { + if (null != other.cq) { + return false; + } + } else if (!cq.equals(other.cq)) { + return false; + } + + if (null == value) { + if (null != other.value) { + return false; + } + } + + return Arrays.equals(value, other.value); + } + + return false; + } + + @Override + public String toString() { + return cf + " " + cq + " " + new String(value); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java new file mode 100644 index 0000000..f7642cc --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/AccumuloStorageHandler.java @@ -0,0 +1,341 @@ +package org.apache.hadoop.hive.accumulo; + +import java.io.IOException; +import java.util.Map; +import java.util.Properties; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.TableExistsException; +import org.apache.accumulo.core.client.TableNotFoundException; +import org.apache.accumulo.core.client.admin.TableOperations; +import org.apache.accumulo.fate.Fate; +import org.apache.accumulo.start.Main; +import org.apache.accumulo.trace.instrument.Tracer; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat; +import org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat; +import org.apache.hadoop.hive.accumulo.predicate.AccumuloPredicateHandler; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.metastore.HiveMetaHook; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; +import org.apache.hadoop.hive.ql.metadata.DefaultStorageHandler; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.security.authorization.HiveAuthorizationProvider; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.hive.serde2.SerDe; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.mapred.InputFormat; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.OutputFormat; +import org.apache.hadoop.util.StringUtils; +import org.apache.zookeeper.ZooKeeper; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Create table mapping to Accumulo for Hive. Handle predicate pushdown if necessary. 
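+ * <p>
+ * Illustrative usage sketch; the column mapping key is assumed to be "accumulo.columns.mapping"
+ * (see {@link AccumuloSerDeParameters#COLUMN_MAPPINGS} for the authoritative name), while
+ * "accumulo.table.name" is the value of {@link AccumuloConnectionParameters#TABLE_NAME}:
+ * <pre>
+ * CREATE TABLE accumulo_table (rowid STRING, name STRING)
+ * STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler'
+ * WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:name")
+ * TBLPROPERTIES ("accumulo.table.name" = "hive_accumulo_table");
+ * </pre>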
+ */ +public class AccumuloStorageHandler extends DefaultStorageHandler implements HiveMetaHook, + HiveStoragePredicateHandler { + private static final Logger log = LoggerFactory.getLogger(AccumuloStorageHandler.class); + private static final String DEFAULT_PREFIX = "default"; + + protected AccumuloPredicateHandler predicateHandler = AccumuloPredicateHandler.getInstance(); + protected AccumuloConnectionParameters connectionParams; + protected Configuration conf; + + /** + * Push down table properties into the JobConf. + * + * @param desc + * Hive table description + * @param jobProps + * Properties that will be added to the JobConf by Hive + */ + @Override + public void configureTableJobProperties(TableDesc desc, Map jobProps) { + // Should not be getting invoked, configureInputJobProperties or configureOutputJobProperties + // should be invoked instead. + configureInputJobProperties(desc, jobProps); + configureOutputJobProperties(desc, jobProps); + } + + protected String getTableName(Table table) throws MetaException { + // Use TBLPROPERTIES + String tableName = table.getParameters().get(AccumuloSerDeParameters.TABLE_NAME); + + if (null != tableName) { + return tableName; + } + + // Then try SERDEPROPERTIES + tableName = table.getSd().getSerdeInfo().getParameters() + .get(AccumuloSerDeParameters.TABLE_NAME); + + if (null != tableName) { + return tableName; + } + + // Use the hive table name, ignoring the default database + if (DEFAULT_PREFIX.equals(table.getDbName())) { + return table.getTableName(); + } else { + return table.getDbName() + "." + table.getTableName(); + } + } + + protected String getTableName(TableDesc tableDesc) { + Properties props = tableDesc.getProperties(); + String tableName = props.getProperty(AccumuloSerDeParameters.TABLE_NAME); + if (null != tableName) { + return tableName; + } + + tableName = props.getProperty(hive_metastoreConstants.META_TABLE_NAME); + + if (tableName.startsWith(DEFAULT_PREFIX + ".")) { + return tableName.substring(DEFAULT_PREFIX.length() + 1); + } + + return tableName; + } + + @Override + public Configuration getConf() { + return conf; + } + + @Override + public void setConf(Configuration conf) { + this.conf = conf; + connectionParams = new AccumuloConnectionParameters(conf); + } + + @SuppressWarnings("deprecation") + @Override + public Class getSerDeClass() { + return AccumuloSerDe.class; + } + + @Override + public HiveMetaHook getMetaHook() { + return this; + } + + @Override + public HiveAuthorizationProvider getAuthorizationProvider() throws HiveException { + return null; + } + + @Override + public void configureInputJobProperties(TableDesc tableDesc, Map jobProperties) { + Properties props = tableDesc.getProperties(); + + jobProperties.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, + props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS)); + + String tableName = props.getProperty(AccumuloSerDeParameters.TABLE_NAME); + if (null == tableName) { + tableName = getTableName(tableDesc); + } + jobProperties.put(AccumuloSerDeParameters.TABLE_NAME, + tableName); + + String useIterators = props.getProperty(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY); + if (useIterators != null) { + if (!useIterators.equalsIgnoreCase("true") && !useIterators.equalsIgnoreCase("false")) { + throw new IllegalArgumentException("Expected value of true or false for " + + AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY); + } + + jobProperties.put(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, useIterators); + } + + String storageType = 
props.getProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE); + if (null != storageType) { + jobProperties.put(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, storageType); + } + + String authValue = props.getProperty(AccumuloSerDeParameters.AUTHORIZATIONS_KEY); + if (null != authValue) { + jobProperties.put(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, authValue); + } + + log.info("Computed input job properties of " + jobProperties); + } + + @Override + public void configureOutputJobProperties(TableDesc tableDesc, Map jobProperties) { + Properties props = tableDesc.getProperties(); + // Adding these job properties will make them available to the OutputFormat in checkOutputSpecs + jobProperties.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, + props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS)); + + String tableName = props.getProperty(AccumuloSerDeParameters.TABLE_NAME); + if (null == tableName) { + tableName = getTableName(tableDesc); + } + jobProperties.put(AccumuloSerDeParameters.TABLE_NAME, tableName); + + if (props.containsKey(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE)) { + jobProperties.put(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, + props.getProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE)); + } + + if (props.containsKey(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY)) { + jobProperties.put(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY, + props.getProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY)); + } + } + + @SuppressWarnings("rawtypes") + @Override + public Class getInputFormatClass() { + return HiveAccumuloTableInputFormat.class; + } + + @Override + @SuppressWarnings("rawtypes") + public Class getOutputFormatClass() { + return HiveAccumuloTableOutputFormat.class; + } + + @Override + public void preCreateTable(Table table) throws MetaException { + boolean isExternal = isExternalTable(table); + if (table.getSd().getLocation() != null) { + throw new MetaException("Location can't be specified for Accumulo"); + } + + Map serdeParams = table.getSd().getSerdeInfo().getParameters(); + String columnMapping = serdeParams.get(AccumuloSerDeParameters.COLUMN_MAPPINGS); + if (columnMapping == null) { + throw new MetaException(AccumuloSerDeParameters.COLUMN_MAPPINGS + + " missing from SERDEPROPERTIES"); + } + + try { + String tblName = getTableName(table); + Connector connector = connectionParams.getConnector(); + TableOperations tableOpts = connector.tableOperations(); + + // Attempt to create the table, taking EXTERNAL into consideration + if (!tableOpts.exists(tblName)) { + if (!isExternal) { + tableOpts.create(tblName); + } else { + throw new MetaException("Accumulo table " + tblName + + " doesn't exist even though declared external"); + } + } else { + if (!isExternal) { + throw new MetaException("Table " + tblName + + " already exists in Accumulo. 
Use CREATE EXTERNAL TABLE to register with Hive."); + } + } + } catch (AccumuloSecurityException e) { + throw new MetaException(StringUtils.stringifyException(e)); + } catch (TableExistsException e) { + throw new MetaException(StringUtils.stringifyException(e)); + } catch (AccumuloException e) { + throw new MetaException(StringUtils.stringifyException(e)); + } + } + + protected boolean isExternalTable(Table table) { + return MetaStoreUtils.isExternalTable(table); + } + + @Override + public void rollbackCreateTable(Table table) throws MetaException { + // Same as commitDropTable where we always delete the data (accumulo table) + commitDropTable(table, true); + } + + @Override + public void commitCreateTable(Table table) throws MetaException { + // do nothing + } + + @Override + public void commitDropTable(Table table, boolean deleteData) throws MetaException { + String tblName = getTableName(table); + if (!isExternalTable(table)) { + try { + if (deleteData) { + TableOperations tblOpts = connectionParams.getConnector().tableOperations(); + if (tblOpts.exists(tblName)) { + tblOpts.delete(tblName); + } + } + } catch (AccumuloException e) { + throw new MetaException(StringUtils.stringifyException(e)); + } catch (AccumuloSecurityException e) { + throw new MetaException(StringUtils.stringifyException(e)); + } catch (TableNotFoundException e) { + throw new MetaException(StringUtils.stringifyException(e)); + } + } + } + + @Override + public void preDropTable(Table table) throws MetaException { + // do nothing + } + + @Override + public void rollbackDropTable(Table table) throws MetaException { + // do nothing + } + + @Override + public DecomposedPredicate decomposePredicate(JobConf conf, Deserializer deserializer, + ExprNodeDesc desc) { + if (!(deserializer instanceof AccumuloSerDe)) { + throw new RuntimeException("Expected an AccumuloSerDe but got " + + deserializer.getClass().getName()); + } + + AccumuloSerDe serDe = (AccumuloSerDe) deserializer; + if (serDe.getIteratorPushdown()) { + return predicateHandler.decompose(conf, desc); + } else { + log.info("Set to ignore Accumulo iterator pushdown, skipping predicate handler."); + return null; + } + } + + @Override + public void configureJobConf(TableDesc tableDesc, JobConf jobConf) { + try { + Utils.addDependencyJars(jobConf, Tracer.class, Fate.class, Connector.class, Main.class, + ZooKeeper.class, AccumuloStorageHandler.class); + } catch (IOException e) { + log.error("Could not add necessary Accumulo dependencies to classpath", e); + } + + Properties tblProperties = tableDesc.getProperties(); + AccumuloSerDeParameters serDeParams = null; + try { + serDeParams = new AccumuloSerDeParameters(jobConf, tblProperties, AccumuloSerDe.class.getName()); + } catch (SerDeException e) { + log.error("Could not instantiate AccumuloSerDeParameters", e); + return; + } + + try { + serDeParams.getRowIdFactory().addDependencyJars(jobConf); + } catch (IOException e) { + log.error("Could not add necessary dependencies for " + serDeParams.getRowIdFactory().getClass(), e); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloMap.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloMap.java new file mode 100644 index 0000000..effdc4b --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloMap.java @@ -0,0 +1,150 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import java.util.LinkedHashMap; +import java.util.Map; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow.ColumnTuple; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloMapColumnMapping; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyMap; +import org.apache.hadoop.hive.serde2.lazy.LazyObject; +import org.apache.hadoop.hive.serde2.lazy.LazyPrimitive; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; + +import com.google.common.base.Charsets; + +/** + * A Hive Map created from some collection of Key-Values from one to many column families with one + * to many column qualifiers. 
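+ * <p>
+ * For example, given a map-typed column mapping with family "cf" and qualifier prefix "tag"
+ * (e.g. a "cf:tag*" mapping spec, assuming the '*' syntax resolved by ColumnMappingFactory),
+ * every Key-Value in family "cf" whose qualifier starts with "tag" becomes one map entry:
+ * the key is the qualifier with the "tag" prefix stripped, the value is the cell value.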
+ */ +public class LazyAccumuloMap extends LazyMap { + + protected AccumuloHiveRow sourceRow; + protected HiveAccumuloMapColumnMapping columnMapping; + + public LazyAccumuloMap(LazyMapObjectInspector oi) { + super(oi); + } + + public void init(AccumuloHiveRow row, HiveAccumuloMapColumnMapping columnMapping) { + this.sourceRow = row; + this.columnMapping = columnMapping; + + this.setParsed(false); + } + + protected void parse() { + if (null == this.cachedMap) { + this.cachedMap = new LinkedHashMap(); + } else { + this.cachedMap.clear(); + } + + LazyMapObjectInspector lazyMoi = getInspector(); + + Text cf = new Text(columnMapping.getColumnFamily()); + for (ColumnTuple tuple : sourceRow.getTuples()) { + String cq = tuple.getCq().toString(); + + if (!cf.equals(tuple.getCf()) || !cq.startsWith(columnMapping.getColumnQualifierPrefix())) { + // A column family or qualifier we don't want to include in the map + continue; + } + + // Because we append the cq prefix when serializing the column + // we should also remove it when pulling it from Accumulo + cq = cq.substring(columnMapping.getColumnQualifierPrefix().length()); + + // Keys are always primitive, respect the binary + LazyPrimitive key = LazyFactory + .createLazyPrimitiveClass((PrimitiveObjectInspector) lazyMoi.getMapKeyObjectInspector(), + ColumnEncoding.BINARY == columnMapping.getKeyEncoding()); + + ByteArrayRef keyRef = new ByteArrayRef(); + keyRef.setData(cq.getBytes(Charsets.UTF_8)); + key.init(keyRef, 0, keyRef.getData().length); + + // Value can be anything, use the obj inspector and respect binary + LazyObject value = LazyFactory.createLazyObject(lazyMoi.getMapValueObjectInspector(), + ColumnEncoding.BINARY == columnMapping.getValueEncoding()); + + ByteArrayRef valueRef = new ByteArrayRef(); + valueRef.setData(tuple.getValue()); + value.init(valueRef, 0, valueRef.getData().length); + + cachedMap.put(key, value); + } + + this.setParsed(true); + } + + /** + * Get the value in the map for the given key. + * + * @param key + * The key, a column qualifier, from the map + * @return The object in the map at the given key + */ + @Override + public Object getMapValueElement(Object key) { + if (!getParsed()) { + parse(); + } + + for (Map.Entry entry : cachedMap.entrySet()) { + LazyPrimitive lazyKey = (LazyPrimitive) entry.getKey(); + + // getWritableObject() will convert LazyPrimitive to actual primitive + // writable objects. + Object keyI = lazyKey.getWritableObject(); + if (keyI == null) { + continue; + } + if (keyI.equals(key)) { + // Got a match, return the value + LazyObject v = (LazyObject) entry.getValue(); + return v == null ? 
v : v.getObject(); + } + } + + return null; + } + + @Override + public Map getMap() { + if (!getParsed()) { + parse(); + } + return cachedMap; + } + + @Override + public int getMapSize() { + if (!getParsed()) { + parse(); + } + return cachedMap.size(); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java new file mode 100644 index 0000000..03cd250 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/LazyAccumuloRow.java @@ -0,0 +1,140 @@ +package org.apache.hadoop.hive.accumulo; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloMapColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.accumulo.serde.AccumuloRowIdFactory; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.io.Text; +import org.apache.log4j.Logger; + +/** + * + * Parses column tuples in each AccumuloHiveRow and creates Lazy objects for each field. + * + */ +public class LazyAccumuloRow extends LazyStruct { + private static final Logger log = Logger.getLogger(LazyAccumuloRow.class); + + private AccumuloHiveRow row; + private List columnMappings; + private ArrayList cachedList = new ArrayList(); + private AccumuloRowIdFactory rowIdFactory; + + public LazyAccumuloRow(LazySimpleStructObjectInspector inspector) { + super(inspector); + } + + public void init(AccumuloHiveRow hiveRow, List columnMappings, + AccumuloRowIdFactory rowIdFactory) { + this.row = hiveRow; + this.columnMappings = columnMappings; + this.rowIdFactory = rowIdFactory; + setParsed(false); + } + + private void parse() { + if (getFields() == null) { + // Will properly set string or binary serialization via createLazyField(...) + initLazyFields(oi.getAllStructFieldRefs()); + } + if (!getParsed()) { + Arrays.fill(getFieldInited(), false); + setParsed(true); + } + } + + @Override + public Object getField(int id) { + if (!getParsed()) { + parse(); + } + return uncheckedGetField(id); + } + + /* + * split pairs by delimiter. 
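+ * More precisely: the field's ColumnMapping determines where the bytes come from (the Accumulo
+ * row ID for HiveAccumuloRowIdColumnMapping, a whole column family for
+ * HiveAccumuloMapColumnMapping, or a single family/qualifier pair otherwise), and the lazy
+ * object for the field is initialized at most once per row (see the HIVE-3179 note below).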
+ */ + private Object uncheckedGetField(int id) { + if (!getFieldInited()[id]) { + ByteArrayRef ref; + ColumnMapping columnMapping = columnMappings.get(id); + + if (columnMapping instanceof HiveAccumuloMapColumnMapping) { + HiveAccumuloMapColumnMapping mapColumnMapping = (HiveAccumuloMapColumnMapping) columnMapping; + + LazyAccumuloMap map = (LazyAccumuloMap) getFields()[id]; + map.init(row, mapColumnMapping); + } else { + if (columnMapping instanceof HiveAccumuloRowIdColumnMapping) { + // Use the rowID directly + ref = new ByteArrayRef(); + ref.setData(row.getRowId().getBytes()); + } else if (columnMapping instanceof HiveAccumuloColumnMapping) { + HiveAccumuloColumnMapping accumuloColumnMapping = (HiveAccumuloColumnMapping) columnMapping; + + // Use the colfam and colqual to get the value + byte[] val = row.getValue(new Text(accumuloColumnMapping.getColumnFamily()), new Text( + accumuloColumnMapping.getColumnQualifier())); + if (val == null) { + return null; + } else { + ref = new ByteArrayRef(); + ref.setData(val); + } + } else { + log.error("Could not process ColumnMapping of type " + columnMapping.getClass() + + " at offset " + id + " in column mapping: " + columnMapping.getMappingSpec()); + throw new IllegalArgumentException("Cannot process ColumnMapping of type " + + columnMapping.getClass()); + } + + getFields()[id].init(ref, 0, ref.getData().length); + } + + // HIVE-3179 only init the field when it isn't null + getFieldInited()[id] = true; + } + + return getFields()[id].getObject(); + } + + @Override + public ArrayList getFieldsAsList() { + if (!getParsed()) { + parse(); + } + cachedList.clear(); + for (int i = 0; i < getFields().length; i++) { + cachedList.add(uncheckedGetField(i)); + } + return cachedList; + } + + @Override + protected LazyObjectBase createLazyField(int fieldID, StructField fieldRef) throws SerDeException { + final ColumnMapping columnMapping = columnMappings.get(fieldID); + + if (columnMapping instanceof HiveAccumuloRowIdColumnMapping) { + return rowIdFactory.createRowId(fieldRef.getFieldObjectInspector()); + } else if (columnMapping instanceof HiveAccumuloMapColumnMapping) { + return new LazyAccumuloMap((LazyMapObjectInspector) fieldRef.getFieldObjectInspector()); + } else { + return LazyFactory.createLazyObject(fieldRef.getFieldObjectInspector(), + ColumnEncoding.BINARY == columnMapping.getEncoding()); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java new file mode 100644 index 0000000..16abac2 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/Utils.java @@ -0,0 +1,352 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with this + * work for additional information regarding copyright ownership. The ASF + * licenses this file to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + * License for the specific language governing permissions and limitations under + * the License. 
+ */ +package org.apache.hadoop.hive.accumulo; + +import java.io.BufferedOutputStream; +import java.io.File; +import java.io.FileInputStream; +import java.io.FileOutputStream; +import java.io.IOException; +import java.io.InputStream; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.net.URL; +import java.net.URLDecoder; +import java.text.MessageFormat; +import java.util.Enumeration; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; +import java.util.jar.JarFile; +import java.util.jar.JarOutputStream; +import java.util.jar.Manifest; +import java.util.zip.ZipEntry; +import java.util.zip.ZipFile; +import java.util.zip.ZipOutputStream; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; + +import com.google.common.base.Preconditions; + +/** + * Accumulo doesn't have a TableMapReduceUtil.addDependencyJars method like HBase which is very + * helpful + */ +public class Utils { + private static final Logger log = Logger.getLogger(Utils.class); + + // Thanks, HBase + public static void addDependencyJars(Configuration conf, Class... classes) throws IOException { + FileSystem localFs = FileSystem.getLocal(conf); + Set jars = new HashSet(); + // Add jars that are already in the tmpjars variable + jars.addAll(conf.getStringCollection("tmpjars")); + + // add jars as we find them to a map of contents jar name so that we can + // avoid + // creating new jars for classes that have already been packaged. + Map packagedClasses = new HashMap(); + + // Add jars containing the specified classes + for (Class clazz : classes) { + if (clazz == null) + continue; + + Path path = findOrCreateJar(clazz, localFs, packagedClasses); + if (path == null) { + log.warn("Could not find jar for class " + clazz + " in order to ship it to the cluster."); + continue; + } + if (!localFs.exists(path)) { + log.warn("Could not validate jar file " + path + " for class " + clazz); + continue; + } + jars.add(path.toString()); + } + if (jars.isEmpty()) + return; + + conf.set("tmpjars", StringUtils.arrayToString(jars.toArray(new String[jars.size()]))); + } + + /** + * If org.apache.hadoop.util.JarFinder is available (0.23+ hadoop), finds the Jar for a class or + * creates it if it doesn't exist. If the class is in a directory in the classpath, it creates a + * Jar on the fly with the contents of the directory and returns the path to that Jar. If a Jar is + * created, it is created in the system temporary directory. Otherwise, returns an existing jar + * that contains a class of the same name. Maintains a mapping from jar contents to the tmp jar + * created. + * + * @param my_class + * the class to find. + * @param fs + * the FileSystem with which to qualify the returned path. + * @param packagedClasses + * a map of class name to path. + * @return a jar file that contains the class. + * @throws IOException + */ + @SuppressWarnings("deprecation") + private static Path findOrCreateJar(Class my_class, FileSystem fs, + Map packagedClasses) throws IOException { + // attempt to locate an existing jar for the class. 
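+    // If nothing on the classpath contains the class (e.g. when running from a directory of
+    // .class files), fall back to the JarFinder-style getJar() below, which may build a
+    // temporary jar on the fly and record it in packagedClasses.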
+ String jar = findContainingJar(my_class, packagedClasses); + if (null == jar || jar.isEmpty()) { + jar = getJar(my_class); + updateMap(jar, packagedClasses); + } + + if (null == jar || jar.isEmpty()) { + return null; + } + + log.debug(String.format("For class %s, using jar %s", my_class.getName(), jar)); + return new Path(jar).makeQualified(fs); + } + + /** + * Add entries to packagedClasses corresponding to class files contained in + * jar. + * + * @param jar + * The jar who's content to list. + * @param packagedClasses + * map[class -> jar] + */ + private static void updateMap(String jar, Map packagedClasses) throws IOException { + if (null == jar || jar.isEmpty()) { + return; + } + ZipFile zip = null; + try { + zip = new ZipFile(jar); + for (Enumeration iter = zip.entries(); iter.hasMoreElements();) { + ZipEntry entry = iter.nextElement(); + if (entry.getName().endsWith("class")) { + packagedClasses.put(entry.getName(), jar); + } + } + } finally { + if (null != zip) + zip.close(); + } + } + + /** + * Find a jar that contains a class of the same name, if any. It will return a jar file, even if + * that is not the first thing on the class path that has a class with the same name. Looks first + * on the classpath and then in the packagedClasses map. + * + * @param my_class + * the class to find. + * @return a jar file that contains the class, or null. + * @throws IOException + */ + private static String findContainingJar(Class my_class, Map packagedClasses) + throws IOException { + ClassLoader loader = my_class.getClassLoader(); + String class_file = my_class.getName().replaceAll("\\.", "/") + ".class"; + + // first search the classpath + for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) { + URL url = itr.nextElement(); + if ("jar".equals(url.getProtocol())) { + String toReturn = url.getPath(); + if (toReturn.startsWith("file:")) { + toReturn = toReturn.substring("file:".length()); + } + // URLDecoder is a misnamed class, since it actually decodes + // x-www-form-urlencoded MIME type rather than actual + // URL encoding (which the file path has). Therefore it would + // decode +s to ' 's which is incorrect (spaces are actually + // either unencoded or encoded as "%20"). Replace +s first, so + // that they are kept sacred during the decoding process. + toReturn = toReturn.replaceAll("\\+", "%2B"); + toReturn = URLDecoder.decode(toReturn, "UTF-8"); + return toReturn.replaceAll("!.*$", ""); + } + } + + // now look in any jars we've packaged using JarFinder. Returns null + // when + // no jar is found. + return packagedClasses.get(class_file); + } + + /** + * Invoke 'getJar' on a JarFinder implementation. Useful for some job configuration contexts + * (HBASE-8140) and also for testing on MRv2. First check if we have HADOOP-9426. Lacking that, + * fall back to the backport. + * + * @param my_class + * the class to find. + * @return a jar file that contains the class, or null. 
+ */ + private static String getJar(Class my_class) { + String ret = null; + String hadoopJarFinder = "org.apache.hadoop.util.JarFinder"; + Class jarFinder = null; + try { + log.debug("Looking for " + hadoopJarFinder + "."); + jarFinder = Class.forName(hadoopJarFinder); + log.debug(hadoopJarFinder + " found."); + Method getJar = jarFinder.getMethod("getJar", Class.class); + ret = (String) getJar.invoke(null, my_class); + } catch (ClassNotFoundException e) { + log.debug("Using backported JarFinder."); + ret = jarFinderGetJar(my_class); + } catch (InvocationTargetException e) { + // function was properly called, but threw it's own exception. + // Unwrap it + // and pass it on. + throw new RuntimeException(e.getCause()); + } catch (Exception e) { + // toss all other exceptions, related to reflection failure + throw new RuntimeException("getJar invocation failed.", e); + } + + return ret; + } + + /** + * Returns the full path to the Jar containing the class. It always return a JAR. + * + * @param klass + * class. + * + * @return path to the Jar containing the class. + */ + @SuppressWarnings("rawtypes") + public static String jarFinderGetJar(Class klass) { + Preconditions.checkNotNull(klass, "klass"); + ClassLoader loader = klass.getClassLoader(); + if (loader != null) { + String class_file = klass.getName().replaceAll("\\.", "/") + ".class"; + try { + for (Enumeration itr = loader.getResources(class_file); itr.hasMoreElements();) { + URL url = (URL) itr.nextElement(); + String path = url.getPath(); + if (path.startsWith("file:")) { + path = path.substring("file:".length()); + } + path = URLDecoder.decode(path, "UTF-8"); + if ("jar".equals(url.getProtocol())) { + path = URLDecoder.decode(path, "UTF-8"); + return path.replaceAll("!.*$", ""); + } else if ("file".equals(url.getProtocol())) { + String klassName = klass.getName(); + klassName = klassName.replace(".", "/") + ".class"; + path = path.substring(0, path.length() - klassName.length()); + File baseDir = new File(path); + File testDir = new File(System.getProperty("test.build.dir", "target/test-dir")); + testDir = testDir.getAbsoluteFile(); + if (!testDir.exists()) { + testDir.mkdirs(); + } + File tempJar = File.createTempFile("hadoop-", "", testDir); + tempJar = new File(tempJar.getAbsolutePath() + ".jar"); + createJar(baseDir, tempJar); + return tempJar.getAbsolutePath(); + } + } + } catch (IOException e) { + throw new RuntimeException(e); + } + } + return null; + } + + private static void copyToZipStream(InputStream is, ZipEntry entry, ZipOutputStream zos) + throws IOException { + zos.putNextEntry(entry); + byte[] arr = new byte[4096]; + int read = is.read(arr); + while (read > -1) { + zos.write(arr, 0, read); + read = is.read(arr); + } + is.close(); + zos.closeEntry(); + } + + public static void jarDir(File dir, String relativePath, ZipOutputStream zos) throws IOException { + Preconditions.checkNotNull(relativePath, "relativePath"); + Preconditions.checkNotNull(zos, "zos"); + + // by JAR spec, if there is a manifest, it must be the first entry in + // the + // ZIP. 
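+    // If the directory has no META-INF/MANIFEST.MF, write an empty default manifest as the
+    // first entry so the archive is still a valid jar; otherwise copy the existing manifest.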
+ File manifestFile = new File(dir, JarFile.MANIFEST_NAME); + ZipEntry manifestEntry = new ZipEntry(JarFile.MANIFEST_NAME); + if (!manifestFile.exists()) { + zos.putNextEntry(manifestEntry); + new Manifest().write(new BufferedOutputStream(zos)); + zos.closeEntry(); + } else { + InputStream is = new FileInputStream(manifestFile); + copyToZipStream(is, manifestEntry, zos); + } + zos.closeEntry(); + zipDir(dir, relativePath, zos, true); + zos.close(); + } + + private static void zipDir(File dir, String relativePath, ZipOutputStream zos, boolean start) + throws IOException { + String[] dirList = dir.list(); + for (String aDirList : dirList) { + File f = new File(dir, aDirList); + if (!f.isHidden()) { + if (f.isDirectory()) { + if (!start) { + ZipEntry dirEntry = new ZipEntry(relativePath + f.getName() + "/"); + zos.putNextEntry(dirEntry); + zos.closeEntry(); + } + String filePath = f.getPath(); + File file = new File(filePath); + zipDir(file, relativePath + f.getName() + "/", zos, false); + } else { + String path = relativePath + f.getName(); + if (!path.equals(JarFile.MANIFEST_NAME)) { + ZipEntry anEntry = new ZipEntry(path); + InputStream is = new FileInputStream(f); + copyToZipStream(is, anEntry, zos); + } + } + } + } + } + + private static void createJar(File dir, File jarFile) throws IOException { + Preconditions.checkNotNull(dir, "dir"); + Preconditions.checkNotNull(jarFile, "jarFile"); + File jarDir = jarFile.getParentFile(); + if (!jarDir.exists()) { + if (!jarDir.mkdirs()) { + throw new IOException(MessageFormat.format("could not create dir [{0}]", jarDir)); + } + } + JarOutputStream zos = new JarOutputStream(new FileOutputStream(jarFile)); + jarDir(dir, "", zos); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnEncoding.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnEncoding.java new file mode 100644 index 0000000..8e10313 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnEncoding.java @@ -0,0 +1,179 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import java.util.HashMap; +import java.util.Map.Entry; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; + +/** + * Encapsulate the encoding of values within the given column in Accumulo + */ +public enum ColumnEncoding { + STRING("string", "s"), BINARY("binary", "b"); + + private static final HashMap CODE_CACHE = new HashMap(), + NAME_CACHE = new HashMap(); + + static { + CODE_CACHE.put(STRING.getCode(), STRING); + CODE_CACHE.put(BINARY.getCode(), BINARY); + + NAME_CACHE.put(STRING.getName(), STRING); + NAME_CACHE.put(BINARY.getName(), BINARY); + } + + private final String name; + private final String code; + + private ColumnEncoding(String name, String code) { + this.name = name; + this.code = code; + } + + public String getName() { + return this.name; + } + + public String getCode() { + return code; + } + + /** + * Get the ColumnEncoding which has the given code. + * + * @param code + * The one-character 'code' which uniquely identifies the ColumnEncoding + * @return The ColumnEncoding with the code equal to the provided argument + */ + public static ColumnEncoding fromCode(String code) { + if (!CODE_CACHE.containsKey(code)) { + throw new IllegalArgumentException("No ColumnEncoding defined with code " + code); + } + + return CODE_CACHE.get(code); + } + + public static ColumnEncoding fromName(String name) { + if (!NAME_CACHE.containsKey(name)) { + throw new IllegalArgumentException("No ColumnEncoding defined with name " + name); + } + + return NAME_CACHE.get(name); + } + + public static ColumnEncoding get(String nameOrCode) { + ColumnEncoding encoding = CODE_CACHE.get(nameOrCode); + if (null != encoding) { + return encoding; + } + + encoding = NAME_CACHE.get(nameOrCode); + if (null != encoding) { + return encoding; + } + + throw new IllegalArgumentException("No ColumnEncoding defined for " + nameOrCode); + } + + public static ColumnEncoding getFromMapping(String columnMapping) { + Preconditions.checkNotNull(columnMapping); + + String encoding = getColumnEncoding(columnMapping); + + return get(encoding); + } + + /** + * Determines if a custom encoding was specified for the give column. + * + * @param columnMapping + * The mapping from Hive column to an Accumulo column + * @return True if the column mapping string specifies an encoding, false otherwise + */ + public static boolean hasColumnEncoding(String columnMapping) { + Preconditions.checkNotNull(columnMapping); + + int offset = columnMapping.lastIndexOf(AccumuloHiveConstants.POUND); + + // Make sure that the '#' wasn't escaped + if (0 < offset && AccumuloHiveConstants.ESCAPE == columnMapping.charAt(offset - 1)) { + // The encoding name/codes don't contain pound signs + return false; + } + + return -1 != offset; + } + + public static String getColumnEncoding(String columnMapping) { + int offset = columnMapping.lastIndexOf(AccumuloHiveConstants.POUND); + + // Make sure that the '#' wasn't escaped + if (0 < offset && AccumuloHiveConstants.ESCAPE == columnMapping.charAt(offset - 1)) { + throw new IllegalArgumentException("Column mapping did not contain a column encoding: " + + columnMapping); + } + + return columnMapping.substring(offset + 1); + } + + public static ColumnEncoding getDefault() { + return STRING; + } + + /** + * Removes the column encoding code and separator from the original column mapping string. 
Throws + * an IllegalArgumentException if this method is called on a string that doesn't contain a code. + * + * @param columnMapping + * The mapping from Hive column to Accumulo column + * @return The column mapping with the code removed + */ + public static String stripCode(String columnMapping) { + Preconditions.checkNotNull(columnMapping); + + int offset = columnMapping.lastIndexOf(AccumuloHiveConstants.POUND); + if (-1 == offset + || (0 < offset && AccumuloHiveConstants.ESCAPE == columnMapping.charAt(offset - 1))) { + throw new IllegalArgumentException( + "Provided column mapping does not define a column encoding"); + } + + return columnMapping.substring(0, offset); + } + + public static boolean isMapEncoding(String columnEncoding) { + return -1 != columnEncoding.indexOf(AccumuloHiveConstants.COLON); + } + + public static Entry getMapEncoding(String columnEncoding) { + int index = columnEncoding.indexOf(AccumuloHiveConstants.COLON); + if (-1 == index) { + throw new IllegalArgumentException( + "Serialized column encoding did not contain a pair of encodings to split"); + } + + String encoding1 = columnEncoding.substring(0, index), encoding2 = columnEncoding + .substring(index + 1); + + return Maps.immutableEntry(get(encoding1), get(encoding2)); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java new file mode 100644 index 0000000..ff9db46 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapper.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.columns; + +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; + +import com.google.common.base.Preconditions; + +/** + * + */ +public class ColumnMapper { + private static final Logger log = Logger.getLogger(ColumnMapper.class); + + private List columnMappings; + private int rowIdOffset; + private HiveAccumuloRowIdColumnMapping rowIdMapping = null; + private final ColumnEncoding defaultEncoding; + + /** + * Create a mapping from Hive columns (rowID and column) to Accumulo columns (column family and + * qualifier). The ordering of the {@link ColumnMapping}s is important as it aligns with the + * ordering of the columns for the Hive table schema. 
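+ * <p>
+ * For example (illustrative): with Hive columns (row STRING, name STRING, age INT), a mapping
+ * of ":rowID,info:name,info:age#b" binds the first column to the Accumulo row ID, the second
+ * to info:name with the default (string) encoding, and the third to info:age with the binary
+ * encoding selected by the "#b" code (see ColumnEncoding).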
+ * + * @param serializedColumnMappings + * Comma-separated list of designators that map to Accumulo columns whose offsets + * correspond to the Hive table schema + * @throws TooManyAccumuloColumnsException + */ + public ColumnMapper(String serializedColumnMappings, String defaultStorageType, + List columnNames, List columnTypes) throws TooManyAccumuloColumnsException { + Preconditions.checkNotNull(serializedColumnMappings); + + String[] parsedColumnMappingValue = StringUtils.split(serializedColumnMappings, + AccumuloHiveConstants.COMMA); + columnMappings = new ArrayList(parsedColumnMappingValue.length); + rowIdOffset = -1; + + // Determine the default encoding type (specified on the table, or the global default + // if none was provided) + if (null == defaultStorageType || "".equals(defaultStorageType)) { + defaultEncoding = ColumnEncoding.getDefault(); + } else { + defaultEncoding = ColumnEncoding.get(defaultStorageType.toLowerCase()); + } + + if (parsedColumnMappingValue.length > columnNames.size()) { + throw new TooManyAccumuloColumnsException("Found " + parsedColumnMappingValue.length + + " columns, but only know of " + columnNames.size() + " Hive column names"); + } + + if (parsedColumnMappingValue.length > columnTypes.size()) { + throw new TooManyAccumuloColumnsException("Found " + parsedColumnMappingValue.length + + " columns, but only know of " + columnNames.size() + " Hive column types"); + } + + for (int i = 0; i < parsedColumnMappingValue.length; i++) { + String columnMappingStr = parsedColumnMappingValue[i]; + + // Create the mapping for this column, with configured encoding + ColumnMapping columnMapping = ColumnMappingFactory.get(columnMappingStr, defaultEncoding, + columnNames.get(i), columnTypes.get(i)); + + if (columnMapping instanceof HiveAccumuloRowIdColumnMapping) { + if (-1 != rowIdOffset) { + throw new IllegalArgumentException( + "Column mapping should only have one definition with a value of " + + AccumuloHiveConstants.ROWID); + } + + rowIdOffset = i; + rowIdMapping = (HiveAccumuloRowIdColumnMapping) columnMapping; + } + + columnMappings.add(columnMapping); + } + } + + public int size() { + return columnMappings.size(); + } + + public ColumnMapping get(int i) { + return columnMappings.get(i); + } + + public List getColumnMappings() { + return Collections.unmodifiableList(columnMappings); + } + + public boolean hasRowIdMapping() { + return null != rowIdMapping; + } + + public HiveAccumuloRowIdColumnMapping getRowIdMapping() { + return rowIdMapping; + } + + public int getRowIdOffset() { + return rowIdOffset; + } + + public String getTypesString() { + StringBuilder sb = new StringBuilder(); + for (ColumnMapping columnMapping : columnMappings) { + if (sb.length() > 0) { + sb.append(AccumuloHiveConstants.COLON); + } + + if (columnMapping instanceof HiveAccumuloRowIdColumnMapping) { + // the rowID column is a string + sb.append(serdeConstants.STRING_TYPE_NAME); + } else if (columnMapping instanceof HiveAccumuloColumnMapping) { + // a normal column is also a string + sb.append(serdeConstants.STRING_TYPE_NAME); + } else if (columnMapping instanceof HiveAccumuloMapColumnMapping) { + // TODO can we be more precise than string,string? 
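+        // e.g. a mapper over ":rowID,cf:cq,cf:prefix*" would produce the type string
+        // "string:string:map<string,string>" (entries joined by the COLON separator)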
+ sb.append(serdeConstants.MAP_TYPE_NAME).append("<").append(serdeConstants.STRING_TYPE_NAME) + .append(",").append(serdeConstants.STRING_TYPE_NAME).append(">"); + } else { + throw new IllegalArgumentException("Cannot process ColumnMapping of type " + + columnMapping.getClass().getName()); + } + } + + return sb.toString(); + } + + public ColumnMapping getColumnMappingForHiveColumn(List hiveColumns, String hiveColumnName) { + Preconditions.checkNotNull(hiveColumns); + Preconditions.checkNotNull(hiveColumnName); + Preconditions.checkArgument(columnMappings.size() <= hiveColumns.size(), + "Expected equal number of column mappings and Hive columns, " + columnMappings + ", " + + hiveColumns); + + int hiveColumnOffset = 0; + for (; hiveColumnOffset < hiveColumns.size() && hiveColumnOffset < columnMappings.size(); hiveColumnOffset++) { + if (hiveColumns.get(hiveColumnOffset).equals(hiveColumnName)) { + return columnMappings.get(hiveColumnOffset); + } + } + + log.error("Could not find offset for Hive column with name '" + hiveColumnName + + "' with columns " + hiveColumns); + throw new IllegalArgumentException("Could not find offset for Hive column with name " + + hiveColumnName); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(32); + sb.append("[").append(this.getClass().getSimpleName()).append(" "); + sb.append(columnMappings).append(", rowIdOffset: ").append(this.rowIdOffset) + .append(", defaultEncoding: "); + sb.append(this.defaultEncoding).append("]"); + return sb.toString(); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapping.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapping.java new file mode 100644 index 0000000..e1d19f9 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMapping.java @@ -0,0 +1,95 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; + +import com.google.common.base.Preconditions; + +/** + * + */ +public abstract class ColumnMapping { + + // SerDe property for how the Hive column maps to Accumulo + protected final String mappingSpec; + + // The manner in which the values in this column are de/serialized from/to Accumulo + protected final ColumnEncoding encoding; + + // The name of the Hive column + protected final String columnName; + + // The type of the Hive column + // Cannot store the actual TypeInfo because that would require + // Hive jars on the Accumulo classpath which we don't want + protected final String columnType; + + protected ColumnMapping(String mappingSpec, ColumnEncoding encoding, String columnName, + String columnType) { + Preconditions.checkNotNull(mappingSpec); + Preconditions.checkNotNull(encoding); + Preconditions.checkNotNull(columnName); + Preconditions.checkNotNull(columnType); + + this.mappingSpec = mappingSpec; + this.encoding = encoding; + this.columnName = columnName; + this.columnType = columnType; + } + + protected ColumnMapping(String mappingSpec, ColumnEncoding encoding, String columnName, + TypeInfo columnType) { + Preconditions.checkNotNull(mappingSpec); + Preconditions.checkNotNull(encoding); + Preconditions.checkNotNull(columnName); + Preconditions.checkNotNull(columnType); + + this.mappingSpec = mappingSpec; + this.encoding = encoding; + this.columnName = columnName; + this.columnType = columnType.getTypeName(); + } + + /** + * The property defining how this Column is mapped into Accumulo + */ + public String getMappingSpec() { + return mappingSpec; + } + + /** + * The manner in which the value is encoded in Accumulo + */ + public ColumnEncoding getEncoding() { + return encoding; + } + + /** + * The name of the Hive column this is mapping + */ + public String getColumnName() { + return columnName; + } + + /** + * The @{link TypeInfo} of the Hive column this is mapping + */ + public String getColumnType() { + return columnType; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMappingFactory.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMappingFactory.java new file mode 100644 index 0000000..a241882 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/ColumnMappingFactory.java @@ -0,0 +1,180 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import java.util.Map.Entry; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.log4j.Logger; + +import com.google.common.base.Preconditions; +import com.google.common.collect.Maps; + +/** + * + */ +public class ColumnMappingFactory { + private static final Logger log = Logger.getLogger(ColumnMappingFactory.class); + + /** + * Generate the proper instance of a ColumnMapping + * + * @param columnSpec + * Specification for mapping this column to Accumulo + * @param defaultEncoding + * The default encoding in which values should be encoded to Accumulo + */ + public static ColumnMapping get(String columnSpec, ColumnEncoding defaultEncoding, + String columnName, TypeInfo columnType) { + Preconditions.checkNotNull(columnSpec); + Preconditions.checkNotNull(columnName); + Preconditions.checkNotNull(columnType); + ColumnEncoding encoding = defaultEncoding; + + // Check for column encoding specification + if (ColumnEncoding.hasColumnEncoding(columnSpec)) { + String columnEncodingStr = ColumnEncoding.getColumnEncoding(columnSpec); + columnSpec = ColumnEncoding.stripCode(columnSpec); + + if (AccumuloHiveConstants.ROWID.equalsIgnoreCase(columnSpec)) { + return new HiveAccumuloRowIdColumnMapping(columnSpec, + ColumnEncoding.get(columnEncodingStr), columnName, columnType.getTypeName()); + } else { + Entry pair = parseMapping(columnSpec); + + if (isPrefix(pair.getValue())) { + // Sanity check that, for a map, we got 2 encodings + if (!ColumnEncoding.isMapEncoding(columnEncodingStr)) { + throw new IllegalArgumentException("Expected map encoding for a map specification, " + + columnSpec + " with encoding " + columnEncodingStr); + } + + Entry encodings = ColumnEncoding + .getMapEncoding(columnEncodingStr); + + return new HiveAccumuloMapColumnMapping(pair.getKey(), pair.getValue(), + encodings.getKey(), encodings.getValue(), columnName, columnType.getTypeName()); + } else { + return new HiveAccumuloColumnMapping(pair.getKey(), pair.getValue(), + ColumnEncoding.getFromMapping(columnEncodingStr), columnName, columnType.getTypeName()); + } + } + } else { + if (AccumuloHiveConstants.ROWID.equalsIgnoreCase(columnSpec)) { + return new HiveAccumuloRowIdColumnMapping(columnSpec, defaultEncoding, columnName, + columnType.getTypeName()); + } else { + Entry pair = parseMapping(columnSpec); + boolean isPrefix = isPrefix(pair.getValue()); + + String cq = pair.getValue(); + + // Replace any \* that appear in the prefix with a regular * + if (-1 != cq.indexOf(AccumuloHiveConstants.ESCAPED_ASTERISK)) { + cq = cq.replaceAll(AccumuloHiveConstants.ESCAPED_ASERTISK_REGEX, + Character.toString(AccumuloHiveConstants.ASTERISK)); + } + + if (isPrefix) { + return new HiveAccumuloMapColumnMapping(pair.getKey(), cq.substring(0, cq.length() - 1), + defaultEncoding, defaultEncoding, columnName, columnType.getTypeName()); + } else { + return new HiveAccumuloColumnMapping(pair.getKey(), cq, encoding, columnName, columnType.getTypeName()); + } + } + } + } + + public static ColumnMapping getMap(String columnSpec, ColumnEncoding keyEncoding, + ColumnEncoding valueEncoding, String columnName, TypeInfo columnType) { + Entry pair = parseMapping(columnSpec); + return new HiveAccumuloMapColumnMapping(pair.getKey(), pair.getValue(), keyEncoding, + valueEncoding, columnName, columnType.toString()); + + } + + public static boolean isPrefix(String maybePrefix) { + Preconditions.checkNotNull(maybePrefix); + 
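+ // A qualifier ending in an unescaped asterisk, e.g. "cf:prefix*", denotes a qualifier prefix
+ // and is mapped to a Hive map column rather than to a single value.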
+ if (AccumuloHiveConstants.ASTERISK == maybePrefix.charAt(maybePrefix.length() - 1)) { + if (maybePrefix.length() > 1) { + return AccumuloHiveConstants.ESCAPE != maybePrefix.charAt(maybePrefix.length() - 2); + } else { + return true; + } + } + + // If we couldn't find an asterisk, it's not a prefix + return false; + } + + /** + * Consumes the column mapping specification and breaks it into column family and column + * qualifier. + */ + public static Entry parseMapping(String columnSpec) + throws InvalidColumnMappingException { + int index = 0; + while (true) { + if (index >= columnSpec.length()) { + log.error("Cannot parse '" + columnSpec + "' as colon-separated column configuration"); + throw new InvalidColumnMappingException( + "Columns must be provided as colon-separated family and qualifier pairs"); + } + + index = columnSpec.indexOf(AccumuloHiveConstants.COLON, index); + + if (-1 == index) { + log.error("Cannot parse '" + columnSpec + "' as colon-separated column configuration"); + throw new InvalidColumnMappingException( + "Columns must be provided as colon-separated family and qualifier pairs"); + } + + // Check for an escape character before the colon + if (index - 1 > 0) { + char testChar = columnSpec.charAt(index - 1); + if (AccumuloHiveConstants.ESCAPE == testChar) { + // this colon is escaped, search again after it + index++; + continue; + } + + // If the previous character isn't an escape characters, it's the separator + } + + // Can't be escaped, it is the separator + break; + } + + String cf = columnSpec.substring(0, index), cq = columnSpec.substring(index + 1); + + // Check for the escaped colon to remove before doing the expensive regex replace + if (-1 != cf.indexOf(AccumuloHiveConstants.ESCAPED_COLON)) { + cf = cf.replaceAll(AccumuloHiveConstants.ESCAPED_COLON_REGEX, + Character.toString(AccumuloHiveConstants.COLON)); + } + + // Check for the escaped colon to remove before doing the expensive regex replace + if (-1 != cq.indexOf(AccumuloHiveConstants.ESCAPED_COLON)) { + cq = cq.replaceAll(AccumuloHiveConstants.ESCAPED_COLON_REGEX, + Character.toString(AccumuloHiveConstants.COLON)); + } + + return Maps.immutableEntry(cf, cq); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloColumnMapping.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloColumnMapping.java new file mode 100644 index 0000000..d09ade1 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloColumnMapping.java @@ -0,0 +1,92 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.log4j.Logger; + +import com.google.common.base.Charsets; + +/** + * A Hive column which maps to a column family and column qualifier pair in Accumulo + */ +public class HiveAccumuloColumnMapping extends ColumnMapping { + @SuppressWarnings("unused") + private static final Logger log = Logger.getLogger(HiveAccumuloColumnMapping.class); + + protected String columnFamily, columnQualifier; + protected byte[] columnFamilyBytes, columnQualifierBytes; + + public HiveAccumuloColumnMapping(String cf, String cq, ColumnEncoding encoding, + String columnName, String columnType) { + super(cf + AccumuloHiveConstants.COLON + cq, encoding, columnName, columnType); + + columnFamily = cf; + columnQualifier = cq; + } + + public String getColumnFamily() { + return this.columnFamily; + } + + /** + * Cached bytes for the columnFamily. Modifications to the bytes will affect those stored in this + * ColumnMapping -- such modifications are highly recommended against. + * + * @return UTF8 formatted bytes + */ + public byte[] getColumnFamilyBytes() { + if (null == columnFamilyBytes) { + columnFamilyBytes = columnFamily.getBytes(Charsets.UTF_8); + } + + return columnFamilyBytes; + } + + public String getColumnQualifier() { + return this.columnQualifier; + } + + /** + * Cached bytes for the columnQualifier. Modifications to the bytes will affect those stored in + * this ColumnMapping -- such modifications are highly recommended against. + * + * @return UTF8 formatted bytes + */ + public byte[] getColumnQualifierBytes() { + if (null == columnQualifierBytes) { + columnQualifierBytes = columnQualifier.getBytes(Charsets.UTF_8); + } + + return columnQualifierBytes; + } + + public String serialize() { + StringBuilder sb = new StringBuilder(16); + sb.append(columnFamily).append(AccumuloHiveConstants.COLON); + if (null != columnQualifier) { + sb.append(columnQualifier); + } + return sb.toString(); + } + + @Override + public String toString() { + return "[" + this.getClass().getSimpleName() + ": " + columnFamily + ":" + columnQualifier + + ", encoding " + encoding + "]"; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java new file mode 100644 index 0000000..b2082e8 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloMapColumnMapping.java @@ -0,0 +1,99 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; + +import com.google.common.base.Preconditions; + +/** + * ColumnMapping for combining Accumulo columns into a single Hive Map. Expects ColumnEncoding + * values for both the Key and Value of the Map. + */ +public class HiveAccumuloMapColumnMapping extends ColumnMapping { + + protected final String columnFamily, columnQualifierPrefix; + protected final ColumnEncoding keyEncoding, valueEncoding; + + /** + * @param columnFamily + * The column family that all qualifiers within should be placed into the same Hive map + * @param columnQualifierPrefix + * The column qualifier prefix to include in the map, null is treated as an empty prefix + * @param keyEncoding + * The encoding scheme for keys in this column family + * @param valueEncoding + * The encoding scheme for the Accumulo values + */ + public HiveAccumuloMapColumnMapping(String columnFamily, String columnQualifierPrefix, + ColumnEncoding keyEncoding, ColumnEncoding valueEncoding, String columnName, + String columnType) { + // Try to make something reasonable to pass up to the base class + super((null == columnFamily ? "" : columnFamily) + AccumuloHiveConstants.COLON, valueEncoding, + columnName, columnType); + + Preconditions.checkNotNull(columnFamily, "Must provide a column family"); + + this.columnFamily = columnFamily; + this.columnQualifierPrefix = (null == columnQualifierPrefix) ? "" : columnQualifierPrefix; + this.keyEncoding = keyEncoding; + this.valueEncoding = valueEncoding; + } + + public String getColumnFamily() { + return columnFamily; + } + + public String getColumnQualifierPrefix() { + return columnQualifierPrefix; + } + + public ColumnEncoding getKeyEncoding() { + return keyEncoding; + } + + public ColumnEncoding getValueEncoding() { + return valueEncoding; + } + + @Override + public boolean equals(Object o) { + if (o instanceof HiveAccumuloMapColumnMapping) { + HiveAccumuloMapColumnMapping other = (HiveAccumuloMapColumnMapping) o; + return columnFamily.equals(other.columnFamily) + && columnQualifierPrefix.equals(other.columnQualifierPrefix) + && keyEncoding.equals(other.keyEncoding) && valueEncoding.equals(other.valueEncoding); + } + + return false; + } + + @Override + public int hashCode() { + HashCodeBuilder hcb = new HashCodeBuilder(23, 31); + hcb.append(columnFamily).append(columnQualifierPrefix).append(keyEncoding) + .append(valueEncoding); + return hcb.toHashCode(); + } + + @Override + public String toString() { + return "[" + this.getClass().getSimpleName() + ": " + columnFamily + ":" + + columnQualifierPrefix + "* encoding: " + keyEncoding + ":" + valueEncoding + "]"; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloRowIdColumnMapping.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloRowIdColumnMapping.java new file mode 100644 index 0000000..d40b025 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveAccumuloRowIdColumnMapping.java @@ -0,0 +1,43 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. 
+ * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.accumulo.core.data.Mutation; +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; + +import com.google.common.base.Preconditions; + +/** + * {@link ColumnMapping} which corresponds to the Hive column which should be used as the rowID in a + * {@link Mutation} + */ +public class HiveAccumuloRowIdColumnMapping extends ColumnMapping { + + public HiveAccumuloRowIdColumnMapping(String columnSpec, ColumnEncoding encoding, + String columnName, String columnType) { + super(columnSpec, encoding, columnName, columnType); + + // Ensure that we have the correct identifier as the column name + Preconditions.checkArgument(columnSpec.equalsIgnoreCase(AccumuloHiveConstants.ROWID)); + } + + @Override + public String toString() { + return "[" + this.getClass().getSimpleName() + ", " + this.mappingSpec + ", encoding " + + encoding + "]"; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveColumn.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveColumn.java new file mode 100644 index 0000000..a8855f7 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/HiveColumn.java @@ -0,0 +1,55 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; + +import com.google.common.base.Preconditions; + +/** + * + */ +public class HiveColumn { + + // The name of this column in the Hive schema + protected final String columnName; + + // The Hive type of this column + protected final TypeInfo columnType; + + public HiveColumn(String columnName, TypeInfo columnType) { + Preconditions.checkNotNull(columnName); + Preconditions.checkNotNull(columnType); + + this.columnName = columnName; + this.columnType = columnType; + } + + /** + * Get the name of the Hive column + */ + public String getColumnName() { + return columnName; + } + + /** + * The Hive type of this column + */ + public TypeInfo getColumnType() { + return columnType; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/InvalidColumnMappingException.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/InvalidColumnMappingException.java new file mode 100644 index 0000000..eb230c4 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/columns/InvalidColumnMappingException.java @@ -0,0 +1,41 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.columns; + +/** + * + */ +public class InvalidColumnMappingException extends IllegalArgumentException { + + private static final long serialVersionUID = 1L; + + public InvalidColumnMappingException() { + super(); + } + + public InvalidColumnMappingException(String msg) { + super(msg); + } + + public InvalidColumnMappingException(String message, Throwable cause) { + super(message, cause); + } + + public InvalidColumnMappingException(Throwable cause) { + super(cause); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloRecordReader.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloRecordReader.java new file mode 100644 index 0000000..45607cb --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloRecordReader.java @@ -0,0 +1,122 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.mr; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedMap; + +import org.apache.accumulo.core.client.mapreduce.AccumuloInputFormat; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.util.PeekingIterator; +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow; +import org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.mapred.RecordReader; + +import com.google.common.collect.Lists; + +/** + * Translate the {@link Key} {@link Value} pairs from {@link AccumuloInputFormat} to a + * {@link Writable} for consumption by the {@link AccumuloSerDe}. + */ +public class HiveAccumuloRecordReader implements RecordReader { + private RecordReader>> recordReader; + private int iteratorCount; + + public HiveAccumuloRecordReader( + RecordReader>> recordReader, int iteratorCount) { + this.recordReader = recordReader; + this.iteratorCount = iteratorCount; + } + + @Override + public void close() throws IOException { + recordReader.close(); + } + + @Override + public Text createKey() { + return new Text(); + } + + @Override + public AccumuloHiveRow createValue() { + return new AccumuloHiveRow(); + } + + @Override + public long getPos() throws IOException { + return 0; + } + + @Override + public float getProgress() throws IOException { + return recordReader.getProgress(); + } + + @Override + public boolean next(Text rowKey, AccumuloHiveRow row) throws IOException { + Text key = recordReader.createKey(); + PeekingIterator> iter = recordReader.createValue(); + if (recordReader.next(key, iter)) { + row.clear(); + row.setRowId(key.toString()); + List keys = Lists.newArrayList(); + List values = Lists.newArrayList(); + while (iter.hasNext()) { // collect key/values for this row. + Map.Entry kv = iter.next(); + keys.add(kv.getKey()); + values.add(kv.getValue()); + + } + if (iteratorCount == 0) { // no encoded values, we can push directly to row. + pushToValue(keys, values, row); + } else { + for (int i = 0; i < iteratorCount; i++) { // each iterator creates a level of encoding. + SortedMap decoded = PrimitiveComparisonFilter.decodeRow(keys.get(0), + values.get(0)); + keys = Lists.newArrayList(decoded.keySet()); + values = Lists.newArrayList(decoded.values()); + } + pushToValue(keys, values, row); // after decoding we can push to value. + } + + return true; + } else { + return false; + } + } + + // flatten key/value pairs into row object for use in Serde. 
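+ // Assumes keys and values are parallel lists of equal length, as built up in next(...) above.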
+ private void pushToValue(List keys, List values, AccumuloHiveRow row) + throws IOException { + Iterator kIter = keys.iterator(); + Iterator vIter = values.iterator(); + while (kIter.hasNext()) { + Key k = kIter.next(); + Value v = vIter.next(); + row.add(k.getColumnFamily().toString(), k.getColumnQualifier().toString(), v.get()); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloSplit.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloSplit.java new file mode 100644 index 0000000..530f232 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloSplit.java @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.accumulo.mr; + +import java.io.DataInput; +import java.io.DataOutput; +import java.io.IOException; + +import org.apache.accumulo.core.client.mapred.RangeInputSplit; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.io.HiveInputFormat.HiveInputSplit; +import org.apache.hadoop.mapred.FileSplit; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; + +/** + * Wraps RangeInputSplit into a FileSplit so Hadoop won't complain when it tries to make its own + * Path. + * + *
+ * If the {@link RangeInputSplit} is used directly, it will hit a branch of code in + * {@link HiveInputSplit} which generates an invalid Path. Wrap it ourselves so that it doesn't + * error + */ +public class HiveAccumuloSplit extends FileSplit implements InputSplit { + private static final Logger log = Logger.getLogger(HiveAccumuloSplit.class); + + private RangeInputSplit split; + + public HiveAccumuloSplit() { + super((Path) null, 0, 0, (String[]) null); + split = new RangeInputSplit(); + } + + public HiveAccumuloSplit(RangeInputSplit split, Path dummyPath) { + super(dummyPath, 0, 0, (String[]) null); + this.split = split; + } + + public RangeInputSplit getSplit() { + return this.split; + } + + @Override + public void readFields(DataInput in) throws IOException { + super.readFields(in); + split.readFields(in); + } + + @Override + public String toString() { + return "HiveAccumuloSplit: " + split; + } + + @Override + public void write(DataOutput out) throws IOException { + super.write(out); + split.write(out); + } + + @Override + public long getLength() { + int len = 0; + try { + return split.getLength(); + } catch (IOException e) { + log.error("Error getting length for split: " + StringUtils.stringifyException(e)); + } + return len; + } + + @Override + public String[] getLocations() throws IOException { + return split.getLocations(); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java new file mode 100644 index 0000000..385b2f4 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableInputFormat.java @@ -0,0 +1,485 @@ +package org.apache.hadoop.hive.accumulo.mr; + +import java.io.IOException; +import java.lang.reflect.InvocationTargetException; +import java.lang.reflect.Method; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collection; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.client.mapred.AccumuloInputFormat; +import org.apache.accumulo.core.client.mapred.AccumuloRowInputFormat; +import org.apache.accumulo.core.client.mapred.RangeInputSplit; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Range; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.util.Pair; +import org.apache.accumulo.core.util.PeekingIterator; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters; +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping; +import 
org.apache.hadoop.hive.accumulo.columns.HiveAccumuloMapColumnMapping; +import org.apache.hadoop.hive.accumulo.predicate.AccumuloPredicateHandler; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.hive.shims.ShimLoader; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.apache.hadoop.mapred.Reporter; +import org.apache.hadoop.mapreduce.Job; +import org.apache.hadoop.mapreduce.JobContext; +import org.apache.hadoop.mapreduce.lib.input.FileInputFormat; +import org.apache.hadoop.util.StringUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Wraps older InputFormat for use with Hive. + * + * Configure input scan with proper ranges, iterators, and columns based on serde properties for + * Hive table. + */ +public class HiveAccumuloTableInputFormat implements + org.apache.hadoop.mapred.InputFormat { + private static final Logger log = LoggerFactory.getLogger(HiveAccumuloTableInputFormat.class); + + // Visible for testing + protected AccumuloRowInputFormat accumuloInputFormat = new AccumuloRowInputFormat(); + protected AccumuloPredicateHandler predicateHandler = AccumuloPredicateHandler.getInstance(); + + @Override + public InputSplit[] getSplits(JobConf jobConf, int numSplits) throws IOException { + final AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(jobConf); + final Instance instance = accumuloParams.getInstance(); + final ColumnMapper columnMapper; + try { + columnMapper = getColumnMapper(jobConf); + } catch (TooManyAccumuloColumnsException e) { + throw new IOException(e); + } + + JobContext context = ShimLoader.getHadoopShims().newJobContext(Job.getInstance(jobConf)); + Path[] tablePaths = FileInputFormat.getInputPaths(context); + + try { + final Connector connector = accumuloParams.getConnector(instance); + final List columnMappings = columnMapper.getColumnMappings(); + final List iterators = predicateHandler.getIterators(jobConf, columnMapper); + final Collection ranges = predicateHandler.getRanges(jobConf, columnMapper); + + // Setting an empty collection of ranges will, unexpectedly, scan all data + // We don't want that. + if (null != ranges && ranges.isEmpty()) { + return new InputSplit[0]; + } + + // Set the relevant information in the Configuration for the AccumuloInputFormat + configure(jobConf, instance, connector, accumuloParams, columnMapper, iterators, ranges); + + int numColumns = columnMappings.size(); + + List readColIds = ColumnProjectionUtils.getReadColumnIDs(jobConf); + + // Sanity check + if (numColumns < readColIds.size()) + throw new IOException("Number of column mappings (" + numColumns + ")" + + " numbers less than the hive table columns. 
(" + readColIds.size() + ")"); + + // get splits from Accumulo + InputSplit[] splits = accumuloInputFormat.getSplits(jobConf, numSplits); + + HiveAccumuloSplit[] hiveSplits = new HiveAccumuloSplit[splits.length]; + for (int i = 0; i < splits.length; i++) { + RangeInputSplit ris = (RangeInputSplit) splits[i]; + hiveSplits[i] = new HiveAccumuloSplit(ris, tablePaths[0]); + } + + return hiveSplits; + } catch (AccumuloException e) { + log.error("Could not configure AccumuloInputFormat", e); + throw new IOException(StringUtils.stringifyException(e)); + } catch (AccumuloSecurityException e) { + log.error("Could not configure AccumuloInputFormat", e); + throw new IOException(StringUtils.stringifyException(e)); + } catch (SerDeException e) { + log.error("Could not configure AccumuloInputFormat", e); + throw new IOException(StringUtils.stringifyException(e)); + } + } + + /** + * Setup accumulo input format from conf properties. Delegates to final RecordReader from mapred + * package. + * + * @param inputSplit + * @param jobConf + * @param reporter + * @return RecordReader + * @throws IOException + */ + @Override + public RecordReader getRecordReader(InputSplit inputSplit, + final JobConf jobConf, final Reporter reporter) throws IOException { + final ColumnMapper columnMapper; + try { + columnMapper = getColumnMapper(jobConf); + } catch (TooManyAccumuloColumnsException e) { + throw new IOException(e); + } + + try { + final List iterators = predicateHandler.getIterators(jobConf, columnMapper); + + HiveAccumuloSplit hiveSplit = (HiveAccumuloSplit) inputSplit; + RangeInputSplit rangeSplit = hiveSplit.getSplit(); + + log.info("Split: " + rangeSplit); + + // The RangeInputSplit *should* have all of the necesary information contained in it + // which alleviates us from re-parsing our configuration from the AccumuloStorageHandler + // and re-setting it into the Configuration (like we did in getSplits(...)). Thus, it should + // be unnecessary to re-invoke configure(...) + + // ACCUMULO-2962 Iterators weren't getting serialized into the InputSplit, but we can + // compensate because we still have that info. + // Should be fixed in Accumulo 1.5.2 and 1.6.1 + if (null == rangeSplit.getIterators() + || (rangeSplit.getIterators().isEmpty() && !iterators.isEmpty())) { + log.debug("Re-setting iterators on InputSplit due to Accumulo bug."); + rangeSplit.setIterators(iterators); + } + + // ACCUMULO-3015 Like the above, RangeInputSplit should have the table name + // but we want it to, so just re-set it if it's null. 
+ if (null == getTableName(rangeSplit)) { + final AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters( + jobConf); + log.debug("Re-setting table name on InputSplit due to Accumulo bug."); + setTableName(rangeSplit, accumuloParams.getAccumuloTableName()); + } + + final RecordReader>> recordReader = accumuloInputFormat + .getRecordReader(rangeSplit, jobConf, reporter); + + return new HiveAccumuloRecordReader(recordReader, iterators.size()); + } catch (SerDeException e) { + throw new IOException(StringUtils.stringifyException(e)); + } + } + + protected ColumnMapper getColumnMapper(Configuration conf) throws IOException, + TooManyAccumuloColumnsException { + final String defaultStorageType = conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE); + + String[] columnNamesArr = conf.getStrings(serdeConstants.LIST_COLUMNS); + if (null == columnNamesArr) { + throw new IOException( + "Hive column names must be provided to InputFormat in the Configuration"); + } + List columnNames = Arrays.asList(columnNamesArr); + + String serializedTypes = conf.get(serdeConstants.LIST_COLUMN_TYPES); + if (null == serializedTypes) { + throw new IOException( + "Hive column types must be provided to InputFormat in the Configuration"); + } + ArrayList columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(serializedTypes); + + return new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), defaultStorageType, + columnNames, columnTypes); + } + + /** + * Configure the underlying AccumuloInputFormat + * + * @param conf + * Job configuration + * @param instance + * Accumulo instance + * @param connector + * Accumulo connector + * @param accumuloParams + * Connection information to the Accumulo instance + * @param columnMapper + * Configuration of Hive to Accumulo columns + * @param iterators + * Any iterators to be configured server-side + * @param ranges + * Accumulo ranges on for the query + * @throws AccumuloSecurityException + * @throws AccumuloException + * @throws SerDeException + */ + protected void configure(JobConf conf, Instance instance, Connector connector, + AccumuloConnectionParameters accumuloParams, ColumnMapper columnMapper, + List iterators, Collection ranges) throws AccumuloSecurityException, + AccumuloException, SerDeException { + + // Handle implementation of Instance and invoke appropriate InputFormat method + if (instance instanceof MockInstance) { + setMockInstance(conf, instance.getInstanceName()); + } else { + setZooKeeperInstance(conf, instance.getInstanceName(), instance.getZooKeepers()); + } + + // Set the username/passwd for the Accumulo connection + setConnectorInfo(conf, accumuloParams.getAccumuloUserName(), + new PasswordToken(accumuloParams.getAccumuloPassword())); + + // Read from the given Accumulo table + setInputTableName(conf, accumuloParams.getAccumuloTableName()); + + // Check Configuration for any user-provided Authorization definition + Authorizations auths = AccumuloSerDeParameters.getAuthorizationsFromConf(conf); + + if (null == auths) { + // Default to all of user's authorizations when no configuration is provided + auths = connector.securityOperations().getUserAuthorizations( + accumuloParams.getAccumuloUserName()); + } + + // Implicitly handles users providing invalid authorizations + setScanAuthorizations(conf, auths); + + // restrict with any filters found from WHERE predicates. + addIterators(conf, iterators); + + // restrict with any ranges found from WHERE predicates. 
+ // not setting ranges scans the entire table + if (null != ranges) { + log.info("Setting ranges: " + ranges); + setRanges(conf, ranges); + } + + // Restrict the set of columns that we want to read from the Accumulo table + HashSet> pairs = getPairCollection(columnMapper.getColumnMappings()); + if (null != pairs && !pairs.isEmpty()) { + fetchColumns(conf, pairs); + } + } + + // Wrap the static AccumuloInputFormat methods with methods that we can + // verify were correctly called via Mockito + + protected void setMockInstance(JobConf conf, String instanceName) { + try { + AccumuloInputFormat.setMockInstance(conf, instanceName); + } catch (IllegalStateException e) { + // AccumuloInputFormat complains if you re-set an already set value. We just don't care. + log.debug("Ignoring exception setting mock instance of " + instanceName, e); + } + } + + @SuppressWarnings("deprecation") + protected void setZooKeeperInstance(JobConf conf, String instanceName, String zkHosts) { + // To support builds against 1.5, we can't use the new 1.6 setZooKeeperInstance which + // takes a ClientConfiguration class that only exists in 1.6 + try { + AccumuloInputFormat.setZooKeeperInstance(conf, instanceName, zkHosts); + } catch (IllegalStateException ise) { + // AccumuloInputFormat complains if you re-set an already set value. We just don't care. + log.debug("Ignoring exception setting ZooKeeper instance of " + instanceName + " at " + + zkHosts, ise); + } + } + + protected void setConnectorInfo(JobConf conf, String user, AuthenticationToken token) + throws AccumuloSecurityException { + try { + AccumuloInputFormat.setConnectorInfo(conf, user, token); + } catch (IllegalStateException e) { + // AccumuloInputFormat complains if you re-set an already set value. We just don't care. + log.debug("Ignoring exception setting Accumulo Connector instance for user " + user, e); + } + } + + protected void setInputTableName(JobConf conf, String tableName) { + AccumuloInputFormat.setInputTableName(conf, tableName); + } + + protected void setScanAuthorizations(JobConf conf, Authorizations auths) { + AccumuloInputFormat.setScanAuthorizations(conf, auths); + } + + protected void addIterators(JobConf conf, List iterators) { + for (IteratorSetting is : iterators) { + AccumuloInputFormat.addIterator(conf, is); + } + } + + protected void setRanges(JobConf conf, Collection ranges) { + AccumuloInputFormat.setRanges(conf, ranges); + } + + protected void fetchColumns(JobConf conf, Set> cfCqPairs) { + AccumuloInputFormat.fetchColumns(conf, cfCqPairs); + } + + /** + * Create col fam/qual pairs from pipe separated values, usually from config object. Ignores + * rowID. 
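+ *
+ * For example, the mappings for ":rowID,person:name,tags:*" would produce the pairs
+ * (person, name) and (tags, null); the rowID mapping contributes no pair.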
+ * + * @param columnMappings + * The list of ColumnMappings for the given query + * @return a Set of Pairs of colfams and colquals + */ + protected HashSet> getPairCollection(List columnMappings) { + final HashSet> pairs = new HashSet>(); + + for (ColumnMapping columnMapping : columnMappings) { + if (columnMapping instanceof HiveAccumuloColumnMapping) { + HiveAccumuloColumnMapping accumuloColumnMapping = (HiveAccumuloColumnMapping) columnMapping; + + Text cf = new Text(accumuloColumnMapping.getColumnFamily()); + Text cq = null; + + // A null cq implies an empty column qualifier + if (null != accumuloColumnMapping.getColumnQualifier()) { + cq = new Text(accumuloColumnMapping.getColumnQualifier()); + } + + pairs.add(new Pair(cf, cq)); + } else if (columnMapping instanceof HiveAccumuloMapColumnMapping) { + HiveAccumuloMapColumnMapping mapMapping = (HiveAccumuloMapColumnMapping) columnMapping; + + // Can't fetch prefix on colqual, must pull the entire qualifier + // TODO use an iterator to do the filter, server-side. + pairs.add(new Pair(new Text(mapMapping.getColumnFamily()), null)); + } + } + + log.info("Computed columns to fetch (" + pairs + ") from " + columnMappings); + + return pairs; + } + + /** + * Reflection to work around Accumulo 1.5 and 1.6 incompatibilities. Throws an {@link IOException} + * for any reflection related exceptions + * + * @param split + * A RangeInputSplit + * @return The name of the table from the split + * @throws IOException + */ + protected String getTableName(RangeInputSplit split) throws IOException { + // ACCUMULO-3017 shenanigans with method names changing without deprecation + Method getTableName = null; + try { + getTableName = RangeInputSplit.class.getMethod("getTableName"); + } catch (SecurityException e) { + log.debug("Could not get getTableName method from RangeInputSplit", e); + } catch (NoSuchMethodException e) { + log.debug("Could not get getTableName method from RangeInputSplit", e); + } + + if (null != getTableName) { + try { + return (String) getTableName.invoke(split); + } catch (IllegalArgumentException e) { + log.debug("Could not invoke getTableName method from RangeInputSplit", e); + } catch (IllegalAccessException e) { + log.debug("Could not invoke getTableName method from RangeInputSplit", e); + } catch (InvocationTargetException e) { + log.debug("Could not invoke getTableName method from RangeInputSplit", e); + } + } + + Method getTable; + try { + getTable = RangeInputSplit.class.getMethod("getTable"); + } catch (SecurityException e) { + throw new IOException("Could not get table name from RangeInputSplit", e); + } catch (NoSuchMethodException e) { + throw new IOException("Could not get table name from RangeInputSplit", e); + } + + try { + return (String) getTable.invoke(split); + } catch (IllegalArgumentException e) { + throw new IOException("Could not get table name from RangeInputSplit", e); + } catch (IllegalAccessException e) { + throw new IOException("Could not get table name from RangeInputSplit", e); + } catch (InvocationTargetException e) { + throw new IOException("Could not get table name from RangeInputSplit", e); + } + } + + /** + * Sets the table name on a RangeInputSplit, accounting for change in method name. 
Any reflection + * related exception is wrapped in an {@link IOException} + * + * @param split + * The RangeInputSplit to operate on + * @param tableName + * The name of the table to set + * @throws IOException + */ + protected void setTableName(RangeInputSplit split, String tableName) throws IOException { + // ACCUMULO-3017 shenanigans with method names changing without deprecation + Method setTableName = null; + try { + setTableName = RangeInputSplit.class.getMethod("setTableName", String.class); + } catch (SecurityException e) { + log.debug("Could not get getTableName method from RangeInputSplit", e); + } catch (NoSuchMethodException e) { + log.debug("Could not get getTableName method from RangeInputSplit", e); + } + + if (null != setTableName) { + try { + setTableName.invoke(split, tableName); + return; + } catch (IllegalArgumentException e) { + log.debug("Could not invoke getTableName method from RangeInputSplit", e); + } catch (IllegalAccessException e) { + log.debug("Could not invoke getTableName method from RangeInputSplit", e); + } catch (InvocationTargetException e) { + log.debug("Could not invoke getTableName method from RangeInputSplit", e); + } + } + + Method setTable; + try { + setTable = RangeInputSplit.class.getMethod("setTable", String.class); + } catch (SecurityException e) { + throw new IOException("Could not set table name from RangeInputSplit", e); + } catch (NoSuchMethodException e) { + throw new IOException("Could not set table name from RangeInputSplit", e); + } + + try { + setTable.invoke(split, tableName); + } catch (IllegalArgumentException e) { + throw new IOException("Could not set table name from RangeInputSplit", e); + } catch (IllegalAccessException e) { + throw new IOException("Could not set table name from RangeInputSplit", e); + } catch (InvocationTargetException e) { + throw new IOException("Could not set table name from RangeInputSplit", e); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java new file mode 100644 index 0000000..5cf008e --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/mr/HiveAccumuloTableOutputFormat.java @@ -0,0 +1,94 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.mr; + +import java.io.IOException; + +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.mapred.AccumuloOutputFormat; +import org.apache.accumulo.core.client.security.tokens.AuthenticationToken; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.mapred.JobConf; + +import com.google.common.base.Preconditions; + +/** + * + */ +public class HiveAccumuloTableOutputFormat extends AccumuloOutputFormat { + + @Override + public void checkOutputSpecs(FileSystem ignored, JobConf job) throws IOException { + configureAccumuloOutputFormat(job); + + super.checkOutputSpecs(ignored, job); + } + + protected void configureAccumuloOutputFormat(JobConf job) throws IOException { + AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(job); + + final String tableName = job.get(AccumuloSerDeParameters.TABLE_NAME); + + // Make sure we actually go the table name + Preconditions.checkNotNull(tableName, + "Expected Accumulo table name to be provided in job configuration"); + + // Set the necessary Accumulo information + try { + // Username/passwd for Accumulo + setAccumuloConnectorInfo(job, cnxnParams.getAccumuloUserName(), + new PasswordToken(cnxnParams.getAccumuloPassword())); + + if (cnxnParams.useMockInstance()) { + setAccumuloMockInstance(job, cnxnParams.getAccumuloInstanceName()); + } else { + // Accumulo instance name with ZK quorum + setAccumuloZooKeeperInstance(job, cnxnParams.getAccumuloInstanceName(), + cnxnParams.getZooKeepers()); + } + + // Set the table where we're writing this data + setDefaultAccumuloTableName(job, tableName); + } catch (AccumuloSecurityException e) { + log.error("Could not connect to Accumulo with provided credentials", e); + throw new IOException(e); + } + } + + // Non-static methods to wrap the static AccumuloOutputFormat methods to enable testing + + protected void setAccumuloConnectorInfo(JobConf conf, String username, AuthenticationToken token) + throws AccumuloSecurityException { + AccumuloOutputFormat.setConnectorInfo(conf, username, token); + } + + @SuppressWarnings("deprecation") + protected void setAccumuloZooKeeperInstance(JobConf conf, String instanceName, String zookeepers) { + AccumuloOutputFormat.setZooKeeperInstance(conf, instanceName, zookeepers); + } + + protected void setAccumuloMockInstance(JobConf conf, String instanceName) { + AccumuloOutputFormat.setMockInstance(conf, instanceName); + } + + protected void setDefaultAccumuloTableName(JobConf conf, String tableName) { + AccumuloOutputFormat.setDefaultTableName(conf, tableName); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/package-info.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/package-info.java new file mode 100644 index 0000000..4fd6ba7 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/package-info.java @@ -0,0 +1,4 @@ +/** + * Serde and InputFormat support for connecting Hive to Accumulo tables. 
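+ * Hive columns are mapped onto Accumulo column families and qualifiers through the SerDe
+ * property named by AccumuloSerDeParameters#COLUMN_MAPPINGS; connection details are read via
+ * AccumuloConnectionParameters.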
+ */ +package org.apache.hadoop.hive.accumulo; \ No newline at end of file diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java new file mode 100644 index 0000000..5edc9b5 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloPredicateHandler.java @@ -0,0 +1,408 @@ +package org.apache.hadoop.hive.accumulo.predicate; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.data.Range; +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping; +import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp; +import org.apache.hadoop.hive.accumulo.predicate.compare.DoubleCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.Equal; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThanOrEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThanOrEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.Like; +import org.apache.hadoop.hive.accumulo.predicate.compare.LongCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.NotEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.PrimitiveComparison; +import org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.index.IndexPredicateAnalyzer; +import org.apache.hadoop.hive.ql.index.IndexSearchCondition; +import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.GraphWalker; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.metadata.HiveStoragePredicateHandler.DecomposedPredicate; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.udf.UDFLike; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotEqual; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.log4j.Logger; + +import com.google.common.collect.Lists; +import 
com.google.common.collect.Maps; + +/** + * + * Supporting operations dealing with Hive Predicate pushdown to iterators and ranges. + * + * See {@link PrimitiveComparisonFilter} + * + */ +public class AccumuloPredicateHandler { + private static final List TOTAL_RANGE = Collections.singletonList(new Range()); + + private static AccumuloPredicateHandler handler = new AccumuloPredicateHandler(); + private static Map> compareOps = Maps.newHashMap(); + private static Map> pComparisons = Maps.newHashMap(); + + // Want to start sufficiently "high" enough in the iterator stack + private static int iteratorCount = 50; + + private static final Logger log = Logger.getLogger(AccumuloPredicateHandler.class); + static { + compareOps.put(GenericUDFOPEqual.class.getName(), Equal.class); + compareOps.put(GenericUDFOPNotEqual.class.getName(), NotEqual.class); + compareOps.put(GenericUDFOPGreaterThan.class.getName(), GreaterThan.class); + compareOps.put(GenericUDFOPEqualOrGreaterThan.class.getName(), GreaterThanOrEqual.class); + compareOps.put(GenericUDFOPEqualOrLessThan.class.getName(), LessThanOrEqual.class); + compareOps.put(GenericUDFOPLessThan.class.getName(), LessThan.class); + compareOps.put(UDFLike.class.getName(), Like.class); + + pComparisons.put("bigint", LongCompare.class); + pComparisons.put("int", IntCompare.class); + pComparisons.put("double", DoubleCompare.class); + pComparisons.put("string", StringCompare.class); + } + + public static AccumuloPredicateHandler getInstance() { + return handler; + } + + /** + * + * @return set of all UDF class names with matching CompareOpt implementations. + */ + public Set cOpKeyset() { + return compareOps.keySet(); + } + + /** + * + * @return set of all hive data types with matching PrimitiveCompare implementations. + */ + public Set pComparisonKeyset() { + return pComparisons.keySet(); + } + + /** + * + * @param udfType + * GenericUDF classname to lookup matching CompareOpt + * @return Class + */ + public Class getCompareOpClass(String udfType) + throws NoSuchCompareOpException { + if (!compareOps.containsKey(udfType)) + throw new NoSuchCompareOpException("Null compare op for specified key: " + udfType); + return compareOps.get(udfType); + } + + public CompareOp getCompareOp(String udfType, IndexSearchCondition sc) + throws NoSuchCompareOpException, SerDeException { + Class clz = getCompareOpClass(udfType); + + try { + return clz.newInstance(); + } catch (ClassCastException e) { + throw new SerDeException("Column type mismatch in WHERE clause " + + sc.getComparisonExpr().getExprString() + " found type " + + sc.getConstantDesc().getTypeString() + " instead of " + + sc.getColumnDesc().getTypeString()); + } catch (IllegalAccessException e) { + throw new SerDeException("Could not instantiate class for WHERE clause", e); + } catch (InstantiationException e) { + throw new SerDeException("Could not instantiate class for WHERE clause", e); + } + } + + /** + * + * @param type + * String hive column lookup matching PrimitiveCompare + * @return Class + */ + public Class getPrimitiveComparisonClass(String type) + throws NoSuchPrimitiveComparisonException { + if (!pComparisons.containsKey(type)) + throw new NoSuchPrimitiveComparisonException("Null primitive comparison for specified key: " + + type); + return pComparisons.get(type); + } + + public PrimitiveComparison getPrimitiveComparison(String type, IndexSearchCondition sc) + throws NoSuchPrimitiveComparisonException, SerDeException { + Class clz = getPrimitiveComparisonClass(type); + + try { + return 
clz.newInstance(); + } catch (ClassCastException e) { + throw new SerDeException("Column type mismatch in WHERE clause " + + sc.getComparisonExpr().getExprString() + " found type " + + sc.getConstantDesc().getTypeString() + " instead of " + + sc.getColumnDesc().getTypeString()); + } catch (IllegalAccessException e) { + throw new SerDeException("Could not instantiate class for WHERE clause", e); + } catch (InstantiationException e) { + throw new SerDeException("Could not instantiate class for WHERE clause", e); + } + } + + private AccumuloPredicateHandler() {} + + /** + * Loop through search conditions and build ranges for predicates involving rowID column, if any. + */ + public List getRanges(Configuration conf, ColumnMapper columnMapper) throws SerDeException { + if (!columnMapper.hasRowIdMapping()) { + return TOTAL_RANGE; + } + + int rowIdOffset = columnMapper.getRowIdOffset(); + String[] hiveColumnNamesArr = conf.getStrings(serdeConstants.LIST_COLUMNS); + + if (null == hiveColumnNamesArr) { + throw new IllegalArgumentException("Could not find Hive columns in configuration"); + } + + // Already verified that we should have the rowId mapping + String hiveRowIdColumnName = hiveColumnNamesArr[rowIdOffset]; + + ExprNodeDesc root = this.getExpression(conf); + + // No expression, therefore scan the whole table + if (null == root) { + return TOTAL_RANGE; + } + + Object result = generateRanges(columnMapper, hiveRowIdColumnName, root); + + if (null == result) { + log.info("Calculated null set of ranges, scanning full table"); + return TOTAL_RANGE; + } else if (result instanceof Range) { + log.info("Computed a single Range for the query: " + result); + return Collections.singletonList((Range) result); + } else if (result instanceof List) { + log.info("Computed a collection of Ranges for the query: " + result); + @SuppressWarnings("unchecked") + List ranges = (List) result; + return ranges; + } else { + throw new IllegalArgumentException("Unhandled return from Range generation: " + result); + } + } + + /** + * Encapsulates the traversal over some {@link ExprNodeDesc} tree for the generation of Accumuluo + * Ranges using expressions involving the Accumulo rowid-mapped Hive column + * + * @param columnMapper + * Mapping of Hive to Accumulo columns for the query + * @param hiveRowIdColumnName + * Name of the hive column mapped to the Accumulo rowid + * @param root + * Root of some ExprNodeDesc tree to traverse, the WHERE clause + * @return An object representing the result from the ExprNodeDesc tree traversal using the + * AccumuloRangeGenerator + */ + protected Object generateRanges(ColumnMapper columnMapper, String hiveRowIdColumnName, ExprNodeDesc root) { + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, + columnMapper.getRowIdMapping(), hiveRowIdColumnName); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList roots = new ArrayList(); + roots.add(root); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(roots, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + return nodeOutput.get(root); + } + + /** + * Loop through search conditions and build iterator settings for predicates involving columns + * other than rowID, if any. 
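For reference, iterator pushdown can be switched off per job. A minimal sketch, assuming only a Hadoop Configuration and the constant referenced below (its literal key string lives in AccumuloSerDeParameters):

Configuration conf = new Configuration();
conf.setBoolean(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, false);
// with the flag false, getIterators(conf, columnMapper) returns an empty list and no filters are pushed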
+ * + * @param conf + * Configuration + * @throws SerDeException + */ + public List getIterators(Configuration conf, ColumnMapper columnMapper) + throws SerDeException { + List itrs = Lists.newArrayList(); + boolean shouldPushdown = conf.getBoolean(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, + AccumuloSerDeParameters.ITERATOR_PUSHDOWN_DEFAULT); + if (!shouldPushdown) { + log.info("Iterator pushdown is disabled for this table"); + return itrs; + } + + int rowIdOffset = columnMapper.getRowIdOffset(); + String[] hiveColumnNamesArr = conf.getStrings(serdeConstants.LIST_COLUMNS); + + if (null == hiveColumnNamesArr) { + throw new IllegalArgumentException("Could not find Hive columns in configuration"); + } + + String hiveRowIdColumnName = null; + + if (rowIdOffset >= 0 && rowIdOffset < hiveColumnNamesArr.length) { + hiveRowIdColumnName = hiveColumnNamesArr[rowIdOffset]; + } + + List hiveColumnNames = Arrays.asList(hiveColumnNamesArr); + + for (IndexSearchCondition sc : getSearchConditions(conf)) { + String col = sc.getColumnDesc().getColumn(); + if (hiveRowIdColumnName == null || !hiveRowIdColumnName.equals(col)) { + HiveAccumuloColumnMapping mapping = (HiveAccumuloColumnMapping) columnMapper + .getColumnMappingForHiveColumn(hiveColumnNames, col); + itrs.add(toSetting(mapping, sc)); + } + } + if (log.isInfoEnabled()) + log.info("num iterators = " + itrs.size()); + return itrs; + } + + /** + * Create an IteratorSetting for the right qualifier, constant, CompareOpt, and PrimitiveCompare + * type. + * + * @param accumuloColumnMapping + * ColumnMapping to filter + * @param sc + * IndexSearchCondition + * @return IteratorSetting + * @throws SerDeException + */ + public IteratorSetting toSetting(HiveAccumuloColumnMapping accumuloColumnMapping, + IndexSearchCondition sc) throws SerDeException { + iteratorCount++; + final IteratorSetting is = new IteratorSetting(iteratorCount, + PrimitiveComparisonFilter.FILTER_PREFIX + iteratorCount, PrimitiveComparisonFilter.class); + final String type = sc.getColumnDesc().getTypeString(); + final String comparisonOpStr = sc.getComparisonOp(); + + PushdownTuple tuple; + try { + tuple = new PushdownTuple(sc, getPrimitiveComparison(type, sc), getCompareOp(comparisonOpStr, + sc)); + } catch (NoSuchPrimitiveComparisonException e) { + throw new SerDeException("No configured PrimitiveComparison class for " + type, e); + } catch (NoSuchCompareOpException e) { + throw new SerDeException("No configured CompareOp class for " + comparisonOpStr, e); + } + + is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, tuple.getpCompare().getClass() + .getName()); + is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, tuple.getcOpt().getClass().getName()); + is.addOption(PrimitiveComparisonFilter.CONST_VAL, + new String(Base64.encodeBase64(tuple.getConstVal()))); + is.addOption(PrimitiveComparisonFilter.COLUMN, accumuloColumnMapping.serialize()); + + return is; + } + + public ExprNodeDesc getExpression(Configuration conf) { + String filteredExprSerialized = conf.get(TableScanDesc.FILTER_EXPR_CONF_STR); + if (filteredExprSerialized == null) + return null; + + return Utilities.deserializeExpression(filteredExprSerialized); + } + + /** + * + * @param conf + * Configuration + * @return list of IndexSearchConditions from the filter expression. 
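To make the flow above concrete, a rough usage sketch follows (hypothetical driver code, not part of this patch; conn, tableName, auths, columnMapper and conf are assumed to be in scope and checked exceptions are omitted):

AccumuloPredicateHandler handler = AccumuloPredicateHandler.getInstance();
List<Range> ranges = handler.getRanges(conf, columnMapper);                  // rowid predicates become Ranges
List<IteratorSetting> iterators = handler.getIterators(conf, columnMapper);  // remaining predicates become filter iterators

Scanner scanner = conn.createScanner(tableName, auths);
scanner.setRange(ranges.get(0)); // a BatchScanner could take the whole list via setRanges(ranges)
for (IteratorSetting setting : iterators) {
  scanner.addScanIterator(setting);
}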
+ */ + public List getSearchConditions(Configuration conf) { + final List sConditions = Lists.newArrayList(); + ExprNodeDesc filterExpr = getExpression(conf); + if (null == filterExpr) { + return sConditions; + } + IndexPredicateAnalyzer analyzer = newAnalyzer(conf); + ExprNodeDesc residual = analyzer.analyzePredicate(filterExpr, sConditions); + if (residual != null) + throw new RuntimeException("Unexpected residual predicate: " + residual.getExprString()); + return sConditions; + } + + /** + * + * @param conf + * Configuration + * @param desc + * predicate expression node. + * @return DecomposedPredicate containing translated search conditions the analyzer can support. + */ + public DecomposedPredicate decompose(Configuration conf, ExprNodeDesc desc) { + IndexPredicateAnalyzer analyzer = newAnalyzer(conf); + List sConditions = new ArrayList(); + ExprNodeDesc residualPredicate = analyzer.analyzePredicate(desc, sConditions); + + if (sConditions.size() == 0) { + if (log.isInfoEnabled()) + log.info("nothing to decompose. Returning"); + return null; + } + + DecomposedPredicate decomposedPredicate = new DecomposedPredicate(); + decomposedPredicate.pushedPredicate = analyzer.translateSearchConditions(sConditions); + decomposedPredicate.residualPredicate = (ExprNodeGenericFuncDesc) residualPredicate; + return decomposedPredicate; + } + + /** + * Build an analyzer that allows comparison opts from compareOpts map, and all columns from table + * definition. + */ + private IndexPredicateAnalyzer newAnalyzer(Configuration conf) { + IndexPredicateAnalyzer analyzer = new IndexPredicateAnalyzer(); + analyzer.clearAllowedColumnNames(); + for (String op : cOpKeyset()) { + analyzer.addComparisonOp(op); + } + + String[] hiveColumnNames = conf.getStrings(serdeConstants.LIST_COLUMNS); + for (String col : hiveColumnNames) { + analyzer.allowColumnName(col); + } + + return analyzer; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java new file mode 100644 index 0000000..d794e94 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/AccumuloRangeGenerator.java @@ -0,0 +1,355 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.predicate; + +import java.io.ByteArrayOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Stack; + +import org.apache.accumulo.core.data.Range; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp; +import org.apache.hadoop.hive.accumulo.predicate.compare.Equal; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThanOrEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThanOrEqual; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.serde2.lazy.LazyUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantBooleanObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantByteObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantDoubleObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantFloatObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantIntObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantLongObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableConstantShortObjectInspector; +import org.apache.hadoop.io.Text; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * + */ +public class AccumuloRangeGenerator implements NodeProcessor { + private static final Logger log = LoggerFactory.getLogger(AccumuloRangeGenerator.class); + + private final AccumuloPredicateHandler predicateHandler; + private final HiveAccumuloRowIdColumnMapping rowIdMapping; + private final String hiveRowIdColumnName; + + public AccumuloRangeGenerator(AccumuloPredicateHandler predicateHandler, + HiveAccumuloRowIdColumnMapping rowIdMapping, String hiveRowIdColumnName) { + this.predicateHandler = predicateHandler; + this.rowIdMapping = rowIdMapping; + this.hiveRowIdColumnName = hiveRowIdColumnName; + } + + @Override + public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) + throws SemanticException { + // If it's not some operator, pass it back + if (!(nd instanceof ExprNodeGenericFuncDesc)) { + return nd; + } + + ExprNodeGenericFuncDesc func = (ExprNodeGenericFuncDesc) nd; + + // 'and' nodes need to be intersected + if (FunctionRegistry.isOpAnd(func)) { + return processAndOpNode(nd, nodeOutputs); + // 'or' nodes need to be merged + } else if (FunctionRegistry.isOpOr(func)) { + return processOrOpNode(nd, nodeOutputs); + } else if (FunctionRegistry.isOpNot(func)) { + // TODO handle negations + throw new IllegalArgumentException("Negations not yet implemented"); + } else { + return processExpression(func, nodeOutputs); + } + } + + protected Object processAndOpNode(Node nd, Object[] nodeOutputs) { + // We might have multiple ranges coming from children + List andRanges = null; + + for (Object nodeOutput : nodeOutputs) { + // null signifies nodes that are irrelevant to the generation + // of Accumulo Ranges + if (null == nodeOutput) { + continue; + } + + // When an AND has no children (some conjunction over a field that isn't the column + // mapped to the Accumulo rowid) and when a conjunction generates Ranges which are empty + // (the children of the conjunction are disjoint), these two cases need to be kept separate. + // + // A null `andRanges` implies that ranges couldn't be computed, while an empty List + // of Ranges implies that there are no possible Ranges to lookup. + if (null == andRanges) { + andRanges = new ArrayList(); + } + + // The child is a single Range + if (nodeOutput instanceof Range) { + Range childRange = (Range) nodeOutput; + + // No existing ranges, just accept the current + if (andRanges.isEmpty()) { + andRanges.add(childRange); + } else { + // For each range we have, intersect them. 
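For illustration, the clip call used here behaves as follows on simple string row ranges (returnNullIfDisjoint is passed as true, so disjoint inputs produce null rather than an exception):

Range overlap = new Range("a", "m").clip(new Range("f", "z"), true);  // the intersection ["f", "m"]
Range disjoint = new Range("a", "b").clip(new Range("x", "z"), true); // null, so the pair is discarded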
If they don't overlap + // the range can be discarded + List newRanges = new ArrayList(); + for (Range andRange : andRanges) { + Range intersectedRange = andRange.clip(childRange, true); + if (null != intersectedRange) { + newRanges.add(intersectedRange); + } + } + + // Set the newly-constructed ranges as the current state + andRanges = newRanges; + } + } else if (nodeOutput instanceof List) { + @SuppressWarnings("unchecked") + List childRanges = (List) nodeOutput; + + // No ranges, use the ranges from the child + if (andRanges.isEmpty()) { + andRanges.addAll(childRanges); + } else { + List newRanges = new ArrayList(); + + // Cartesian product of our ranges, to the child ranges + for (Range andRange : andRanges) { + for (Range childRange : childRanges) { + Range intersectedRange = andRange.clip(childRange, true); + + // Retain only valid intersections (discard disjoint ranges) + if (null != intersectedRange) { + newRanges.add(intersectedRange); + } + } + } + + // Set the newly-constructed ranges as the current state + andRanges = newRanges; + } + } else { + log.error("Expected Range from {} but got {}", nd, nodeOutput); + throw new IllegalArgumentException("Expected Range but got " + + nodeOutput.getClass().getName()); + } + } + + return andRanges; + } + + protected Object processOrOpNode(Node nd, Object[] nodeOutputs) { + List orRanges = new ArrayList(nodeOutputs.length); + for (Object nodeOutput : nodeOutputs) { + if (nodeOutput instanceof Range) { + orRanges.add((Range) nodeOutput); + } else if (nodeOutput instanceof List) { + @SuppressWarnings("unchecked") + List childRanges = (List) nodeOutput; + orRanges.addAll(childRanges); + } else { + log.error("Expected Range from " + nd + " but got " + nodeOutput); + throw new IllegalArgumentException("Expected Range but got " + + nodeOutput.getClass().getName()); + } + } + + // Try to merge multiple ranges together + if (orRanges.size() > 1) { + return Range.mergeOverlapping(orRanges); + } else if (1 == orRanges.size()) { + // Return just the single Range + return orRanges.get(0); + } else { + // No ranges, just return the empty list + return orRanges; + } + } + + protected Object processExpression(ExprNodeGenericFuncDesc func, Object[] nodeOutputs) + throws SemanticException { + // a binary operator (gt, lt, ge, le, eq, ne) + GenericUDF genericUdf = func.getGenericUDF(); + + // Find the argument to the operator which is a constant + ExprNodeConstantDesc constantDesc = null; + ExprNodeColumnDesc columnDesc = null; + ExprNodeDesc leftHandNode = null; + for (Object nodeOutput : nodeOutputs) { + if (nodeOutput instanceof ExprNodeConstantDesc) { + // Ordering of constant and column in expression is important in correct range generation + if (null == leftHandNode) { + leftHandNode = (ExprNodeDesc) nodeOutput; + } + + constantDesc = (ExprNodeConstantDesc) nodeOutput; + } else if (nodeOutput instanceof ExprNodeColumnDesc) { + // Ordering of constant and column in expression is important in correct range generation + if (null == leftHandNode) { + leftHandNode = (ExprNodeDesc) nodeOutput; + } + + columnDesc = (ExprNodeColumnDesc) nodeOutput; + } + } + + // If it's constant = constant or column = column, we can't fetch any ranges + // TODO We can try to be smarter and push up the value to some node which + // we can generate ranges from e.g. 
rowid > (4 + 5) + if (null == constantDesc || null == columnDesc) { + return null; + } + + // Reject any clauses that are against a column that isn't the rowId mapping + if (!this.hiveRowIdColumnName.equals(columnDesc.getColumn())) { + return null; + } + + ConstantObjectInspector objInspector = constantDesc.getWritableObjectInspector(); + + Text constText; + switch (rowIdMapping.getEncoding()) { + case STRING: + constText = getUtf8Value(objInspector); + break; + case BINARY: + try { + constText = getBinaryValue(objInspector); + } catch (IOException e) { + throw new SemanticException(e); + } + break; + default: + throw new SemanticException("Unable to parse unknown encoding: " + + rowIdMapping.getEncoding()); + } + + Class opClz; + try { + opClz = predicateHandler.getCompareOpClass(genericUdf.getUdfName()); + } catch (NoSuchCompareOpException e) { + throw new IllegalArgumentException("Unhandled UDF class: " + genericUdf.getUdfName()); + } + + if (leftHandNode instanceof ExprNodeConstantDesc) { + return getConstantOpColumnRange(opClz, constText); + } else if (leftHandNode instanceof ExprNodeColumnDesc) { + return getColumnOpConstantRange(opClz, constText); + } else { + throw new IllegalStateException("Expected column or constant on LHS of expression"); + } + } + + protected Range getConstantOpColumnRange(Class opClz, Text constText) { + if (opClz.equals(Equal.class)) { + // 100 == x + return new Range(constText); // single row + } else if (opClz.equals(GreaterThanOrEqual.class)) { + // 100 >= x + return new Range(null, constText); // neg-infinity to end inclusive + } else if (opClz.equals(GreaterThan.class)) { + // 100 > x + return new Range(null, false, constText, false); // neg-infinity to end exclusive + } else if (opClz.equals(LessThanOrEqual.class)) { + // 100 <= x + return new Range(constText, true, null, false); // start inclusive to infinity + } else if (opClz.equals(LessThan.class)) { + // 100 < x + return new Range(constText, false, null, false); // start exclusive to infinity + } else { + throw new IllegalArgumentException("Could not process " + opClz); + } + } + + protected Range getColumnOpConstantRange(Class opClz, Text constText) { + if (opClz.equals(Equal.class)) { + return new Range(constText); // start inclusive to end inclusive + } else if (opClz.equals(GreaterThanOrEqual.class)) { + return new Range(constText, null); // start inclusive to infinity inclusive + } else if (opClz.equals(GreaterThan.class)) { + return new Range(constText, false, null, false); // start exclusive to infinity inclusive + } else if (opClz.equals(LessThanOrEqual.class)) { + return new Range(null, false, constText, true); // neg-infinity to start inclusive + } else if (opClz.equals(LessThan.class)) { + return new Range(null, false, constText, false); // neg-infinity to start exclusive + } else { + throw new IllegalArgumentException("Could not process " + opClz); + } + } + + protected Text getUtf8Value(ConstantObjectInspector objInspector) { + // TODO is there a more correct way to get the literal value for the Object? + return new Text(objInspector.getWritableConstantValue().toString()); + } + + /** + * Attempts to construct the binary value from the given inspector. Falls back to UTF8 encoding + * when the value cannot be coerced into binary. 
+ * + * @return Binary value when possible, utf8 otherwise + * @throws IOException + */ + protected Text getBinaryValue(ConstantObjectInspector objInspector) throws IOException { + ByteArrayOutputStream out = new ByteArrayOutputStream(); + if (objInspector instanceof WritableConstantBooleanObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantBooleanObjectInspector) objInspector); + } else if (objInspector instanceof WritableConstantByteObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantByteObjectInspector) objInspector); + } else if (objInspector instanceof WritableConstantShortObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantShortObjectInspector) objInspector); + } else if (objInspector instanceof WritableConstantIntObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantIntObjectInspector) objInspector); + } else if (objInspector instanceof WritableConstantLongObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantLongObjectInspector) objInspector); + } else if (objInspector instanceof WritableConstantDoubleObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantDoubleObjectInspector) objInspector); + } else if (objInspector instanceof WritableConstantFloatObjectInspector) { + LazyUtils.writePrimitive(out, objInspector.getWritableConstantValue(), + (WritableConstantDoubleObjectInspector) objInspector); + } else { + return getUtf8Value(objInspector); + } + + out.close(); + return new Text(out.toByteArray()); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/NoSuchCompareOpException.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/NoSuchCompareOpException.java new file mode 100644 index 0000000..962185c --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/NoSuchCompareOpException.java @@ -0,0 +1,37 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.predicate; + +/** + * + */ +public class NoSuchCompareOpException extends Exception { + + private static final long serialVersionUID = 1L; + + public NoSuchCompareOpException() { + super(); + } + + public NoSuchCompareOpException(String msg) { + super(msg); + } + + public NoSuchCompareOpException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/NoSuchPrimitiveComparisonException.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/NoSuchPrimitiveComparisonException.java new file mode 100644 index 0000000..c305a9e --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/NoSuchPrimitiveComparisonException.java @@ -0,0 +1,39 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.predicate; + +import org.apache.hadoop.hive.accumulo.predicate.compare.PrimitiveComparison; + +/** + * Used when a {@link PrimitiveComparison} was specified but one with that name cannot be found + */ +public class NoSuchPrimitiveComparisonException extends Exception { + + private static final long serialVersionUID = 1L; + + public NoSuchPrimitiveComparisonException() { + super(); + } + + public NoSuchPrimitiveComparisonException(String msg) { + super(msg); + } + + public NoSuchPrimitiveComparisonException(String msg, Throwable cause) { + super(msg, cause); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java new file mode 100644 index 0000000..c303d49 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PrimitiveComparisonFilter.java @@ -0,0 +1,123 @@ +package org.apache.hadoop.hive.accumulo.predicate; + +import java.io.IOException; +import java.util.Collection; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.SortedMap; + +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.iterators.IteratorEnvironment; +import org.apache.accumulo.core.iterators.SortedKeyValueIterator; +import org.apache.accumulo.core.iterators.user.WholeRowIterator; +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMappingFactory; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping; +import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp; +import 
org.apache.hadoop.hive.accumulo.predicate.compare.PrimitiveComparison; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.apache.log4j.Logger; + +import com.google.common.collect.Lists; + +/** + * Operates over a single qualifier. + * + * Delegates to PrimitiveCompare and CompareOpt instances for value acceptance. + * + * The PrimitiveCompare strategy assumes a consistent value type for the same column family and + * qualifier. + */ +public class PrimitiveComparisonFilter extends WholeRowIterator { + @SuppressWarnings("unused") + private static final Logger log = Logger.getLogger(PrimitiveComparisonFilter.class); + + public static final String FILTER_PREFIX = "accumulo.filter.compare.iterator."; + public static final String P_COMPARE_CLASS = "accumulo.filter.iterator.p.compare.class"; + public static final String COMPARE_OPT_CLASS = "accumulo.filter.iterator.compare.opt.class"; + public static final String CONST_VAL = "accumulo.filter.iterator.const.val"; + public static final String COLUMN = "accumulo.filter.iterator.qual"; + + private Text cfHolder, cqHolder, columnMappingFamily, columnMappingQualifier; + private HiveAccumuloColumnMapping columnMapping; + private CompareOp compOpt; + + @Override + protected boolean filter(Text currentRow, List keys, List values) { + SortedMap items; + boolean allow; + try { // if key doesn't contain CF, it's an encoded value from a previous iterator. + while (keys.get(0).getColumnFamily().getBytes().length == 0) { + items = decodeRow(keys.get(0), values.get(0)); + keys = Lists.newArrayList(items.keySet()); + values = Lists.newArrayList(items.values()); + } + allow = accept(keys, values); + } catch (IOException e) { + throw new RuntimeException(e); + } + return allow; + } + + private boolean accept(Collection keys, Collection values) { + Iterator kIter = keys.iterator(); + Iterator vIter = values.iterator(); + while (kIter.hasNext()) { + Key k = kIter.next(); + Value v = vIter.next(); + if (matchQualAndFam(k)) { + return compOpt.accept(v.get()); + } + } + return false; + } + + private boolean matchQualAndFam(Key k) { + k.getColumnFamily(cfHolder); + k.getColumnQualifier(cqHolder); + return cfHolder.equals(columnMappingFamily) && cqHolder.equals(columnMappingQualifier); + } + + @Override + public void init(SortedKeyValueIterator source, Map options, + IteratorEnvironment env) throws IOException { + super.init(source, options, env); + String serializedColumnMapping = options.get(COLUMN); + Entry pair = ColumnMappingFactory.parseMapping(serializedColumnMapping); + + // The ColumnEncoding, column name and type are all irrelevant at this point, just need the + // cf:[cq] + columnMapping = new HiveAccumuloColumnMapping(pair.getKey(), pair.getValue(), + ColumnEncoding.STRING, "column", "string"); + columnMappingFamily = new Text(columnMapping.getColumnFamily()); + columnMappingQualifier = new Text(columnMapping.getColumnQualifier()); + cfHolder = new Text(); + cqHolder = new Text(); + + try { + Class pClass = Class.forName(options.get(P_COMPARE_CLASS)); + Class cClazz = Class.forName(options.get(COMPARE_OPT_CLASS)); + PrimitiveComparison pCompare = pClass.asSubclass(PrimitiveComparison.class).newInstance(); + compOpt = cClazz.asSubclass(CompareOp.class).newInstance(); + byte[] constant = getConstant(options); + pCompare.init(constant); + compOpt.setPrimitiveCompare(pCompare); + } catch (ClassNotFoundException e) { + throw new IOException(e); + } catch (InstantiationException e) { + throw new 
IOException(e); + } catch (IllegalAccessException e) { + throw new IOException(e); + } + } + + protected byte[] getConstant(Map options) { + String b64Const = options.get(CONST_VAL); + return Base64.decodeBase64(b64Const.getBytes()); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PushdownTuple.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PushdownTuple.java new file mode 100644 index 0000000..32d143a --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/PushdownTuple.java @@ -0,0 +1,110 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.predicate; + +import java.nio.ByteBuffer; + +import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp; +import org.apache.hadoop.hive.accumulo.predicate.compare.DoubleCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.LongCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.PrimitiveComparison; +import org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare; +import org.apache.hadoop.hive.ql.exec.ExprNodeConstantEvaluator; +import org.apache.hadoop.hive.ql.index.IndexSearchCondition; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.io.DoubleWritable; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.LongWritable; +import org.apache.hadoop.io.Writable; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; + +/** + * For use in IteratorSetting construction. + * + * encapsulates a constant byte [], PrimitiveCompare instance, and CompareOp instance. 
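In rough terms, the server-side filter rebuilds and applies these three pieces the same way PrimitiveComparisonFilter.init does. A minimal sketch, assuming a tuple built for a predicate such as price > 10 and a cell value valueBytes read from Accumulo (both names are hypothetical here):

PrimitiveComparison pCompare = tuple.getpCompare(); // e.g. an IntCompare
CompareOp cOpt = tuple.getcOpt();                   // e.g. a GreaterThan
pCompare.init(tuple.getConstVal());                 // bind the serialized constant
cOpt.setPrimitiveCompare(pCompare);
boolean accepted = cOpt.accept(valueBytes);         // true when the stored value satisfies the predicate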
+ */ +public class PushdownTuple { + private static final Logger log = Logger.getLogger(PushdownTuple.class); + + private byte[] constVal; + private PrimitiveComparison pCompare; + private CompareOp cOpt; + + public PushdownTuple(IndexSearchCondition sc, PrimitiveComparison pCompare, CompareOp cOpt) + throws SerDeException { + ExprNodeConstantEvaluator eval = new ExprNodeConstantEvaluator(sc.getConstantDesc()); + + try { + this.pCompare = pCompare; + this.cOpt = cOpt; + Writable writable = (Writable) eval.evaluate(null); + constVal = getConstantAsBytes(writable); + } catch (ClassCastException cce) { + log.info(StringUtils.stringifyException(cce)); + throw new SerDeException(" Column type mismatch in where clause " + + sc.getComparisonExpr().getExprString() + " found type " + + sc.getConstantDesc().getTypeString() + " instead of " + + sc.getColumnDesc().getTypeString()); + } catch (HiveException e) { + throw new SerDeException(e); + } + } + + public byte[] getConstVal() { + return constVal; + } + + public PrimitiveComparison getpCompare() { + return pCompare; + } + + public CompareOp getcOpt() { + return cOpt; + } + + /** + * + * @return byte [] value from writable. + * @throws SerDeException + */ + public byte[] getConstantAsBytes(Writable writable) throws SerDeException { + if (pCompare instanceof StringCompare) { + return writable.toString().getBytes(); + } else if (pCompare instanceof DoubleCompare) { + byte[] bts = new byte[8]; + double val = ((DoubleWritable) writable).get(); + ByteBuffer.wrap(bts).putDouble(val); + return bts; + } else if (pCompare instanceof IntCompare) { + byte[] bts = new byte[4]; + int val = ((IntWritable) writable).get(); + ByteBuffer.wrap(bts).putInt(val); + return bts; + } else if (pCompare instanceof LongCompare) { + byte[] bts = new byte[8]; + long val = ((LongWritable) writable).get(); + ByteBuffer.wrap(bts).putLong(val); + return bts; + } else { + throw new SerDeException("Unsupported primitive category: " + pCompare.getClass().getName()); + } + } + +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java new file mode 100644 index 0000000..0585333 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/CompareOp.java @@ -0,0 +1,26 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Handles different types of comparisons in hive predicates. Filter iterator delegates value + * acceptance to the CompareOpt. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter}. 
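A small worked example of that delegation, assuming the big-endian int encoding produced by PushdownTuple.getConstantAsBytes for an int constant:

PrimitiveComparison intCompare = new IntCompare();
intCompare.init(ByteBuffer.allocate(4).putInt(10).array()); // constant from a predicate like col > 10
CompareOp op = new GreaterThan();
op.setPrimitiveCompare(intCompare);
op.accept(ByteBuffer.allocate(4).putInt(11).array()); // true: 11 > 10
op.accept(ByteBuffer.allocate(4).putInt(7).array());  // false: 7 > 10 does not hold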
Works with + * {@link PrimitiveComparison} + */ +public interface CompareOp { + /** + * Sets the PrimitiveComparison for this CompareOp + */ + public void setPrimitiveCompare(PrimitiveComparison comp); + + /** + * @return The PrimitiveComparison this CompareOp is a part of + */ + public PrimitiveComparison getPrimitiveCompare(); + + /** + * @param val The bytes from the Accumulo Value + * @return true if the value is accepted by this CompareOp + */ + public boolean accept(byte[] val); +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java new file mode 100644 index 0000000..210ad72 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/DoubleCompare.java @@ -0,0 +1,90 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import java.math.BigDecimal; +import java.nio.ByteBuffer; + +/** + * Set of comparison operations over a double constant. Used for Hive predicates involving double + * comparison. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class DoubleCompare implements PrimitiveComparison { + + private BigDecimal constant; + + /** + * + */ + public void init(byte[] constant) { + this.constant = serialize(constant); + } + + /** + * @return BigDecimal holding double byte [] value + */ + public BigDecimal serialize(byte[] value) { + try { + return new BigDecimal(ByteBuffer.wrap(value).asDoubleBuffer().get()); + } catch (Exception e) { + throw new RuntimeException(e.toString() + " occurred trying to build double value. " + + "Make sure the value type for the byte[] is double."); + } + } + + /** + * @return true if double value is equal to constant, false otherwise. + */ + @Override + public boolean isEqual(byte[] value) { + return serialize(value).compareTo(constant) == 0; + } + + /** + * @return true if double value not equal to constant, false otherwise. + */ + @Override + public boolean isNotEqual(byte[] value) { + return serialize(value).compareTo(constant) != 0; + } + + /** + * @return true if value greater than or equal to constant, false otherwise. + */ + @Override + public boolean greaterThanOrEqual(byte[] value) { + return serialize(value).compareTo(constant) >= 0; + } + + /** + * @return true if value greater than constant, false otherwise. + */ + @Override + public boolean greaterThan(byte[] value) { + return serialize(value).compareTo(constant) > 0; + } + + /** + * @return true if value less than or equal than constant, false otherwise. + */ + @Override + public boolean lessThanOrEqual(byte[] value) { + return serialize(value).compareTo(constant) <= 0; + } + + /** + * @return true if value less than constant, false otherwise. + */ + @Override + public boolean lessThan(byte[] value) { + return serialize(value).compareTo(constant) < 0; + } + + /** + * not supported for this PrimitiveCompare implementation. 
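A quick illustration of both the numeric path and this unsupported case, assuming the 8-byte big-endian layout read by serialize():

DoubleCompare cmp = new DoubleCompare();
cmp.init(ByteBuffer.allocate(8).putDouble(1.5d).array());     // constant 1.5
cmp.lessThan(ByteBuffer.allocate(8).putDouble(1.0d).array()); // true: 1.0 < 1.5
cmp.like("foo%".getBytes());                                  // throws UnsupportedOperationException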
+ */ + @Override + public boolean like(byte[] value) { + throw new UnsupportedOperationException("Like not supported for " + getClass().getName()); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java new file mode 100644 index 0000000..3a34f12 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Equal.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to isEqual() over PrimitiveCompare instance. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class Equal implements CompareOp { + + private PrimitiveComparison comp; + + public Equal() {} + + public Equal(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return comp; + } + + @Override + public boolean accept(byte[] val) { + return comp.isEqual(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java new file mode 100644 index 0000000..a47b2a3 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThan.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to greaterThan over {@link PrimitiveComparison} instance. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class GreaterThan implements CompareOp { + + private PrimitiveComparison comp; + + public GreaterThan() {} + + public GreaterThan(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return this.comp; + } + + @Override + public boolean accept(byte[] val) { + return comp.greaterThan(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java new file mode 100644 index 0000000..c502a45 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/GreaterThanOrEqual.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to greaterThanOrEqual over {@link PrimitiveComparison} instance. 
+ * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class GreaterThanOrEqual implements CompareOp { + + private PrimitiveComparison comp; + + public GreaterThanOrEqual() {} + + public GreaterThanOrEqual(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return comp; + } + + @Override + public boolean accept(byte[] val) { + return comp.greaterThanOrEqual(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java new file mode 100644 index 0000000..d7de1ff --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/IntCompare.java @@ -0,0 +1,63 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import java.nio.ByteBuffer; + +/** + * Set of comparison operations over a integer constant. Used for Hive predicates involving int + * comparison. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class IntCompare implements PrimitiveComparison { + + private int constant; + + @Override + public void init(byte[] constant) { + this.constant = serialize(constant); + } + + @Override + public boolean isEqual(byte[] value) { + return serialize(value) == constant; + } + + @Override + public boolean isNotEqual(byte[] value) { + return serialize(value) != constant; + } + + @Override + public boolean greaterThanOrEqual(byte[] value) { + return serialize(value) >= constant; + } + + @Override + public boolean greaterThan(byte[] value) { + return serialize(value) > constant; + } + + @Override + public boolean lessThanOrEqual(byte[] value) { + return serialize(value) <= constant; + } + + @Override + public boolean lessThan(byte[] value) { + return serialize(value) < constant; + } + + @Override + public boolean like(byte[] value) { + throw new UnsupportedOperationException("Like not supported for " + getClass().getName()); + } + + public Integer serialize(byte[] value) { + try { + return ByteBuffer.wrap(value).asIntBuffer().get(); + } catch (Exception e) { + throw new RuntimeException(e.toString() + " occurred trying to build int value. " + + "Make sure the value type for the byte[] is int "); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java new file mode 100644 index 0000000..2933131 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThan.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to lessThan over {@link PrimitiveComparison} instance. 
+ * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class LessThan implements CompareOp { + + private PrimitiveComparison comp; + + public LessThan() {} + + public LessThan(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return comp; + } + + @Override + public boolean accept(byte[] val) { + return comp.lessThan(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java new file mode 100644 index 0000000..86acb73 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LessThanOrEqual.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to lessThanOrEqual over {@link PrimitiveComparison} instance. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class LessThanOrEqual implements CompareOp { + + private PrimitiveComparison comp; + + public LessThanOrEqual() {} + + public LessThanOrEqual(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return comp; + } + + @Override + public boolean accept(byte[] val) { + return comp.lessThanOrEqual(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java new file mode 100644 index 0000000..612641d --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/Like.java @@ -0,0 +1,33 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to like over {@link PrimitiveComparison} instance. Currently only supported by + * StringCompare. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class Like implements CompareOp { + + PrimitiveComparison comp; + + public Like() {} + + public Like(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return comp; + } + + @Override + public boolean accept(byte[] val) { + return comp.like(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java new file mode 100644 index 0000000..b32874f --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/LongCompare.java @@ -0,0 +1,64 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import java.nio.ByteBuffer; + +/** + * Set of comparison operations over a long constant. Used for Hive predicates involving long + * comparison. 
+ * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class LongCompare implements PrimitiveComparison { + + private long constant; + + @Override + public void init(byte[] constant) { + this.constant = serialize(constant); + } + + @Override + public boolean isEqual(byte[] value) { + long lonVal = serialize(value); + return lonVal == constant; + } + + @Override + public boolean isNotEqual(byte[] value) { + return serialize(value) != constant; + } + + @Override + public boolean greaterThanOrEqual(byte[] value) { + return serialize(value) >= constant; + } + + @Override + public boolean greaterThan(byte[] value) { + return serialize(value) > constant; + } + + @Override + public boolean lessThanOrEqual(byte[] value) { + return serialize(value) <= constant; + } + + @Override + public boolean lessThan(byte[] value) { + return serialize(value) < constant; + } + + @Override + public boolean like(byte[] value) { + throw new UnsupportedOperationException("Like not supported for " + getClass().getName()); + } + + public Long serialize(byte[] value) { + try { + return ByteBuffer.wrap(value).asLongBuffer().get(); + } catch (Exception e) { + throw new RuntimeException(e.toString() + " occurred trying to build long value. " + + "Make sure the value type for the byte[] is long "); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java new file mode 100644 index 0000000..22b84ba --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/NotEqual.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps call to isEqual over {@link PrimitiveComparison} instance and returns the negation. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class NotEqual implements CompareOp { + + private PrimitiveComparison comp; + + public NotEqual() {} + + public NotEqual(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public void setPrimitiveCompare(PrimitiveComparison comp) { + this.comp = comp; + } + + @Override + public PrimitiveComparison getPrimitiveCompare() { + return comp; + } + + @Override + public boolean accept(byte[] val) { + return !comp.isEqual(val); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java new file mode 100644 index 0000000..26e194f --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/PrimitiveComparison.java @@ -0,0 +1,32 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +/** + * Wraps type-specific comparison operations over a constant value. Methods take raw byte from + * incoming Accumulo values. + * + * The CompareOpt instance in the iterator uses one or more methods from a PrimitiveCompare + * implementation to perform type-specific comparisons and determine acceptances. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter}. 
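The contract is compact enough that a new Hive type could be covered by a single class. A hypothetical sketch for float values (FloatCompare is not part of this patch, and registering it would also mean extending the handler's private pComparisons map):

import java.nio.ByteBuffer;

public class FloatCompare implements PrimitiveComparison {
  private float constant;

  @Override
  public void init(byte[] constant) { this.constant = serialize(constant); }

  @Override
  public Float serialize(byte[] value) { return ByteBuffer.wrap(value).asFloatBuffer().get(); }

  @Override
  public boolean isEqual(byte[] value) { return serialize(value) == constant; }

  @Override
  public boolean isNotEqual(byte[] value) { return !isEqual(value); }

  @Override
  public boolean greaterThan(byte[] value) { return serialize(value) > constant; }

  @Override
  public boolean greaterThanOrEqual(byte[] value) { return serialize(value) >= constant; }

  @Override
  public boolean lessThan(byte[] value) { return serialize(value) < constant; }

  @Override
  public boolean lessThanOrEqual(byte[] value) { return serialize(value) <= constant; }

  @Override
  public boolean like(byte[] value) {
    throw new UnsupportedOperationException("Like not supported for " + getClass().getName());
  }
}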
Works with + * {@link CompareOp} + */ +public interface PrimitiveComparison { + + public boolean isEqual(byte[] value); + + public boolean isNotEqual(byte[] value); + + public boolean greaterThanOrEqual(byte[] value); + + public boolean greaterThan(byte[] value); + + public boolean lessThanOrEqual(byte[] value); + + public boolean lessThan(byte[] value); + + public boolean like(byte[] value); + + public Object serialize(byte[] value); + + public void init(byte[] constant); +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java new file mode 100644 index 0000000..b71b8a8 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/StringCompare.java @@ -0,0 +1,65 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import java.util.regex.Pattern; + +import org.apache.log4j.Logger; + +/** + * Set of comparison operations over a string constant. Used for Hive predicates involving string + * comparison. + * + * Used by {@link org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter} + */ +public class StringCompare implements PrimitiveComparison { + @SuppressWarnings("unused") + private static final Logger log = Logger.getLogger(StringCompare.class); + + private String constant; + + @Override + public void init(byte[] constant) { + this.constant = serialize(constant); + } + + @Override + public boolean isEqual(byte[] value) { + return serialize(value).equals(constant); + } + + @Override + public boolean isNotEqual(byte[] value) { + return !isEqual(value); + } + + @Override + public boolean greaterThanOrEqual(byte[] value) { + return serialize(value).compareTo(constant) >= 0; + } + + @Override + public boolean greaterThan(byte[] value) { + return serialize(value).compareTo(constant) > 0; + } + + @Override + public boolean lessThanOrEqual(byte[] value) { + return serialize(value).compareTo(constant) <= 0; + } + + @Override + public boolean lessThan(byte[] value) { + return serialize(value).compareTo(constant) < 0; + } + + @Override + public boolean like(byte[] value) { + String temp = new String(value).replaceAll("%", "[\\\\\\w]+?"); + Pattern pattern = Pattern.compile(temp); + boolean match = pattern.matcher(constant).matches(); + return match; + } + + public String serialize(byte[] value) { + return new String(value); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/package-info.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/package-info.java new file mode 100644 index 0000000..875fad2 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/compare/package-info.java @@ -0,0 +1,4 @@ +/** + * PrimitiveCompare and CompareOpt implementations for use in PrimitiveComparisonFilter iterator + */ +package org.apache.hadoop.hive.accumulo.predicate.compare; \ No newline at end of file diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/package-info.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/package-info.java new file mode 100644 index 0000000..419ce01 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/predicate/package-info.java @@ -0,0 +1,4 @@ +/** + * Predicate pushdown to Accumulo filter iterators. 
+ */ +package org.apache.hadoop.hive.accumulo.predicate; \ No newline at end of file diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java new file mode 100644 index 0000000..f3ebbd1 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloCompositeRowId.java @@ -0,0 +1,95 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.accumulo.serde; + +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObject; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; + +/** + * AccumuloCompositeKey extension of LazyStruct. All complex composite keys should extend this class + * and override the {@link LazyStruct#getField(int)} method where fieldID corresponds to the ID of a + * key in the composite key. + *

+ * <p>
+ * For example, for a composite key "/part1/part2/part3", part1 will have an id 0, part2 will have
+ * an id 1 and part3 will have an id 2. Custom implementations of getField(fieldID) should return
+ * the value corresponding to that fieldID. So, for the above example, the value returned for
+ * getField(0) should be part1, getField(1) should be part2 and getField(2) should be part3.
+ * </p>
+ *
+ * <p>
+ * All custom implementations are expected to have a constructor of the form:
+ * </p>
+ *
+ * <pre>
+ * MyCustomCompositeKey(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf)
+ * </pre>
+ * 
+ *

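+ * <p>
+ * A hypothetical sketch (the class name and the '/' delimiter are illustrative only, not part of
+ * this handler), assuming the raw row bytes are available through the protected bytes/start/length
+ * fields that LazyStruct inherits:
+ * </p>
+ *
+ * <pre>
+ * public class SlashSeparatedRowId extends AccumuloCompositeRowId {
+ *   public SlashSeparatedRowId(LazySimpleStructObjectInspector oi, Properties tbl, Configuration conf) {
+ *     super(oi);
+ *   }
+ *
+ *   public Object getField(int fieldID) {
+ *     // "/part1/part2/part3" splits to ["", "part1", "part2", "part3"]
+ *     String[] parts = new String(bytes.getData(), start, length).split("/");
+ *     return toLazyObject(fieldID, parts[fieldID + 1].getBytes());
+ *   }
+ * }
+ * </pre>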
+ * + */ +public class AccumuloCompositeRowId extends LazyStruct { + + public AccumuloCompositeRowId(LazySimpleStructObjectInspector oi) { + super(oi); + } + + @Override + public ArrayList getFieldsAsList() { + ArrayList allFields = new ArrayList(); + + List fields = oi.getAllStructFieldRefs(); + + for (int i = 0; i < fields.size(); i++) { + allFields.add(getField(i)); + } + + return allFields; + } + + /** + * Create an initialize a {@link LazyObject} with the given bytes for the given fieldID. + * + * @param fieldID + * field for which the object is to be created + * @param bytes + * value with which the object is to be initialized with + * @return initialized {@link LazyObject} + * */ + public LazyObject toLazyObject(int fieldID, byte[] bytes) { + ObjectInspector fieldOI = oi.getAllStructFieldRefs().get(fieldID).getFieldObjectInspector(); + + LazyObject lazyObject = LazyFactory.createLazyObject(fieldOI); + + ByteArrayRef ref = new ByteArrayRef(); + + ref.setData(bytes); + + // initialize the lazy object + lazyObject.init(ref, 0, ref.getData().length); + + return lazyObject; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowIdFactory.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowIdFactory.java new file mode 100644 index 0000000..d82a392 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowIdFactory.java @@ -0,0 +1,67 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.IOException; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.serde2.ByteStream; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; + +/** + * Interface for providing custom Accumulo RowID generation/parsing + */ +public interface AccumuloRowIdFactory { + + /** + * initialize factory with properties + */ + public void init(AccumuloSerDeParameters serDeParams, Properties properties) + throws SerDeException; + + /** + * create custom object inspector for accumulo rowId + * + * @param type + * type information + */ + public ObjectInspector createRowIdObjectInspector(TypeInfo type) throws SerDeException; + + /** + * create custom object for accumulo + * + * @param inspector + * OI create by {@link AccumuloRowIdFactory#createRowIdObjectInspector} + */ + public LazyObjectBase createRowId(ObjectInspector inspector) throws SerDeException; + + /** + * serialize hive object in internal format of custom key + */ + public byte[] serializeRowId(Object object, StructField field, ByteStream.Output output) + throws IOException; + + /** + * Add this implementation to the classpath for the Job + */ + public void addDependencyJars(Configuration conf) throws IOException; +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java new file mode 100644 index 0000000..d168012 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloRowSerializer.java @@ -0,0 +1,383 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.IOException; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloMapColumnMapping; +import org.apache.hadoop.hive.serde2.ByteStream; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeUtils; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.lazy.LazyUtils; +import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.log4j.Logger; + +import com.google.common.base.Charsets; +import com.google.common.base.Preconditions; + +/** + * Serializes a Struct to an Accumulo row as per the definition provided by the + * {@link ColumnMapping}s + */ +public class AccumuloRowSerializer { + private static final Logger log = Logger.getLogger(AccumuloRowSerializer.class); + + private final int rowIdOffset; + private final ByteStream.Output output; + private final SerDeParameters serDeParams; + private final List mappings; + private final ColumnVisibility visibility; + private final AccumuloRowIdFactory rowIdFactory; + + public AccumuloRowSerializer(int primaryKeyOffset, SerDeParameters serDeParams, + List mappings, ColumnVisibility visibility, AccumuloRowIdFactory rowIdFactory) { + Preconditions.checkArgument(primaryKeyOffset >= 0, + "A valid offset to the mapping for the Accumulo RowID is required, received " + + primaryKeyOffset); + this.rowIdOffset = primaryKeyOffset; + this.output = new ByteStream.Output(); + this.serDeParams = serDeParams; + this.mappings = mappings; + this.visibility = visibility; + this.rowIdFactory = rowIdFactory; + } + + public Mutation serialize(Object obj, ObjectInspector objInspector) throws SerDeException, + IOException { + if (objInspector.getCategory() != ObjectInspector.Category.STRUCT) { + throw new SerDeException(getClass().toString() + + " can only serialize struct types, but we got: " + objInspector.getTypeName()); + } + + // Prepare the field ObjectInspectors + StructObjectInspector soi = (StructObjectInspector) objInspector; + List fields = soi.getAllStructFieldRefs(); + List columnValues = soi.getStructFieldsDataAsList(obj); + + // Fail if we try to access an offset out of bounds + if (rowIdOffset >= fields.size()) { + throw new IllegalStateException( + "Attempted to access field outside of definition for struct. 
Have " + fields.size() + + " fields and tried to access offset " + rowIdOffset); + } + + StructField field = fields.get(rowIdOffset); + Object value = columnValues.get(rowIdOffset); + + // The ObjectInspector for the row ID + ObjectInspector fieldObjectInspector = field.getFieldObjectInspector(); + + log.info("Serializing rowId with " + value + " in " + field + " using " + + rowIdFactory.getClass()); + + // Serialize the row component using the RowIdFactory. In the normal case, this will just + // delegate back to the "local" serializeRowId method + byte[] data = rowIdFactory.serializeRowId(value, field, output); + + // Set that as the row id in the mutation + Mutation mutation = new Mutation(data); + + // Each column in the row + for (int i = 0; i < fields.size(); i++) { + if (rowIdOffset == i) { + continue; + } + + // Get the relevant information for this column + field = fields.get(i); + value = columnValues.get(i); + + // Despite having a fixed schema from Hive, we have sparse columns in Accumulo + if (null == value) { + continue; + } + + // The ObjectInspector for the current column + fieldObjectInspector = field.getFieldObjectInspector(); + + // Make sure we got the right implementation of a ColumnMapping + ColumnMapping mapping = mappings.get(i); + if (mapping instanceof HiveAccumuloColumnMapping) { + serializeColumnMapping((HiveAccumuloColumnMapping) mapping, fieldObjectInspector, value, + mutation); + } else if (mapping instanceof HiveAccumuloMapColumnMapping) { + serializeColumnMapping((HiveAccumuloMapColumnMapping) mapping, fieldObjectInspector, value, + mutation); + } else { + throw new IllegalArgumentException("Mapping for " + field.getFieldName() + + " was not a HiveColumnMapping, but was " + mapping.getClass()); + } + + } + + return mutation; + } + + protected void serializeColumnMapping(HiveAccumuloColumnMapping columnMapping, + ObjectInspector fieldObjectInspector, Object value, Mutation mutation) throws IOException { + // Get the serialized value for the column + byte[] serializedValue = getSerializedValue(fieldObjectInspector, value, output, columnMapping); + + // Put it all in the Mutation + mutation.put(columnMapping.getColumnFamilyBytes(), columnMapping.getColumnQualifierBytes(), + visibility, serializedValue); + } + + /** + * Serialize the Hive Map into an Accumulo row + */ + protected void serializeColumnMapping(HiveAccumuloMapColumnMapping columnMapping, + ObjectInspector fieldObjectInspector, Object value, Mutation mutation) throws IOException { + MapObjectInspector mapObjectInspector = (MapObjectInspector) fieldObjectInspector; + + Map map = mapObjectInspector.getMap(value); + if (map == null) { + return; + } + + ObjectInspector keyObjectInspector = mapObjectInspector.getMapKeyObjectInspector(), valueObjectInspector = mapObjectInspector + .getMapValueObjectInspector(); + + byte[] cfBytes = columnMapping.getColumnFamily().getBytes(Charsets.UTF_8), cqPrefixBytes = columnMapping + .getColumnQualifierPrefix().getBytes(Charsets.UTF_8); + byte[] cqBytes, valueBytes; + for (Entry entry : map.entrySet()) { + output.reset(); + + // If the cq prefix is non-empty, add it to the CQ before we set the mutation + if (0 < cqPrefixBytes.length) { + output.write(cqPrefixBytes, 0, cqPrefixBytes.length); + } + + // Write the "suffix" of the cq + writeWithLevel(keyObjectInspector, entry.getKey(), output, columnMapping, 3); + cqBytes = output.toByteArray(); + + output.reset(); + + // Write the value + writeWithLevel(valueObjectInspector, entry.getValue(), output, columnMapping, 3); + 
valueBytes = output.toByteArray(); + + mutation.put(cfBytes, cqBytes, visibility, valueBytes); + } + } + + /** + * Serialize an Accumulo rowid + */ + protected byte[] serializeRowId(Object rowId, StructField rowIdField, ColumnMapping rowIdMapping) + throws IOException { + if (rowId == null) { + throw new IOException("Accumulo rowId cannot be NULL"); + } + // Reset the buffer we're going to use + output.reset(); + ObjectInspector rowIdFieldOI = rowIdField.getFieldObjectInspector(); + String rowIdMappingType = rowIdMapping.getColumnType(); + TypeInfo rowIdTypeInfo = TypeInfoUtils.getTypeInfoFromTypeString(rowIdMappingType); + + if (!rowIdFieldOI.getCategory().equals(ObjectInspector.Category.PRIMITIVE) + && rowIdTypeInfo.getCategory() == ObjectInspector.Category.PRIMITIVE) { + // we always serialize the String type using the escaped algorithm for LazyString + writeString(output, SerDeUtils.getJSONString(rowId, rowIdFieldOI), + PrimitiveObjectInspectorFactory.javaStringObjectInspector); + return output.toByteArray(); + } + + // use the serialization option switch to write primitive values as either a variable + // length UTF8 string or a fixed width bytes if serializing in binary format + getSerializedValue(rowIdFieldOI, rowId, output, rowIdMapping); + return output.toByteArray(); + } + + /** + * Compute the serialized value from the given element and object inspectors. Based on the Hive + * types, represented through the ObjectInspectors for the whole object and column within the + * object, serialize the object appropriately. + * + * @param fieldObjectInspector + * ObjectInspector for the column value being serialized + * @param value + * The Object itself being serialized + * @param output + * A temporary buffer to reduce object creation + * @return The serialized bytes from the provided value. + * @throws IOException + * An error occurred when performing IO to serialize the data + */ + protected byte[] getSerializedValue(ObjectInspector fieldObjectInspector, Object value, + ByteStream.Output output, ColumnMapping mapping) throws IOException { + // Reset the buffer we're going to use + output.reset(); + + // Start by only serializing primitives as-is + if (fieldObjectInspector.getCategory() == ObjectInspector.Category.PRIMITIVE) { + writeSerializedPrimitive((PrimitiveObjectInspector) fieldObjectInspector, output, value, + mapping.getEncoding()); + } else { + // We only accept a struct, which means that we're already nested one level deep + writeWithLevel(fieldObjectInspector, value, output, mapping, 2); + } + + return output.toByteArray(); + } + + /** + * Recursively serialize an Object using its {@link ObjectInspector}, respecting the + * separators defined by the {@link SerDeParameters}. 
+ * @param oi ObjectInspector for the current object + * @param value The current object + * @param output A buffer output is written to + * @param mapping The mapping for this Hive column + * @param level The current level/offset for the SerDe separator + * @throws IOException + */ + protected void writeWithLevel(ObjectInspector oi, Object value, ByteStream.Output output, + ColumnMapping mapping, int level) throws IOException { + switch (oi.getCategory()) { + case PRIMITIVE: + if (mapping.getEncoding() == ColumnEncoding.BINARY) { + this.writeBinary(output, value, (PrimitiveObjectInspector) oi); + } else { + this.writeString(output, value, (PrimitiveObjectInspector) oi); + } + return; + case LIST: + char separator = (char) serDeParams.getSeparators()[level]; + ListObjectInspector loi = (ListObjectInspector) oi; + List list = loi.getList(value); + ObjectInspector eoi = loi.getListElementObjectInspector(); + if (list == null) { + log.debug("No objects found when serializing list"); + return; + } else { + for (int i = 0; i < list.size(); i++) { + if (i > 0) { + output.write(separator); + } + writeWithLevel(eoi, list.get(i), output, mapping, level + 1); + } + } + return; + case MAP: + char sep = (char) serDeParams.getSeparators()[level]; + char keyValueSeparator = (char) serDeParams.getSeparators()[level + 1]; + MapObjectInspector moi = (MapObjectInspector) oi; + ObjectInspector koi = moi.getMapKeyObjectInspector(); + ObjectInspector voi = moi.getMapValueObjectInspector(); + + Map map = moi.getMap(value); + if (map == null) { + log.debug("No object found when serializing map"); + return; + } else { + boolean first = true; + for (Map.Entry entry : map.entrySet()) { + if (first) { + first = false; + } else { + output.write(sep); + } + writeWithLevel(koi, entry.getKey(), output, mapping, level + 2); + output.write(keyValueSeparator); + writeWithLevel(voi, entry.getValue(), output, mapping, level + 2); + } + } + return; + case STRUCT: + sep = (char) serDeParams.getSeparators()[level]; + StructObjectInspector soi = (StructObjectInspector) oi; + List fields = soi.getAllStructFieldRefs(); + list = soi.getStructFieldsDataAsList(value); + if (list == null) { + log.debug("No object found when serializing struct"); + return; + } else { + for (int i = 0; i < list.size(); i++) { + if (i > 0) { + output.write(sep); + } + + writeWithLevel(fields.get(i).getFieldObjectInspector(), list.get(i), output, mapping, + level + 1); + } + } + + return; + default: + throw new RuntimeException("Unknown category type: " + oi.getCategory()); + } + } + + /** + * Serialize the given primitive to the given output buffer, using the provided encoding + * mechanism. 
+ * + * @param objectInspector + * The PrimitiveObjectInspector for this Object + * @param output + * A buffer to write the serialized value to + * @param value + * The Object being serialized + * @param encoding + * The means in which the Object should be serialized + * @throws IOException + */ + protected void writeSerializedPrimitive(PrimitiveObjectInspector objectInspector, + ByteStream.Output output, Object value, ColumnEncoding encoding) throws IOException { + // Despite STRING being a primitive, it can't be serialized as binary + if (objectInspector.getPrimitiveCategory() != PrimitiveCategory.STRING && ColumnEncoding.BINARY == encoding) { + writeBinary(output, value, objectInspector); + } else { + writeString(output, value, objectInspector); + } + } + + protected void writeBinary(ByteStream.Output output, Object value, + PrimitiveObjectInspector inspector) throws IOException { + LazyUtils.writePrimitive(output, value, inspector); + } + + protected void writeString(ByteStream.Output output, Object value, + PrimitiveObjectInspector inspector) throws IOException { + LazyUtils.writePrimitiveUTF8(output, value, inspector, serDeParams.isEscaped(), + serDeParams.getEscapeChar(), serDeParams.getNeedsEscape()); + } + + protected ColumnVisibility getVisibility() { + return visibility; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java new file mode 100644 index 0000000..240521f --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDe.java @@ -0,0 +1,140 @@ +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +import org.apache.accumulo.core.data.Mutation; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow; +import org.apache.hadoop.hive.accumulo.LazyAccumuloRow; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.serde2.SerDe; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeStats; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.io.Writable; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * Deserialization from Accumulo to LazyAccumuloRow for Hive. 
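+ *
+ * <p>
+ * A minimal initialization sketch. The property values are illustrative, and the ":rowID" token
+ * used to mark the row-id column in the mapping is an assumption here rather than something
+ * defined in this class:
+ * </p>
+ *
+ * <pre>
+ * Properties props = new Properties();
+ * props.setProperty(serdeConstants.LIST_COLUMNS, "row,col1,col2");
+ * props.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string");
+ * props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq1,cf:cq2");
+ *
+ * AccumuloSerDe serde = new AccumuloSerDe();
+ * serde.initialize(new Configuration(), props);
+ * </pre>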
+ * + */ +public class AccumuloSerDe implements SerDe { + + private AccumuloSerDeParameters accumuloSerDeParameters; + private LazyAccumuloRow cachedRow; + private ObjectInspector cachedObjectInspector; + private AccumuloRowSerializer serializer; + + private static final Logger log = LoggerFactory.getLogger(AccumuloSerDe.class); + + public void initialize(Configuration conf, Properties properties) throws SerDeException { + accumuloSerDeParameters = new AccumuloSerDeParameters(conf, properties, getClass().getName()); + + final SerDeParameters serDeParams = accumuloSerDeParameters.getSerDeParameters(); + final List mappings = accumuloSerDeParameters.getColumnMappings(); + final List columnTypes = accumuloSerDeParameters.getHiveColumnTypes(); + final AccumuloRowIdFactory factory = accumuloSerDeParameters.getRowIdFactory(); + + ArrayList columnObjectInspectors = getColumnObjectInspectors(columnTypes, serDeParams, mappings, factory); + + cachedObjectInspector = LazyObjectInspectorFactory.getLazySimpleStructObjectInspector( + serDeParams.getColumnNames(), columnObjectInspectors, serDeParams.getSeparators()[0], + serDeParams.getNullSequence(), serDeParams.isLastColumnTakesRest(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + cachedRow = new LazyAccumuloRow((LazySimpleStructObjectInspector) cachedObjectInspector); + + serializer = new AccumuloRowSerializer(accumuloSerDeParameters.getRowIdOffset(), + accumuloSerDeParameters.getSerDeParameters(), accumuloSerDeParameters.getColumnMappings(), + accumuloSerDeParameters.getTableVisibilityLabel(), + accumuloSerDeParameters.getRowIdFactory()); + + if (log.isInfoEnabled()) { + log.info("Initialized with {} type: {}", accumuloSerDeParameters.getSerDeParameters() + .getColumnNames(), accumuloSerDeParameters.getSerDeParameters().getColumnTypes()); + } + } + + protected ArrayList getColumnObjectInspectors(List columnTypes, + SerDeParameters serDeParams, List mappings, AccumuloRowIdFactory factory) + throws SerDeException { + ArrayList columnObjectInspectors = new ArrayList( + columnTypes.size()); + for (int i = 0; i < columnTypes.size(); i++) { + TypeInfo type = columnTypes.get(i); + ColumnMapping mapping = mappings.get(i); + if (mapping instanceof HiveAccumuloRowIdColumnMapping) { + columnObjectInspectors.add(factory.createRowIdObjectInspector(type)); + } else { + columnObjectInspectors.add(LazyFactory.createLazyObjectInspector(type, + serDeParams.getSeparators(), 1, serDeParams.getNullSequence(), serDeParams.isEscaped(), + serDeParams.getEscapeChar())); + } + } + + return columnObjectInspectors; + } + + /*** + * For testing purposes. + */ + public LazyAccumuloRow getCachedRow() { + return cachedRow; + } + + public Class getSerializedClass() { + return Mutation.class; + } + + @Override + public Writable serialize(Object o, ObjectInspector objectInspector) throws SerDeException { + try { + return serializer.serialize(o, objectInspector); + } catch (IOException e) { + throw new SerDeException(e); + } + } + + @Override + public Object deserialize(Writable writable) throws SerDeException { + if (!(writable instanceof AccumuloHiveRow)) { + throw new SerDeException(getClass().getName() + " : " + "Expected AccumuloHiveRow. 
Got " + + writable.getClass().getName()); + } + + cachedRow.init((AccumuloHiveRow) writable, accumuloSerDeParameters.getColumnMappings(), + accumuloSerDeParameters.getRowIdFactory()); + + return cachedRow; + } + + public ObjectInspector getObjectInspector() throws SerDeException { + return cachedObjectInspector; + } + + public SerDeStats getSerDeStats() { + throw new UnsupportedOperationException("SerdeStats not supported."); + } + + public AccumuloSerDeParameters getParams() { + return accumuloSerDeParameters; + } + + public boolean getIteratorPushdown() { + return accumuloSerDeParameters.getIteratorPushdown(); + } + + protected AccumuloRowSerializer getSerializer() { + return serializer; + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java new file mode 100644 index 0000000..ef77697 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/AccumuloSerDeParameters.java @@ -0,0 +1,291 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.util.Collections; +import java.util.List; +import java.util.NoSuchElementException; +import java.util.Properties; + +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.util.ReflectionUtils; +import org.apache.log4j.Logger; + +import com.google.common.base.Preconditions; + +/** + * + */ +public class AccumuloSerDeParameters extends AccumuloConnectionParameters { + private static final Logger log = Logger.getLogger(AccumuloSerDeParameters.class); + + public static final String COLUMN_MAPPINGS = "accumulo.columns.mapping"; + public static final String ITERATOR_PUSHDOWN_KEY = "accumulo.iterator.pushdown"; + public static final boolean ITERATOR_PUSHDOWN_DEFAULT = true; + + public static final String DEFAULT_STORAGE_TYPE = "accumulo.default.storage"; + + public static final String VISIBILITY_LABEL_KEY = "accumulo.visibility.label"; + public static final ColumnVisibility DEFAULT_VISIBILITY_LABEL = new ColumnVisibility(); + + public static final String AUTHORIZATIONS_KEY = "accumulo.authorizations"; + + public static final String COMPOSITE_ROWID_FACTORY = "accumulo.composite.rowid.factory"; + public static final String COMPOSITE_ROWID_CLASS = "accumulo.composite.rowid"; + + protected final ColumnMapper columnMapper; + + private Properties tableProperties; + private String serdeName; + private SerDeParameters lazySerDeParameters; + private AccumuloRowIdFactory rowIdFactory; + + public AccumuloSerDeParameters(Configuration conf, Properties tableProperties, String serdeName) + throws SerDeException { + super(conf); + this.tableProperties = tableProperties; + this.serdeName = serdeName; + + lazySerDeParameters = LazySimpleSerDe.initSerdeParams(conf, tableProperties, serdeName); + + // The default encoding for this table when not otherwise specified + String defaultStorage = tableProperties.getProperty(DEFAULT_STORAGE_TYPE); + + columnMapper = new ColumnMapper(getColumnMappingValue(), defaultStorage, + lazySerDeParameters.getColumnNames(), lazySerDeParameters.getColumnTypes()); + + log.info("Constructed column mapping " + columnMapper); + + // Generate types for column mapping + if (null == getColumnTypeValue()) { + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, columnMapper.getTypesString()); + } + + if (columnMapper.size() < lazySerDeParameters.getColumnNames().size()) { + throw new TooManyHiveColumnsException("You have more " + COLUMN_MAPPINGS + + " fields than hive columns"); + } else if (columnMapper.size() > lazySerDeParameters.getColumnNames().size()) { + throw new TooManyAccumuloColumnsException( + "You have more hive columns than fields mapped with " + COLUMN_MAPPINGS); + } + + this.rowIdFactory = initRowIdFactory(conf, tableProperties); + } + + protected AccumuloRowIdFactory initRowIdFactory(Configuration conf, 
Properties tbl) + throws SerDeException { + try { + AccumuloRowIdFactory keyFactory = createRowIdFactory(conf, tbl); + if (keyFactory != null) { + keyFactory.init(this, tbl); + } + return keyFactory; + } catch (Exception e) { + throw new SerDeException(e); + } + } + + @SuppressWarnings({"rawtypes", "unchecked"}) + protected AccumuloRowIdFactory createRowIdFactory(Configuration job, Properties tbl) + throws Exception { + // Try to load the composite factory if one was provided + String factoryClassName = tbl.getProperty(COMPOSITE_ROWID_FACTORY); + if (factoryClassName != null) { + log.info("Loading CompositeRowIdFactory class " + factoryClassName); + Class factoryClazz = Class.forName(factoryClassName); + return (AccumuloRowIdFactory) ReflectionUtils.newInstance(factoryClazz, job); + } + + // See if a custom CompositeKey class was provided + String keyClassName = tbl.getProperty(COMPOSITE_ROWID_CLASS); + if (keyClassName != null) { + log.info("Loading CompositeRowId class " + keyClassName); + Class keyClass = Class.forName(keyClassName); + Class compositeRowIdClass = keyClass + .asSubclass(AccumuloCompositeRowId.class); + return new CompositeAccumuloRowIdFactory(compositeRowIdClass); + } + + return new DefaultAccumuloRowIdFactory(); + } + + public SerDeParameters getSerDeParameters() { + return lazySerDeParameters; + } + + public Properties getTableProperties() { + return tableProperties; + } + + public String getColumnTypeValue() { + return tableProperties.getProperty(serdeConstants.LIST_COLUMN_TYPES); + } + + public String getSerDeName() { + return serdeName; + } + + public String getColumnMappingValue() { + return tableProperties.getProperty(COLUMN_MAPPINGS); + } + + public HiveAccumuloRowIdColumnMapping getRowIdColumnMapping() { + return columnMapper.getRowIdMapping(); + } + + public boolean getIteratorPushdown() { + return conf.getBoolean(ITERATOR_PUSHDOWN_KEY, ITERATOR_PUSHDOWN_DEFAULT); + } + + public List getHiveColumnNames() { + return Collections.unmodifiableList(lazySerDeParameters.getColumnNames()); + } + + public List getHiveColumnTypes() { + return Collections.unmodifiableList(lazySerDeParameters.getColumnTypes()); + } + + public ColumnMapper getColumnMapper() { + return columnMapper; + } + + public int getRowIdOffset() { + return columnMapper.getRowIdOffset(); + } + + public List getColumnMappings() { + return columnMapper.getColumnMappings(); + } + + public AccumuloRowIdFactory getRowIdFactory() { + return rowIdFactory; + } + + public String getRowIdHiveColumnName() { + int rowIdOffset = columnMapper.getRowIdOffset(); + if (-1 == rowIdOffset) { + return null; + } + + List hiveColumnNames = lazySerDeParameters.getColumnNames(); + if (0 > rowIdOffset || hiveColumnNames.size() <= rowIdOffset) { + throw new IllegalStateException("Tried to find rowID offset at position " + rowIdOffset + + " from Hive columns " + hiveColumnNames); + } + + return hiveColumnNames.get(rowIdOffset); + } + + public ColumnMapping getColumnMappingForHiveColumn(String hiveColumn) { + List hiveColumnNames = lazySerDeParameters.getColumnNames(); + + for (int offset = 0; offset < hiveColumnNames.size() && offset < columnMapper.size(); offset++) { + String hiveColumnName = hiveColumnNames.get(offset); + if (hiveColumn.equals(hiveColumnName)) { + return columnMapper.get(offset); + } + } + + throw new NoSuchElementException("Could not find column mapping for Hive column " + hiveColumn); + } + + public TypeInfo getTypeForHiveColumn(String hiveColumn) { + List hiveColumnNames = 
lazySerDeParameters.getColumnNames(); + List hiveColumnTypes = lazySerDeParameters.getColumnTypes(); + + for (int i = 0; i < hiveColumnNames.size() && i < hiveColumnTypes.size(); i++) { + String columnName = hiveColumnNames.get(i); + if (hiveColumn.equals(columnName)) { + return hiveColumnTypes.get(i); + } + } + + throw new NoSuchElementException("Could not find Hive column type for " + hiveColumn); + } + + /** + * Extracts the table property to allow a custom ColumnVisibility label to be set on updates to be + * written to an Accumulo table. The value in the table property must be a properly formatted + * {@link ColumnVisibility}. If not value is present in the table properties, an empty + * ColumnVisibility is returned. + * + * @return The ColumnVisibility to be applied to all updates sent to Accumulo + */ + public ColumnVisibility getTableVisibilityLabel() { + String visibilityLabel = tableProperties.getProperty(VISIBILITY_LABEL_KEY, null); + if (null == visibilityLabel || visibilityLabel.isEmpty()) { + return DEFAULT_VISIBILITY_LABEL; + } + + return new ColumnVisibility(visibilityLabel); + } + + /** + * Extracts the table property to allow dynamic Accumulo Authorizations to be used when reading + * data from an Accumulo table. If no Authorizations are provided in the table properties, null is + * returned to preserve the functionality to read all data that the current user has access to. + * + * @return The Authorizations that should be used to read data from Accumulo, null if no + * configuration is supplied. + */ + public Authorizations getAuthorizations() { + String authorizationStr = tableProperties.getProperty(AUTHORIZATIONS_KEY, null); + + return getAuthorizationsFromValue(authorizationStr); + } + + /** + * Create an Authorizations object when the provided value is not null. Will return null, + * otherwise. + * + * @param authorizationStr + * Configuration value to parse + * @return Authorization object or null + */ + protected static Authorizations getAuthorizationsFromValue(String authorizationStr) { + if (null == authorizationStr) { + return null; + } + + return new Authorizations(authorizationStr); + } + + /** + * Extract any configuration on Authorizations to be used from the provided Configuration. If a + * non-null value is not present in the configuration, a null object is returned + * + * @return Authorization built from configuration value, null if no value is present in conf + */ + public static Authorizations getAuthorizationsFromConf(Configuration conf) { + Preconditions.checkNotNull(conf); + + String authorizationStr = conf.get(AUTHORIZATIONS_KEY, null); + + return getAuthorizationsFromValue(authorizationStr); + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java new file mode 100644 index 0000000..574a8aa --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/CompositeAccumuloRowIdFactory.java @@ -0,0 +1,71 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.IOException; +import java.lang.reflect.Constructor; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.Utils; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.log4j.Logger; + +/** + * {@link AccumuloRowIdFactory} designed for injection of the {@link AccumuloCompositeRowId} to be + * used to generate the Accumulo rowId. Allows for custom {@link AccumuloCompositeRowId}s to be + * specified without overriding the entire ObjectInspector for the Hive row. + * + * @param + */ +public class CompositeAccumuloRowIdFactory extends + DefaultAccumuloRowIdFactory { + + public static final Logger log = Logger.getLogger(CompositeAccumuloRowIdFactory.class); + + private final Class keyClass; + private final Constructor constructor; + + public CompositeAccumuloRowIdFactory(Class keyClass) throws SecurityException, + NoSuchMethodException { + // see javadoc of AccumuloCompositeRowId + this.keyClass = keyClass; + this.constructor = keyClass.getDeclaredConstructor(LazySimpleStructObjectInspector.class, + Properties.class, Configuration.class); + } + + @Override + public void addDependencyJars(Configuration jobConf) throws IOException { + // Make sure the jar containing the custom CompositeRowId is included + // in the mapreduce job's classpath (libjars) + Utils.addDependencyJars(jobConf, keyClass); + } + + @Override + public T createRowId(ObjectInspector inspector) throws SerDeException { + try { + return (T) constructor.newInstance(inspector, this.properties, + this.accumuloSerDeParams.getConf()); + } catch (Exception e) { + throw new SerDeException(e); + } + } +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java new file mode 100644 index 0000000..1180679 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/DefaultAccumuloRowIdFactory.java @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.IOException; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.Utils; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.serde2.ByteStream; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + +/** + * Default implementation of the AccumuloRowIdFactory which uses the normal + * {@link AccumuloRowSerializer} methods to serialize the field for storage into Accumulo. + */ +public class DefaultAccumuloRowIdFactory implements AccumuloRowIdFactory { + + protected AccumuloSerDeParameters accumuloSerDeParams; + protected LazySimpleSerDe.SerDeParameters serdeParams; + protected Properties properties; + protected HiveAccumuloRowIdColumnMapping rowIdMapping; + protected AccumuloRowSerializer serializer; + + @Override + public void init(AccumuloSerDeParameters accumuloSerDeParams, Properties properties) + throws SerDeException { + this.accumuloSerDeParams = accumuloSerDeParams; + this.serdeParams = accumuloSerDeParams.getSerDeParameters(); + this.properties = properties; + this.serializer = new AccumuloRowSerializer(accumuloSerDeParams.getRowIdOffset(), serdeParams, + accumuloSerDeParams.getColumnMappings(), accumuloSerDeParams.getTableVisibilityLabel(), + this); + this.rowIdMapping = accumuloSerDeParams.getRowIdColumnMapping(); + } + + @Override + public void addDependencyJars(Configuration conf) throws IOException { + Utils.addDependencyJars(conf, getClass()); + } + + @Override + public ObjectInspector createRowIdObjectInspector(TypeInfo type) throws SerDeException { + return LazyFactory.createLazyObjectInspector(type, serdeParams.getSeparators(), 1, + serdeParams.getNullSequence(), serdeParams.isEscaped(), serdeParams.getEscapeChar()); + } + + @Override + public LazyObjectBase createRowId(ObjectInspector inspector) throws SerDeException { + // LazyObject can only be binary when it's not a string as well +// return LazyFactory.createLazyObject(inspector, +// ColumnEncoding.BINARY == rowIdMapping.getEncoding()); + return LazyFactory.createLazyObject(inspector, + inspector.getTypeName() != TypeInfoFactory.stringTypeInfo.getTypeName() + && ColumnEncoding.BINARY == rowIdMapping.getEncoding()); + } + + @Override + public byte[] serializeRowId(Object object, StructField field, ByteStream.Output output) + throws IOException { + return serializer.serializeRowId(object, field, rowIdMapping); + } + +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/TooManyAccumuloColumnsException.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/TooManyAccumuloColumnsException.java new file mode 100644 index 0000000..7a84b7d --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/TooManyAccumuloColumnsException.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.serde; + +import org.apache.hadoop.hive.serde2.SerDeException; + +/** + * + */ +public class TooManyAccumuloColumnsException extends SerDeException { + + private static final long serialVersionUID = 1L; + + public TooManyAccumuloColumnsException() { + super(); + } + + public TooManyAccumuloColumnsException(String message, Throwable cause) { + super(message, cause); + } + + public TooManyAccumuloColumnsException(String message) { + super(message); + } + + public TooManyAccumuloColumnsException(Throwable cause) { + super(cause); + } + +} diff --git a/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/TooManyHiveColumnsException.java b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/TooManyHiveColumnsException.java new file mode 100644 index 0000000..848d7a4 --- /dev/null +++ b/accumulo-handler/src/java/org/apache/hadoop/hive/accumulo/serde/TooManyHiveColumnsException.java @@ -0,0 +1,44 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.serde; + +import org.apache.hadoop.hive.serde2.SerDeException; + +/** + * + */ +public class TooManyHiveColumnsException extends SerDeException { + + private static final long serialVersionUID = 1L; + + public TooManyHiveColumnsException() { + super(); + } + + public TooManyHiveColumnsException(String message, Throwable cause) { + super(message, cause); + } + + public TooManyHiveColumnsException(String message) { + super(message); + } + + public TooManyHiveColumnsException(Throwable cause) { + super(cause); + } + +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloConnectionParameters.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloConnectionParameters.java new file mode 100644 index 0000000..8b4c9ff --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloConnectionParameters.java @@ -0,0 +1,100 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.Instance; +import org.apache.hadoop.conf.Configuration; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +/** + * + */ +public class TestAccumuloConnectionParameters { + + @Test + public void testInstantiatesWithNullConfiguration() { + // TableDesc#getDeserializer() passes a null Configuration into the SerDe. + // We shouldn't fail immediately in this case + AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(null); + + // We should fail if we try to get info out of the params + try { + cnxnParams.getAccumuloInstanceName(); + Assert.fail("Should have gotten an NPE"); + } catch (NullPointerException e) {} + } + + @Test(expected = IllegalArgumentException.class) + public void testMissingInstanceName() { + Configuration conf = new Configuration(false); + conf.set(AccumuloConnectionParameters.ZOOKEEPERS, "localhost:2181"); + conf.set(AccumuloConnectionParameters.USER_NAME, "user"); + conf.set(AccumuloConnectionParameters.USER_PASS, "password"); + + AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(conf); + cnxnParams.getInstance(); + } + + @Test(expected = IllegalArgumentException.class) + public void testMissingZooKeepers() { + Configuration conf = new Configuration(false); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, "accumulo"); + conf.set(AccumuloConnectionParameters.USER_NAME, "user"); + conf.set(AccumuloConnectionParameters.USER_PASS, "password"); + + AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(conf); + cnxnParams.getInstance(); + } + + @Test(expected = IllegalArgumentException.class) + public void testMissingUserName() throws AccumuloException, AccumuloSecurityException { + Configuration conf = new Configuration(false); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, "accumulo"); + conf.set(AccumuloConnectionParameters.ZOOKEEPERS, "localhost:2181"); + conf.set(AccumuloConnectionParameters.USER_PASS, "password"); + + Instance instance = Mockito.mock(Instance.class); + + AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(conf); + + // Provide an instance of the code doesn't try to make a real Instance + // We just want to test that we fail before trying to make a connector + // with null username + cnxnParams.getConnector(instance); + } + + @Test(expected = IllegalArgumentException.class) + public void testMissingPassword() throws AccumuloException, AccumuloSecurityException { + Configuration conf = new Configuration(false); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, "accumulo"); + conf.set(AccumuloConnectionParameters.ZOOKEEPERS, 
"localhost:2181"); + conf.set(AccumuloConnectionParameters.USER_NAME, "user"); + + Instance instance = Mockito.mock(Instance.class); + + AccumuloConnectionParameters cnxnParams = new AccumuloConnectionParameters(conf); + + // Provide an instance of the code doesn't try to make a real Instance + // We just want to test that we fail before trying to make a connector + // with null password + cnxnParams.getConnector(instance); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloHiveRow.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloHiveRow.java new file mode 100644 index 0000000..fc90e36 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloHiveRow.java @@ -0,0 +1,115 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNull; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.hadoop.io.Text; +import org.junit.Test; + +/** + * Test basic operations on AccumuloHiveRow + */ +public class TestAccumuloHiveRow { + + @Test + public void testHasFamilyAndQualifier() { + AccumuloHiveRow row = new AccumuloHiveRow("row1"); + + // Add some columns + for (int i = 1; i <= 5; i++) { + row.add("cf1", "cq" + i, Integer.toString(i).getBytes()); + } + + // Check that we don't find unexpected columns + assertFalse(row.hasFamAndQual(new Text(""), new Text(""))); + assertFalse(row.hasFamAndQual(new Text("cf0"), new Text("cq1"))); + assertFalse(row.hasFamAndQual(new Text("cf1"), new Text("cq0"))); + + // Check that we do find all expected columns + for (int i = 1; i <= 5; i++) { + assertTrue(row.hasFamAndQual(new Text("cf1"), new Text("cq" + i))); + } + } + + @Test + public void testGetValueFromColumn() { + AccumuloHiveRow row = new AccumuloHiveRow("row1"); + + // Should return null when there is no column + assertNull(row.getValue(new Text(""), new Text(""))); + + for (int i = 1; i <= 5; i++) { + row.add("cf", "cq" + i, Integer.toString(i).getBytes()); + } + + assertNull(row.getValue(new Text("cf"), new Text("cq0"))); + + for (int i = 1; i <= 5; i++) { + assertArrayEquals(Integer.toString(i).getBytes(), + row.getValue(new Text("cf"), new Text("cq" + i))); + } + } + + @Test + public void testWritableEmptyRow() throws IOException { + AccumuloHiveRow emptyRow = new AccumuloHiveRow(); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream 
out = new DataOutputStream(baos); + emptyRow.write(out); + out.close(); + + AccumuloHiveRow emptyCopy = new AccumuloHiveRow(); + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + DataInputStream in = new DataInputStream(bais); + emptyCopy.readFields(in); + + assertEquals(emptyRow, emptyCopy); + } + + @Test + public void testWritableWithColumns() throws IOException { + AccumuloHiveRow rowWithColumns = new AccumuloHiveRow("row"); + rowWithColumns.add("cf", "cq1", "1".getBytes()); + rowWithColumns.add("cf", "cq2", "2".getBytes()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + rowWithColumns.write(out); + out.close(); + + AccumuloHiveRow copy = new AccumuloHiveRow(); + + ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray()); + DataInputStream in = new DataInputStream(bais); + copy.readFields(in); + + assertEquals(rowWithColumns, copy); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java new file mode 100644 index 0000000..0aaa782 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestAccumuloStorageHandler.java @@ -0,0 +1,536 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo; + +import java.util.HashMap; +import java.util.Map; +import java.util.Properties; + +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; + +/** + * + */ +public class TestAccumuloStorageHandler { + + protected AccumuloStorageHandler storageHandler; + + @Rule + public TestName test = new TestName(); + + @Before + public void setup() { + storageHandler = new AccumuloStorageHandler(); + } + + @Test + public void testTablePropertiesPassedToOutputJobProperties() { + TableDesc tableDesc = Mockito.mock(TableDesc.class); + Properties props = new Properties(); + Map jobProperties = new HashMap(); + + props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq1,cf:cq2,cf:cq3"); + props.setProperty(AccumuloSerDeParameters.TABLE_NAME, "table"); + props.setProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY, "foo"); + + Mockito.when(tableDesc.getProperties()).thenReturn(props); + + storageHandler.configureOutputJobProperties(tableDesc, jobProperties); + + Assert.assertEquals(3, jobProperties.size()); + Assert.assertTrue("Job properties did not contain column mappings", + jobProperties.containsKey(AccumuloSerDeParameters.COLUMN_MAPPINGS)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS), + jobProperties.get(AccumuloSerDeParameters.COLUMN_MAPPINGS)); + + Assert.assertTrue("Job properties did not contain accumulo table name", + jobProperties.containsKey(AccumuloSerDeParameters.TABLE_NAME)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.TABLE_NAME), + jobProperties.get(AccumuloSerDeParameters.TABLE_NAME)); + + Assert.assertTrue("Job properties did not contain visibility label", + jobProperties.containsKey(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY), + jobProperties.get(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY)); + } + + @Test + public void testTablePropertiesPassedToInputJobProperties() { + TableDesc tableDesc = Mockito.mock(TableDesc.class); + Properties props = new Properties(); + Map jobProperties = new HashMap(); + + props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq1,cf:cq2,cf:cq3"); + props.setProperty(AccumuloSerDeParameters.TABLE_NAME, "table"); + props.setProperty(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, "true"); + props + .setProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, ColumnEncoding.BINARY.getName()); + props.setProperty(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo,bar"); + + Mockito.when(tableDesc.getProperties()).thenReturn(props); + + storageHandler.configureInputJobProperties(tableDesc, jobProperties); + + Assert.assertEquals(5, jobProperties.size()); + + Assert.assertTrue(jobProperties.containsKey(AccumuloSerDeParameters.COLUMN_MAPPINGS)); + 
Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS), + jobProperties.get(AccumuloSerDeParameters.COLUMN_MAPPINGS)); + + Assert.assertTrue(jobProperties.containsKey(AccumuloSerDeParameters.TABLE_NAME)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.TABLE_NAME), + jobProperties.get(AccumuloSerDeParameters.TABLE_NAME)); + + Assert.assertTrue(jobProperties.containsKey(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY), + jobProperties.get(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY)); + + Assert.assertTrue(jobProperties.containsKey(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), + jobProperties.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE)); + + Assert.assertTrue(jobProperties.containsKey(AccumuloSerDeParameters.AUTHORIZATIONS_KEY)); + Assert.assertEquals(props.getProperty(AccumuloSerDeParameters.AUTHORIZATIONS_KEY), + jobProperties.get(AccumuloSerDeParameters.AUTHORIZATIONS_KEY)); + } + + @Test(expected = IllegalArgumentException.class) + public void testNonBooleanIteratorPushdownValue() { + TableDesc tableDesc = Mockito.mock(TableDesc.class); + Properties props = new Properties(); + Map jobProperties = new HashMap(); + + props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq1,cf:cq2,cf:cq3"); + props.setProperty(AccumuloSerDeParameters.TABLE_NAME, "table"); + props.setProperty(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, "foo"); + + Mockito.when(tableDesc.getProperties()).thenReturn(props); + + storageHandler.configureInputJobProperties(tableDesc, jobProperties); + } + + @Test(expected = IllegalArgumentException.class) + public void testEmptyIteratorPushdownValue() { + TableDesc tableDesc = Mockito.mock(TableDesc.class); + Properties props = new Properties(); + Map jobProperties = new HashMap(); + + props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq1,cf:cq2,cf:cq3"); + props.setProperty(AccumuloSerDeParameters.TABLE_NAME, "table"); + props.setProperty(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, ""); + + Mockito.when(tableDesc.getProperties()).thenReturn(props); + + storageHandler.configureInputJobProperties(tableDesc, jobProperties); + } + + @Test + public void testTableJobPropertiesCallsInputAndOutputMethods() { + AccumuloStorageHandler mockStorageHandler = Mockito.mock(AccumuloStorageHandler.class); + TableDesc tableDesc = Mockito.mock(TableDesc.class); + Map jobProperties = new HashMap(); + + Mockito.doCallRealMethod().when(mockStorageHandler) + .configureTableJobProperties(tableDesc, jobProperties); + + // configureTableJobProperties shouldn't be getting called by Hive, but, if it somehow does, + // we should just set all of the configurations for input and output. 
+ mockStorageHandler.configureTableJobProperties(tableDesc, jobProperties); + + Mockito.verify(mockStorageHandler).configureInputJobProperties(tableDesc, jobProperties); + Mockito.verify(mockStorageHandler).configureOutputJobProperties(tableDesc, jobProperties); + } + + @Test + public void testPreCreateTable() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + String tableName = "table"; + + // Define the SerDe Parameters + Map params = new HashMap(); + params.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq"); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + StorageDescriptor sd = Mockito.mock(StorageDescriptor.class); + Table table = Mockito.mock(Table.class); + SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class); + + // Call the real preCreateTable method + Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Not an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the mocked StorageDescriptor + Mockito.when(table.getSd()).thenReturn(sd); + + // No location expected with AccumuloStorageHandler + Mockito.when(sd.getLocation()).thenReturn(null); + + // Return mocked SerDeInfo + Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo); + + // Custom parameters + Mockito.when(serDeInfo.getParameters()).thenReturn(params); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.preCreateTable(table); + + Assert.assertTrue("Table does not exist when we expect it to", + conn.tableOperations().exists(tableName)); + } + + @Test(expected = MetaException.class) + public void testMissingColumnMappingFails() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + String tableName = "table"; + + // Empty parameters are sent, no COLUMN_MAPPING + Map params = new HashMap(); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + StorageDescriptor sd = Mockito.mock(StorageDescriptor.class); + Table table = Mockito.mock(Table.class); + SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class); + + // Call the real preCreateTable method + Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Not an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the mocked StorageDescriptor + Mockito.when(table.getSd()).thenReturn(sd); + + // No location expected with AccumuloStorageHandler + Mockito.when(sd.getLocation()).thenReturn(null); + + // Return mocked SerDeInfo + Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo); + + // Custom parameters + Mockito.when(serDeInfo.getParameters()).thenReturn(params); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = 
connectionParams; + + storageHandler.preCreateTable(table); + } + + @Test(expected = MetaException.class) + public void testNonNullLocation() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + String tableName = "table"; + + // Define the SerDe Parameters (a column mapping is provided) + Map params = new HashMap(); + params.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq"); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + StorageDescriptor sd = Mockito.mock(StorageDescriptor.class); + Table table = Mockito.mock(Table.class); + SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class); + + // Call the real preCreateTable method + Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Not an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the mocked StorageDescriptor + Mockito.when(table.getSd()).thenReturn(sd); + + // Return a non-null location, which the AccumuloStorageHandler should reject + Mockito.when(sd.getLocation()).thenReturn("foobar"); + + // Return mocked SerDeInfo + Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo); + + // Custom parameters + Mockito.when(serDeInfo.getParameters()).thenReturn(params); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.preCreateTable(table); + } + + @Test(expected = MetaException.class) + public void testExternalNonExistentTableFails() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + String tableName = "table"; + + // Define the SerDe Parameters + Map params = new HashMap(); + params.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq"); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + StorageDescriptor sd = Mockito.mock(StorageDescriptor.class); + Table table = Mockito.mock(Table.class); + SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class); + + // Call the real preCreateTable method + Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Is an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(true); + + // Return the mocked StorageDescriptor + Mockito.when(table.getSd()).thenReturn(sd); + + // No location expected with AccumuloStorageHandler + Mockito.when(sd.getLocation()).thenReturn(null); + + // Return mocked SerDeInfo + Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo); + + // Custom parameters + Mockito.when(serDeInfo.getParameters()).thenReturn(params); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.preCreateTable(table); + } + + @Test(expected = MetaException.class) + public void testNonExternalExistentTable() throws Exception { + MockInstance inst = new
MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + String tableName = "table"; + + // Create the table + conn.tableOperations().create(tableName); + + // Define the SerDe Parameters + Map params = new HashMap(); + params.put(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:cq"); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + StorageDescriptor sd = Mockito.mock(StorageDescriptor.class); + Table table = Mockito.mock(Table.class); + SerDeInfo serDeInfo = Mockito.mock(SerDeInfo.class); + + // Call the real preCreateTable method + Mockito.doCallRealMethod().when(storageHandler).preCreateTable(table); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Is not an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the mocked StorageDescriptor + Mockito.when(table.getSd()).thenReturn(sd); + + // No location expected with AccumuloStorageHandler + Mockito.when(sd.getLocation()).thenReturn(null); + + // Return mocked SerDeInfo + Mockito.when(sd.getSerdeInfo()).thenReturn(serDeInfo); + + // Custom parameters + Mockito.when(serDeInfo.getParameters()).thenReturn(params); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.preCreateTable(table); + } + + @Test() + public void testRollbackCreateTableOnNonExistentTable() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + String tableName = "table"; + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + Table table = Mockito.mock(Table.class); + + // Call the real rollbackCreateTable method + Mockito.doCallRealMethod().when(storageHandler).rollbackCreateTable(table); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Is not an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.rollbackCreateTable(table); + } + + @Test() + public void testRollbackCreateTableDeletesExistentTable() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + String tableName = "table"; + + // Create the table + conn.tableOperations().create(tableName); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + Table table = Mockito.mock(Table.class); + + // Call the real rollbackCreateTable and commitDropTable methods + Mockito.doCallRealMethod().when(storageHandler).rollbackCreateTable(table); + Mockito.doCallRealMethod().when(storageHandler).commitDropTable(table, true); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Is not an EXTERNAL table +
Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.rollbackCreateTable(table); + + Assert.assertFalse(conn.tableOperations().exists(tableName)); + } + + @Test() + public void testRollbackCreateTableDoesntDeleteExternalExistentTable() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + String tableName = "table"; + + // Create the table + conn.tableOperations().create(tableName); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + Table table = Mockito.mock(Table.class); + + // Call the real rollbackCreateTable and commitDropTable methods + Mockito.doCallRealMethod().when(storageHandler).rollbackCreateTable(table); + Mockito.doCallRealMethod().when(storageHandler).commitDropTable(table, true); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Is an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(true); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + storageHandler.rollbackCreateTable(table); + + Assert.assertTrue(conn.tableOperations().exists(tableName)); + } + + @Test + public void testDropTableWithoutDeleteLeavesTableIntact() throws Exception { + MockInstance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + AccumuloStorageHandler storageHandler = Mockito.mock(AccumuloStorageHandler.class); + String tableName = "table"; + + // Create the table + conn.tableOperations().create(tableName); + + AccumuloConnectionParameters connectionParams = Mockito + .mock(AccumuloConnectionParameters.class); + Table table = Mockito.mock(Table.class); + + // Call the real commitDropTable method + Mockito.doCallRealMethod().when(storageHandler).commitDropTable(table, false); + + // Return our known table name + Mockito.when(storageHandler.getTableName(table)).thenReturn(tableName); + + // Is not an EXTERNAL table + Mockito.when(storageHandler.isExternalTable(table)).thenReturn(false); + + // Return the MockInstance's Connector + Mockito.when(connectionParams.getConnector()).thenReturn(conn); + + storageHandler.connectionParams = connectionParams; + + // Drop the table without deleting the underlying data + storageHandler.commitDropTable(table, false); + + Assert.assertTrue(conn.tableOperations().exists(tableName)); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestLazyAccumuloMap.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestLazyAccumuloMap.java new file mode 100644 index 0000000..2479fb4 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestLazyAccumuloMap.java @@ -0,0 +1,193 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License.
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import java.io.DataOutputStream; +import java.io.IOException; + +import org.apache.commons.io.output.ByteArrayOutputStream; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloMapColumnMapping; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyInteger; +import org.apache.hadoop.hive.serde2.lazy.LazyString; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; +import org.apache.hadoop.io.IntWritable; +import org.apache.hadoop.io.Text; +import org.junit.Assert; +import org.junit.Test; + +/** + * + */ +public class TestLazyAccumuloMap { + + protected byte[] toBytes(int i) throws IOException { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + out.writeInt(i); + out.close(); + return baos.toByteArray(); + } + + @Test + public void testStringMapWithProjection() throws SerDeException { + AccumuloHiveRow row = new AccumuloHiveRow("row"); + + row.add("cf1", "foo", "bar".getBytes()); + row.add("cf1", "bar", "foo".getBytes()); + + row.add("cf2", "foo1", "bar1".getBytes()); + row.add("cf3", "bar1", "foo1".getBytes()); + + HiveAccumuloMapColumnMapping mapping = new HiveAccumuloMapColumnMapping("cf1", null, + ColumnEncoding.STRING, ColumnEncoding.STRING, "column", TypeInfoFactory.getMapTypeInfo( + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo).toString()); + + // Map of String to String + Text nullSequence = new Text("\\N"); + ObjectInspector oi = LazyFactory.createLazyObjectInspector(TypeInfoUtils + .getTypeInfosFromTypeString("map<string,string>").get(0), new byte[] {(byte) 1, (byte) 2}, + 0, nullSequence, false, (byte) 0); + + LazyAccumuloMap map = new LazyAccumuloMap((LazyMapObjectInspector) oi); + map.init(row, mapping); + + Assert.assertEquals(2, map.getMapSize()); + + Object o = map.getMapValueElement(new Text("foo")); + Assert.assertNotNull(o); + Assert.assertEquals(new Text("bar"), ((LazyString) o).getWritableObject()); + + o = map.getMapValueElement(new Text("bar")); + Assert.assertNotNull(o); + Assert.assertEquals(new Text("foo"), ((LazyString) o).getWritableObject()); + } + + @Test + public void testIntMap() throws SerDeException, IOException { + AccumuloHiveRow row = new AccumuloHiveRow("row"); + + row.add(new Text("cf1"), new Text("1"), "2".getBytes()); + row.add(new Text("cf1"), new Text("2"), "4".getBytes()); + row.add(new Text("cf1"), new Text("3"), "6".getBytes()); + + HiveAccumuloMapColumnMapping mapping = new HiveAccumuloMapColumnMapping("cf1", null, + ColumnEncoding.STRING, ColumnEncoding.STRING, "column", TypeInfoFactory.getMapTypeInfo( + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo).toString()); + + // Map of Integer to Integer + Text
nullSequence = new Text("\\N"); + ObjectInspector oi = LazyFactory.createLazyObjectInspector(TypeInfoUtils + .getTypeInfosFromTypeString("map<int,int>").get(0), new byte[] {(byte) 1, (byte) 2}, 0, + nullSequence, false, (byte) 0); + + LazyAccumuloMap map = new LazyAccumuloMap((LazyMapObjectInspector) oi); + map.init(row, mapping); + + Assert.assertEquals(3, map.getMapSize()); + + Object o = map.getMapValueElement(new IntWritable(1)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(2), ((LazyInteger) o).getWritableObject()); + + o = map.getMapValueElement(new IntWritable(2)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(4), ((LazyInteger) o).getWritableObject()); + + o = map.getMapValueElement(new IntWritable(3)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(6), ((LazyInteger) o).getWritableObject()); + } + + @Test + public void testBinaryIntMap() throws SerDeException, IOException { + AccumuloHiveRow row = new AccumuloHiveRow("row"); + + row.add(new Text("cf1"), new Text(toBytes(1)), toBytes(2)); + row.add(new Text("cf1"), new Text(toBytes(2)), toBytes(4)); + row.add(new Text("cf1"), new Text(toBytes(3)), toBytes(6)); + + HiveAccumuloMapColumnMapping mapping = new HiveAccumuloMapColumnMapping("cf1", null, + ColumnEncoding.BINARY, ColumnEncoding.BINARY, "column", TypeInfoFactory.getMapTypeInfo( + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo).toString()); + + // Map of Integer to Integer + Text nullSequence = new Text("\\N"); + ObjectInspector oi = LazyFactory.createLazyObjectInspector(TypeInfoUtils + .getTypeInfosFromTypeString("map<int,int>").get(0), new byte[] {(byte) 1, (byte) 2}, 0, + nullSequence, false, (byte) 0); + + LazyAccumuloMap map = new LazyAccumuloMap((LazyMapObjectInspector) oi); + map.init(row, mapping); + + Assert.assertEquals(3, map.getMapSize()); + + Object o = map.getMapValueElement(new IntWritable(1)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(2), ((LazyInteger) o).getWritableObject()); + + o = map.getMapValueElement(new IntWritable(2)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(4), ((LazyInteger) o).getWritableObject()); + + o = map.getMapValueElement(new IntWritable(3)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(6), ((LazyInteger) o).getWritableObject()); + } + + @Test + public void testMixedSerializationMap() throws SerDeException, IOException { + AccumuloHiveRow row = new AccumuloHiveRow("row"); + + row.add(new Text("cf1"), new Text(toBytes(1)), "2".getBytes()); + row.add(new Text("cf1"), new Text(toBytes(2)), "4".getBytes()); + row.add(new Text("cf1"), new Text(toBytes(3)), "6".getBytes()); + + HiveAccumuloMapColumnMapping mapping = new HiveAccumuloMapColumnMapping("cf1", null, + ColumnEncoding.BINARY, ColumnEncoding.STRING, "column", TypeInfoFactory.getMapTypeInfo( + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo).toString()); + + // Map of Integer to Integer (binary keys, string-encoded values) + Text nullSequence = new Text("\\N"); + ObjectInspector oi = LazyFactory.createLazyObjectInspector(TypeInfoUtils + .getTypeInfosFromTypeString("map<int,int>").get(0), new byte[] {(byte) 1, (byte) 2}, 0, + nullSequence, false, (byte) 0); + + LazyAccumuloMap map = new LazyAccumuloMap((LazyMapObjectInspector) oi); + map.init(row, mapping); + + Assert.assertEquals(3, map.getMapSize()); + + Object o = map.getMapValueElement(new IntWritable(1)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(2), ((LazyInteger) o).getWritableObject()); + + o = map.getMapValueElement(new
IntWritable(2)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(4), ((LazyInteger) o).getWritableObject()); + + o = map.getMapValueElement(new IntWritable(3)); + Assert.assertNotNull(o); + Assert.assertEquals(new IntWritable(6), ((LazyInteger) o).getWritableObject()); + } + +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestLazyAccumuloRow.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestLazyAccumuloRow.java new file mode 100644 index 0000000..e0b51cb --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/TestLazyAccumuloRow.java @@ -0,0 +1,237 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.accumulo.serde.DefaultAccumuloRowIdFactory; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.SerDeUtils; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyInteger; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.hive.serde2.lazy.LazyString; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.lazydio.LazyDioInteger; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.base.Joiner; + +/** + * + */ +public class TestLazyAccumuloRow { + + @Test + public void testExpectedDeserializationOfColumns() throws Exception { + List columns = Arrays.asList("row", "given_name", "surname", "age", "weight", "height"); + List types = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo); + + LazySimpleStructObjectInspector objectInspector = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(columns, types, LazySimpleSerDe.DefaultSeparators, new Text( + "\\N"), false, false, (byte) '\\'); + + DefaultAccumuloRowIdFactory rowIdFactory = new DefaultAccumuloRowIdFactory(); + + Properties props = new Properties(); + props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, + ":rowid,personal:given_name,personal:surname,personal:age,personal:weight,personal:height"); + props.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + props.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + + AccumuloSerDeParameters params = new AccumuloSerDeParameters(new Configuration(), props, + AccumuloSerDe.class.getName()); + + rowIdFactory.init(params, props); + + LazyAccumuloRow lazyRow = new LazyAccumuloRow(objectInspector); + AccumuloHiveRow hiveRow = new AccumuloHiveRow("1"); + hiveRow.add("personal", "given_name", "Bob".getBytes()); + hiveRow.add("personal", "surname", "Stevens".getBytes()); + hiveRow.add("personal", "age", "30".getBytes()); + hiveRow.add("personal", "weight", "200".getBytes()); + hiveRow.add("personal", "height", "72".getBytes()); + + ColumnMapper columnMapper = params.getColumnMapper(); + + lazyRow.init(hiveRow, columnMapper.getColumnMappings(), rowIdFactory); + + Object o = lazyRow.getField(0); + Assert.assertEquals(LazyString.class, o.getClass()); + Assert.assertEquals("1", ((LazyString) o).toString()); + + o = lazyRow.getField(1); + Assert.assertEquals(LazyString.class, o.getClass()); + Assert.assertEquals("Bob", ((LazyString) o).toString()); + + o = lazyRow.getField(2); + Assert.assertEquals(LazyString.class, o.getClass()); + Assert.assertEquals("Stevens", ((LazyString) o).toString()); + + o = lazyRow.getField(3); + Assert.assertEquals(LazyInteger.class, o.getClass()); + Assert.assertEquals("30", ((LazyInteger) o).toString()); + + o = lazyRow.getField(4); + Assert.assertEquals(LazyInteger.class, o.getClass()); + Assert.assertEquals("200", ((LazyInteger) o).toString()); + + o = lazyRow.getField(5); + Assert.assertEquals(LazyInteger.class, o.getClass()); + Assert.assertEquals("72", ((LazyInteger) o).toString()); + } + + @Test + public void testDeserializationOfBinaryEncoding() throws Exception { + List columns = Arrays.asList("row", "given_name", "surname", "age", "weight", "height"); + List types = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo); + + LazySimpleStructObjectInspector objectInspector = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(columns, types, LazySimpleSerDe.DefaultSeparators, new Text( + "\\N"), false, false, (byte) '\\'); + + DefaultAccumuloRowIdFactory rowIdFactory = new DefaultAccumuloRowIdFactory(); + + Properties props = new Properties(); + props + .setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, + ":rowid#s,personal:given_name#s,personal:surname#s,personal:age,personal:weight,personal:height"); + props.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + props.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + props + .setProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, ColumnEncoding.BINARY.getName()); + + AccumuloSerDeParameters params = new AccumuloSerDeParameters(new Configuration(), props, + AccumuloSerDe.class.getName()); + + rowIdFactory.init(params, props); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + + LazyAccumuloRow lazyRow = new LazyAccumuloRow(objectInspector); + AccumuloHiveRow hiveRow = new AccumuloHiveRow("1"); + hiveRow.add("personal", "given_name", "Bob".getBytes()); + hiveRow.add("personal", "surname", "Stevens".getBytes()); + + out.writeInt(30); + hiveRow.add("personal", "age", baos.toByteArray()); + + baos.reset(); + out.writeInt(200); + hiveRow.add("personal", "weight", baos.toByteArray()); + + baos.reset(); + out.writeInt(72); + hiveRow.add("personal", "height", baos.toByteArray()); + + ColumnMapper columnMapper = params.getColumnMapper(); + + lazyRow.init(hiveRow, columnMapper.getColumnMappings(), rowIdFactory); + + Object o = lazyRow.getField(0); + Assert.assertNotNull(o); + Assert.assertEquals(LazyString.class, o.getClass()); + Assert.assertEquals("1", ((LazyString) o).toString()); + + o = lazyRow.getField(1); + Assert.assertNotNull(o); + Assert.assertEquals(LazyString.class, o.getClass()); + Assert.assertEquals("Bob", ((LazyString) o).toString()); + + o = lazyRow.getField(2); + Assert.assertNotNull(o); + Assert.assertEquals(LazyString.class, o.getClass()); + Assert.assertEquals("Stevens", ((LazyString) o).toString()); + + o = lazyRow.getField(3); + Assert.assertNotNull(o); + Assert.assertEquals(LazyDioInteger.class, o.getClass()); + Assert.assertEquals("30", ((LazyDioInteger) o).toString()); + + o = lazyRow.getField(4); + Assert.assertNotNull(o); + Assert.assertEquals(LazyDioInteger.class, o.getClass()); + Assert.assertEquals("200", ((LazyDioInteger) o).toString()); + + o = lazyRow.getField(5); + Assert.assertNotNull(o); + Assert.assertEquals(LazyDioInteger.class, o.getClass()); + Assert.assertEquals("72", ((LazyDioInteger) o).toString()); + } + + @Test + public void testNullInit() throws SerDeException { + List columns = Arrays.asList("row", "1", "2", "3"); + List types = Arrays. 
asList( + TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME), + TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME), + TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME), + TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME)); + + LazySimpleStructObjectInspector objectInspector = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(columns, types, LazySimpleSerDe.DefaultSeparators, new Text( + "\\N"), false, false, (byte) '\\'); + + DefaultAccumuloRowIdFactory rowIdFactory = new DefaultAccumuloRowIdFactory(); + + Properties props = new Properties(); + props.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:cq1,cf:cq2,cf:cq3"); + props.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + props.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + + AccumuloSerDeParameters params = new AccumuloSerDeParameters(new Configuration(), props, + AccumuloSerDe.class.getName()); + + rowIdFactory.init(params, props); + + ColumnMapper columnMapper = params.getColumnMapper(); + + LazyAccumuloRow lazyRow = new LazyAccumuloRow(objectInspector); + AccumuloHiveRow hiveRow = new AccumuloHiveRow("1"); + hiveRow.add("cf", "cq1", "foo".getBytes()); + hiveRow.add("cf", "cq3", "bar".getBytes()); + + lazyRow.init(hiveRow, columnMapper.getColumnMappings(), rowIdFactory); + + // Noticed that we also suffer from the same issue as HIVE-3179 + // Only want to call a field init'ed when it's non-NULL + // Check it twice, make sure we get null both times + Assert.assertEquals("{'row':'1','1':'foo','2':null,'3':'bar'}".replace('\'', '"'), + SerDeUtils.getJSONString(lazyRow, objectInspector)); + Assert.assertEquals("{'row':'1','1':'foo','2':null,'3':'bar'}".replace('\'', '"'), + SerDeUtils.getJSONString(lazyRow, objectInspector)); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnEncoding.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnEncoding.java new file mode 100644 index 0000000..8183181 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnEncoding.java @@ -0,0 +1,146 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import java.util.Map.Entry; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.collect.Maps; + +/** + * + */ +public class TestColumnEncoding { + + @Test(expected = IllegalArgumentException.class) + public void testInvalidCodeThrowsException() { + ColumnEncoding.fromCode("foo"); + } + + @Test + public void testStringEncoding() { + Assert.assertEquals(ColumnEncoding.STRING, ColumnEncoding.fromCode("s")); + } + + @Test + public void testBinaryEncoding() { + Assert.assertEquals(ColumnEncoding.BINARY, ColumnEncoding.fromCode("b")); + } + + @Test + public void testMissingColumnEncoding() { + Assert.assertFalse(ColumnEncoding.hasColumnEncoding("foo:bar")); + } + + @Test + public void testColumnEncodingSpecified() { + Assert.assertTrue(ColumnEncoding.hasColumnEncoding("foo:bar#s")); + } + + @Test + public void testEscapedPoundIsNoEncodingSpecified() { + Assert.assertFalse(ColumnEncoding.hasColumnEncoding("foo:b\\#ar")); + } + + @Test + public void testEscapedPoundWithRealPound() { + Assert.assertTrue(ColumnEncoding.hasColumnEncoding("foo:b\\#ar#b")); + } + + @Test + public void testParse() { + Assert.assertEquals(ColumnEncoding.STRING, ColumnEncoding.getFromMapping("foo:bar#s")); + } + + @Test + public void testParseWithEscapedPound() { + Assert.assertEquals(ColumnEncoding.BINARY, ColumnEncoding.getFromMapping("fo\\#o:bar#b")); + } + + @Test(expected = IllegalArgumentException.class) + public void testMissingEncodingOnParse() { + ColumnEncoding.getFromMapping("foo:bar"); + } + + @Test + public void testStripCode() { + String mapping = "foo:bar"; + Assert.assertEquals( + mapping, + ColumnEncoding.stripCode(mapping + AccumuloHiveConstants.POUND + + ColumnEncoding.BINARY.getCode())); + } + + @Test(expected = IllegalArgumentException.class) + public void testStripNonExistentCodeFails() { + ColumnEncoding.stripCode("foo:bar"); + } + + @Test + public void testStripCodeWithEscapedPound() { + String mapping = "foo:ba\\#r"; + + Assert.assertEquals( + mapping, + ColumnEncoding.stripCode(mapping + AccumuloHiveConstants.POUND + + ColumnEncoding.BINARY.getCode())); + } + + @Test + public void testMapEncoding() { + Assert.assertFalse(ColumnEncoding.isMapEncoding("s")); + Assert.assertFalse(ColumnEncoding.isMapEncoding("string")); + Assert.assertFalse(ColumnEncoding.isMapEncoding("binary")); + + Assert.assertTrue(ColumnEncoding.isMapEncoding("s:s")); + Assert.assertTrue(ColumnEncoding.isMapEncoding("s:string")); + Assert.assertTrue(ColumnEncoding.isMapEncoding("string:s")); + Assert.assertTrue(ColumnEncoding.isMapEncoding("string:string")); + } + + @Test + public void testMapEncodingParsing() { + Entry stringString = Maps.immutableEntry(ColumnEncoding.STRING, + ColumnEncoding.STRING), stringBinary = Maps.immutableEntry(ColumnEncoding.STRING, + ColumnEncoding.BINARY), binaryBinary = Maps.immutableEntry(ColumnEncoding.BINARY, + ColumnEncoding.BINARY), binaryString = Maps.immutableEntry(ColumnEncoding.BINARY, + ColumnEncoding.STRING); + + Assert.assertEquals(stringString, ColumnEncoding.getMapEncoding("s:s")); + Assert.assertEquals(stringString, ColumnEncoding.getMapEncoding("s:string")); + Assert.assertEquals(stringString, ColumnEncoding.getMapEncoding("string:s")); + Assert.assertEquals(stringString, ColumnEncoding.getMapEncoding("string:string")); + + Assert.assertEquals(stringBinary, ColumnEncoding.getMapEncoding("s:b")); + Assert.assertEquals(stringBinary, 
ColumnEncoding.getMapEncoding("string:b")); + Assert.assertEquals(stringBinary, ColumnEncoding.getMapEncoding("s:binary")); + Assert.assertEquals(stringBinary, ColumnEncoding.getMapEncoding("string:binary")); + + Assert.assertEquals(binaryString, ColumnEncoding.getMapEncoding("b:s")); + Assert.assertEquals(binaryString, ColumnEncoding.getMapEncoding("b:string")); + Assert.assertEquals(binaryString, ColumnEncoding.getMapEncoding("binary:s")); + Assert.assertEquals(binaryString, ColumnEncoding.getMapEncoding("binary:string")); + + Assert.assertEquals(binaryBinary, ColumnEncoding.getMapEncoding("b:b")); + Assert.assertEquals(binaryBinary, ColumnEncoding.getMapEncoding("binary:b")); + Assert.assertEquals(binaryBinary, ColumnEncoding.getMapEncoding("b:binary")); + Assert.assertEquals(binaryBinary, ColumnEncoding.getMapEncoding("binary:binary")); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java new file mode 100644 index 0000000..e5c1e61 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMapper.java @@ -0,0 +1,203 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.columns; + +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.util.StringUtils; +import org.junit.Assert; +import org.junit.Test; + +import com.google.common.base.Joiner; + +/** + * + */ +public class TestColumnMapper { + + @Test + public void testNormalMapping() throws TooManyAccumuloColumnsException { + List rawMappings = Arrays.asList(AccumuloHiveConstants.ROWID, "cf:cq", "cf:_", + "cf:qual"); + List columnNames = Arrays.asList("row", "col1", "col2", "col3"); + List columnTypes = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + ColumnMapper mapper = new ColumnMapper( + Joiner.on(AccumuloHiveConstants.COMMA).join(rawMappings), ColumnEncoding.STRING.getName(), + columnNames, columnTypes); + + List mappings = mapper.getColumnMappings(); + + Assert.assertEquals(rawMappings.size(), mappings.size()); + Assert.assertEquals(mappings.size(), mapper.size()); + + // Compare the Mapper get at offset method to the list of mappings + Iterator rawIter = rawMappings.iterator(); + Iterator iter = mappings.iterator(); + for (int i = 0; i < mappings.size() && iter.hasNext(); i++) { + String rawMapping = rawIter.next(); + ColumnMapping mapping = iter.next(); + ColumnMapping mappingByOffset = mapper.get(i); + + Assert.assertEquals(mapping, mappingByOffset); + + // Ensure that we get the right concrete ColumnMapping + if (AccumuloHiveConstants.ROWID.equals(rawMapping)) { + Assert.assertEquals(HiveAccumuloRowIdColumnMapping.class, mapping.getClass()); + } else { + Assert.assertEquals(HiveAccumuloColumnMapping.class, mapping.getClass()); + } + } + + Assert.assertEquals(0, mapper.getRowIdOffset()); + Assert.assertTrue(mapper.hasRowIdMapping()); + } + + @Test(expected = IllegalArgumentException.class) + public void testMultipleRowIDsFails() throws TooManyAccumuloColumnsException { + new ColumnMapper(AccumuloHiveConstants.ROWID + AccumuloHiveConstants.COMMA + + AccumuloHiveConstants.ROWID, null, Arrays.asList("row", "row2"), + Arrays. asList(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo)); + } + + @Test + public void testGetMappingFromHiveColumn() throws TooManyAccumuloColumnsException { + List hiveColumns = Arrays.asList("rowid", "col1", "col2", "col3"); + List columnTypes = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + List rawMappings = Arrays.asList(AccumuloHiveConstants.ROWID, "cf:cq", "cf:_", + "cf:qual"); + ColumnMapper mapper = new ColumnMapper( + Joiner.on(AccumuloHiveConstants.COMMA).join(rawMappings), null, hiveColumns, columnTypes); + + for (int i = 0; i < hiveColumns.size(); i++) { + String hiveColumn = hiveColumns.get(i), accumuloMapping = rawMappings.get(i); + ColumnMapping mapping = mapper.getColumnMappingForHiveColumn(hiveColumns, hiveColumn); + + Assert.assertEquals(accumuloMapping, mapping.getMappingSpec()); + } + } + + @Test + public void testGetTypesString() throws TooManyAccumuloColumnsException { + List hiveColumns = Arrays.asList("rowid", "col1", "col2", "col3"); + List rawMappings = Arrays.asList(AccumuloHiveConstants.ROWID, "cf:cq", "cf:_", + "cf:qual"); + List columnTypes = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + ColumnMapper mapper = new ColumnMapper( + Joiner.on(AccumuloHiveConstants.COMMA).join(rawMappings), null, hiveColumns, columnTypes); + + String typeString = mapper.getTypesString(); + String[] types = StringUtils.split(typeString, AccumuloHiveConstants.COLON); + Assert.assertEquals(rawMappings.size(), types.length); + for (String type : types) { + Assert.assertEquals(serdeConstants.STRING_TYPE_NAME, type); + } + } + + @Test + public void testDefaultBinary() throws TooManyAccumuloColumnsException { + List hiveColumns = Arrays.asList("rowid", "col1", "col2", "col3", "col4"); + List rawMappings = Arrays.asList(AccumuloHiveConstants.ROWID, "cf:cq", "cf:_#s", + "cf:qual#s", "cf:qual2"); + List columnTypes = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo); + ColumnMapper mapper = new ColumnMapper( + Joiner.on(AccumuloHiveConstants.COMMA).join(rawMappings), ColumnEncoding.BINARY.getName(), + hiveColumns, columnTypes); + + List mappings = mapper.getColumnMappings(); + Assert.assertEquals(5, mappings.size()); + + Assert.assertEquals(ColumnEncoding.BINARY, mappings.get(0).getEncoding()); + Assert.assertEquals(columnTypes.get(0).toString(), mappings.get(0).getColumnType()); + + Assert.assertEquals(ColumnEncoding.BINARY, mappings.get(1).getEncoding()); + Assert.assertEquals(columnTypes.get(1).toString(), mappings.get(1).getColumnType()); + + Assert.assertEquals(ColumnEncoding.STRING, mappings.get(2).getEncoding()); + Assert.assertEquals(columnTypes.get(2).toString(), mappings.get(2).getColumnType()); + + Assert.assertEquals(ColumnEncoding.STRING, mappings.get(3).getEncoding()); + Assert.assertEquals(columnTypes.get(3).toString(), mappings.get(3).getColumnType()); + + Assert.assertEquals(ColumnEncoding.BINARY, mappings.get(4).getEncoding()); + Assert.assertEquals(columnTypes.get(4).toString(), mappings.get(4).getColumnType()); + + } + + @Test + public void testMap() throws TooManyAccumuloColumnsException { + List hiveColumns = Arrays.asList("rowid", "col1", "col2", "col3"); + List columnTypes = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo), TypeInfoFactory.getMapTypeInfo( + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo), + TypeInfoFactory.stringTypeInfo); + List rawMappings = Arrays.asList(AccumuloHiveConstants.ROWID, "cf1:*", "cf2:2*", + "cq3:bar\\*"); + ColumnMapper mapper = new ColumnMapper( + Joiner.on(AccumuloHiveConstants.COMMA).join(rawMappings), ColumnEncoding.BINARY.getName(), + hiveColumns, columnTypes); + + List mappings = mapper.getColumnMappings(); + Assert.assertEquals(4, mappings.size()); + + Assert.assertEquals(HiveAccumuloRowIdColumnMapping.class, mappings.get(0).getClass()); + Assert.assertEquals(HiveAccumuloMapColumnMapping.class, mappings.get(1).getClass()); + Assert.assertEquals(HiveAccumuloMapColumnMapping.class, mappings.get(2).getClass()); + Assert.assertEquals(HiveAccumuloColumnMapping.class, mappings.get(3).getClass()); + + HiveAccumuloRowIdColumnMapping row = (HiveAccumuloRowIdColumnMapping) mappings.get(0); + Assert.assertEquals(ColumnEncoding.BINARY, row.getEncoding()); + Assert.assertEquals(hiveColumns.get(0), row.getColumnName()); + Assert.assertEquals(columnTypes.get(0).toString(), row.getColumnType()); + + HiveAccumuloMapColumnMapping map = (HiveAccumuloMapColumnMapping) mappings.get(1); + Assert.assertEquals("cf1", map.getColumnFamily()); + Assert.assertEquals("", map.getColumnQualifierPrefix()); + Assert.assertEquals(ColumnEncoding.BINARY, map.getEncoding()); + Assert.assertEquals(hiveColumns.get(1), map.getColumnName()); + Assert.assertEquals(columnTypes.get(1).toString(), map.getColumnType()); + + map = (HiveAccumuloMapColumnMapping) mappings.get(2); + Assert.assertEquals("cf2", map.getColumnFamily()); + Assert.assertEquals("2", map.getColumnQualifierPrefix()); + Assert.assertEquals(ColumnEncoding.BINARY, map.getEncoding()); + Assert.assertEquals(hiveColumns.get(2), map.getColumnName()); + Assert.assertEquals(columnTypes.get(2).toString(), map.getColumnType()); + + HiveAccumuloColumnMapping column = (HiveAccumuloColumnMapping) mappings.get(3); + Assert.assertEquals("cq3", column.getColumnFamily()); + Assert.assertEquals("bar*", column.getColumnQualifier()); + Assert.assertEquals(ColumnEncoding.BINARY, column.getEncoding()); + Assert.assertEquals(hiveColumns.get(3), column.getColumnName()); + Assert.assertEquals(columnTypes.get(3).toString(), column.getColumnType()); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMappingFactory.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMappingFactory.java new file mode 100644 index 0000000..7e7ee4c --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestColumnMappingFactory.java @@ -0,0 +1,181 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.columns; + +import java.util.Map.Entry; + +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Assert; +import org.junit.Test; + +/** + * + */ +public class TestColumnMappingFactory { + + @Test(expected = NullPointerException.class) + public void testNullArgumentsFailFast() { + ColumnMappingFactory.get(null, null, null, null); + } + + @Test + public void testRowIdCreatesRowIdMapping() { + ColumnMapping mapping = ColumnMappingFactory.get(AccumuloHiveConstants.ROWID, + ColumnEncoding.STRING, "row", TypeInfoFactory.stringTypeInfo); + + Assert.assertEquals(HiveAccumuloRowIdColumnMapping.class, mapping.getClass()); + Assert.assertEquals("row", mapping.getColumnName()); + Assert.assertEquals(TypeInfoFactory.stringTypeInfo.toString(), mapping.getColumnType()); + } + + @Test + public void testColumnMappingCreatesAccumuloColumnMapping() { + ColumnMapping mapping = ColumnMappingFactory.get("cf:cq", ColumnEncoding.STRING, "col", + TypeInfoFactory.stringTypeInfo); + + Assert.assertEquals(HiveAccumuloColumnMapping.class, mapping.getClass()); + Assert.assertEquals("col", mapping.getColumnName()); + Assert.assertEquals(TypeInfoFactory.stringTypeInfo.toString(), mapping.getColumnType()); + } + + @Test(expected = InvalidColumnMappingException.class) + public void testColumnMappingRequiresCfAndCq() { + ColumnMappingFactory.parseMapping("cf"); + } + + @Test + public void testColumnMappingWithMultipleColons() { + // A column qualifier with a colon + String cf = "cf", cq = "cq1:cq2"; + Entry pair = ColumnMappingFactory.parseMapping(cf + ":" + cq); + + Assert.assertEquals(cf, pair.getKey()); + Assert.assertEquals(cq, pair.getValue()); + } + + @Test + public void testEscapedColumnFamily() { + String cf = "c" + '\\' + ":f", cq = "cq1:cq2"; + Entry pair = ColumnMappingFactory.parseMapping(cf + ":" + cq); + + // The getter should remove the escape character for us + Assert.assertEquals("c:f", pair.getKey()); + Assert.assertEquals(cq, pair.getValue()); + } + + @Test + public void testEscapedColumnFamilyAndQualifier() { + String cf = "c" + '\\' + ":f", cq = "cq1\\:cq2"; + Entry pair = ColumnMappingFactory.parseMapping(cf + ":" + cq); + + // The getter should remove the escape character for us + Assert.assertEquals("c:f", pair.getKey()); + Assert.assertEquals("cq1:cq2", pair.getValue()); + } + + @Test + public void testGetMap() { + String mappingStr = "cf:*"; + ColumnMapping mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.getDefault(), + "col", TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo)); + + Assert.assertEquals(HiveAccumuloMapColumnMapping.class, mapping.getClass()); + HiveAccumuloMapColumnMapping mapMapping = (HiveAccumuloMapColumnMapping) mapping; + + Assert.assertEquals("cf", mapMapping.getColumnFamily()); + Assert.assertEquals("", mapMapping.getColumnQualifierPrefix()); + Assert.assertEquals(ColumnEncoding.getDefault(), mapMapping.getKeyEncoding()); + Assert.assertEquals(ColumnEncoding.getDefault(), mapMapping.getValueEncoding()); + } + + @Test + public void testGetMapWithPrefix() { + String mappingStr = "cf:foo*"; + ColumnMapping mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.getDefault(), + "col", TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, + 
TypeInfoFactory.stringTypeInfo)); + + Assert.assertEquals(HiveAccumuloMapColumnMapping.class, mapping.getClass()); + HiveAccumuloMapColumnMapping mapMapping = (HiveAccumuloMapColumnMapping) mapping; + + Assert.assertEquals("cf", mapMapping.getColumnFamily()); + Assert.assertEquals("foo", mapMapping.getColumnQualifierPrefix()); + Assert.assertEquals(ColumnEncoding.getDefault(), mapMapping.getKeyEncoding()); + Assert.assertEquals(ColumnEncoding.getDefault(), mapMapping.getValueEncoding()); + } + + @Test + public void testEscapedAsterisk() { + String mappingStr = "cf:\\*"; + ColumnMapping mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.getDefault(), + "col", TypeInfoFactory.stringTypeInfo); + + Assert.assertEquals(HiveAccumuloColumnMapping.class, mapping.getClass()); + HiveAccumuloColumnMapping colMapping = (HiveAccumuloColumnMapping) mapping; + + Assert.assertEquals("cf", colMapping.getColumnFamily()); + Assert.assertEquals("*", colMapping.getColumnQualifier()); + Assert.assertEquals(ColumnEncoding.getDefault(), colMapping.getEncoding()); + } + + @Test + public void testPrefixWithEscape() { + String mappingStr = "cf:foo\\*bar*"; + ColumnMapping mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.getDefault(), + "col", TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo)); + + Assert.assertEquals(HiveAccumuloMapColumnMapping.class, mapping.getClass()); + HiveAccumuloMapColumnMapping mapMapping = (HiveAccumuloMapColumnMapping) mapping; + + Assert.assertEquals("cf", mapMapping.getColumnFamily()); + Assert.assertEquals("foo*bar", mapMapping.getColumnQualifierPrefix()); + Assert.assertEquals(ColumnEncoding.getDefault(), mapMapping.getKeyEncoding()); + Assert.assertEquals(ColumnEncoding.getDefault(), mapMapping.getValueEncoding()); + } + + @Test + public void testInlineEncodingOverridesDefault() { + String mappingStr = "cf:foo#s"; + ColumnMapping mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.BINARY, "col", + TypeInfoFactory.stringTypeInfo); + + Assert.assertEquals(HiveAccumuloColumnMapping.class, mapping.getClass()); + HiveAccumuloColumnMapping colMapping = (HiveAccumuloColumnMapping) mapping; + + Assert.assertEquals("cf", colMapping.getColumnFamily()); + Assert.assertEquals("foo", colMapping.getColumnQualifier()); + Assert.assertEquals(ColumnEncoding.STRING, colMapping.getEncoding()); + } + + @Test + public void testCaseInsensitiveRowId() { + String mappingStr = ":rowid"; + ColumnMapping mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.getDefault(), + "col", TypeInfoFactory.stringTypeInfo); + + Assert.assertEquals(HiveAccumuloRowIdColumnMapping.class, mapping.getClass()); + + mappingStr = ":rowid#b"; + mapping = ColumnMappingFactory.get(mappingStr, ColumnEncoding.getDefault(), "col", + TypeInfoFactory.stringTypeInfo); + + Assert.assertEquals(HiveAccumuloRowIdColumnMapping.class, mapping.getClass()); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestHiveAccumuloColumnMapping.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestHiveAccumuloColumnMapping.java new file mode 100644 index 0000000..240560d --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestHiveAccumuloColumnMapping.java @@ -0,0 +1,38 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Assert; +import org.junit.Test; + +/** + * + */ +public class TestHiveAccumuloColumnMapping { + + @Test + public void testColumnMappingWithMultipleColons() { + // A column qualifier with a colon + String cf = "cf", cq = "cq1:cq2"; + HiveAccumuloColumnMapping mapping = new HiveAccumuloColumnMapping(cf, cq, + ColumnEncoding.STRING, "col", TypeInfoFactory.stringTypeInfo.toString()); + + Assert.assertEquals(cf, mapping.getColumnFamily()); + Assert.assertEquals(cq, mapping.getColumnQualifier()); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestHiveRowIdColumnMapping.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestHiveRowIdColumnMapping.java new file mode 100644 index 0000000..468c59b --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/columns/TestHiveRowIdColumnMapping.java @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.columns; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Test; + +/** + * + */ +public class TestHiveRowIdColumnMapping { + + @Test(expected = IllegalArgumentException.class) + public void testNonRowIdMappingFails() { + new HiveAccumuloRowIdColumnMapping("foo", ColumnEncoding.STRING, "col", + TypeInfoFactory.stringTypeInfo.toString()); + } + +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java new file mode 100644 index 0000000..e2ad8ef --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableInputFormat.java @@ -0,0 +1,743 @@ +package org.apache.hadoop.hive.accumulo.mr; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.SortedMap; + +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.client.Scanner; +import org.apache.accumulo.core.client.ZooKeeperInstance; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Range; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.accumulo.core.util.Pair; +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters; +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloColumnMapping; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.accumulo.predicate.AccumuloPredicateHandler; +import org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter; +import org.apache.hadoop.hive.accumulo.predicate.compare.DoubleCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.Equal; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThanOrEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.LongCompare; +import 
org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; + +import com.google.common.collect.Sets; + +public class TestHiveAccumuloTableInputFormat { + public static final String USER = "user"; + public static final String PASS = "password"; + public static final String TEST_TABLE = "table1"; + public static final Text COLUMN_FAMILY = new Text("cf"); + + private static final Text NAME = new Text("name"); + private static final Text SID = new Text("sid"); + private static final Text DEGREES = new Text("dgrs"); + private static final Text MILLIS = new Text("mills"); + + private Instance mockInstance; + private Connector con; + private HiveAccumuloTableInputFormat inputformat; + private JobConf conf; + private List columnNames; + private List columnTypes; + + @Rule + public TestName test = new TestName(); + + @Before + public void createMockKeyValues() throws Exception { + // Make a MockInstance here, by setting the instance name to be the same as this mock instance + // we can "trick" the InputFormat into using a MockInstance + mockInstance = new MockInstance(test.getMethodName()); + inputformat = new HiveAccumuloTableInputFormat(); + conf = new JobConf(); + conf.set(AccumuloSerDeParameters.TABLE_NAME, TEST_TABLE); + conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true"); + conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName()); + conf.set(AccumuloSerDeParameters.USER_NAME, USER); + conf.set(AccumuloSerDeParameters.USER_PASS, PASS); + conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181"); // not used for mock, but + // required by input format. + + columnNames = Arrays.asList("name", "sid", "dgrs", "mills"); + columnTypes = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.doubleTypeInfo, TypeInfoFactory.longTypeInfo); + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:name,cf:sid,cf:dgrs,cf:mills"); + conf.set(serdeConstants.LIST_COLUMNS, "name,sid,dgrs,mills"); + conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,double,bigint"); + + con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes())); + con.tableOperations().create(TEST_TABLE); + con.securityOperations().changeUserAuthorizations(USER, new Authorizations("blah")); + BatchWriterConfig writerConf = new BatchWriterConfig(); + BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf); + + Mutation m1 = new Mutation(new Text("r1")); + m1.put(COLUMN_FAMILY, NAME, new Value("brian".getBytes())); + m1.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("1"))); + m1.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("44.5"))); + m1.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("555"))); + + Mutation m2 = new Mutation(new Text("r2")); + m2.put(COLUMN_FAMILY, NAME, new Value("mark".getBytes())); + m2.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("2"))); + m2.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("55.5"))); + m2.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("666"))); + + Mutation m3 = new Mutation(new Text("r3")); + m3.put(COLUMN_FAMILY, NAME, new Value("dennis".getBytes())); + m3.put(COLUMN_FAMILY, SID, new Value(parseIntBytes("3"))); + m3.put(COLUMN_FAMILY, DEGREES, new Value(parseDoubleBytes("65.5"))); + m3.put(COLUMN_FAMILY, MILLIS, new Value(parseLongBytes("777"))); + + writer.addMutation(m1); + writer.addMutation(m2); + writer.addMutation(m3); + + writer.close(); + } + + private byte[] parseIntBytes(String s) throws IOException { + int val = Integer.parseInt(s); + ByteArrayOutputStream baos = new ByteArrayOutputStream(4); + DataOutputStream out = new DataOutputStream(baos); + out.writeInt(val); + out.close(); + return baos.toByteArray(); + } + + private byte[] parseLongBytes(String s) throws IOException { + long val = Long.parseLong(s); + ByteArrayOutputStream baos = new ByteArrayOutputStream(8); + DataOutputStream out = new DataOutputStream(baos); + out.writeLong(val); + out.close(); + return baos.toByteArray(); + } + + private byte[] parseDoubleBytes(String s) throws IOException { + double val = Double.parseDouble(s); + ByteArrayOutputStream baos = new ByteArrayOutputStream(8); + DataOutputStream out = new DataOutputStream(baos); + out.writeDouble(val); + out.close(); + return baos.toByteArray(); + } + + @Test + public void testHiveAccumuloRecord() throws Exception { + FileInputFormat.addInputPath(conf, new Path("unused")); + InputSplit[] splits = inputformat.getSplits(conf, 0); + assertEquals(splits.length, 1); + RecordReader reader = inputformat.getRecordReader(splits[0], conf, null); + Text rowId = new Text("r1"); + AccumuloHiveRow row = new AccumuloHiveRow(); + row.add(COLUMN_FAMILY.toString(), NAME.toString(), "brian".getBytes()); + row.add(COLUMN_FAMILY.toString(), SID.toString(), parseIntBytes("1")); + row.add(COLUMN_FAMILY.toString(), DEGREES.toString(), parseDoubleBytes("44.5")); + row.add(COLUMN_FAMILY.toString(), MILLIS.toString(), parseLongBytes("555")); + assertTrue(reader.next(rowId, row)); + assertEquals(rowId.toString(), row.getRowId()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals("brian".getBytes(), row.getValue(COLUMN_FAMILY, NAME)); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, SID)); + 
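For reference, the fixed-width payloads asserted here are plain DataOutputStream output from the parse*Bytes helpers above; a minimal decoding sketch under that assumption (the helper class below is illustrative, not part of this patch):

import java.io.ByteArrayInputStream;
import java.io.DataInputStream;
import java.io.IOException;

// Illustrative only: reverses the parseIntBytes/parseDoubleBytes/parseLongBytes
// encodings used by this test (big-endian, as written by DataOutputStream).
class ValueBytesSketch {
  static int asInt(byte[] raw) throws IOException {
    return new DataInputStream(new ByteArrayInputStream(raw)).readInt();
  }

  static long asLong(byte[] raw) throws IOException {
    return new DataInputStream(new ByteArrayInputStream(raw)).readLong();
  }

  static double asDouble(byte[] raw) throws IOException {
    return new DataInputStream(new ByteArrayInputStream(raw)).readDouble();
  }

  public static void main(String[] args) throws IOException {
    byte[] sid = {0, 0, 0, 1}; // the "cf:sid" cell written for row r1
    System.out.println(asInt(sid)); // prints 1
  }
}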
assertArrayEquals(parseIntBytes("1"), row.getValue(COLUMN_FAMILY, SID)); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, DEGREES)); + assertArrayEquals(parseDoubleBytes("44.5"), row.getValue(COLUMN_FAMILY, DEGREES)); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, MILLIS)); + assertArrayEquals(parseLongBytes("555"), row.getValue(COLUMN_FAMILY, MILLIS)); + } + + @Test + public void testGetOnlyName() throws Exception { + FileInputFormat.addInputPath(conf, new Path("unused")); + + InputSplit[] splits = inputformat.getSplits(conf, 0); + assertEquals(splits.length, 1); + RecordReader reader = inputformat.getRecordReader(splits[0], conf, null); + Text rowId = new Text("r1"); + AccumuloHiveRow row = new AccumuloHiveRow(); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes()); + + rowId = new Text("r2"); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes()); + + rowId = new Text("r3"); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes()); + + assertFalse(reader.next(rowId, row)); + } + + @Test + public void testDegreesAndMillis() throws Exception { + Connector con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes())); + Scanner scan = con.createScanner(TEST_TABLE, new Authorizations("blah")); + IteratorSetting is = new IteratorSetting(1, PrimitiveComparisonFilter.FILTER_PREFIX + 1, + PrimitiveComparisonFilter.class); + + is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, DoubleCompare.class.getName()); + is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, GreaterThanOrEqual.class.getName()); + is.addOption(PrimitiveComparisonFilter.CONST_VAL, + new String(Base64.encodeBase64(parseDoubleBytes("55.6")))); + is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:dgrs"); + scan.addScanIterator(is); + + IteratorSetting is2 = new IteratorSetting(2, PrimitiveComparisonFilter.FILTER_PREFIX + 2, + PrimitiveComparisonFilter.class); + + is2.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, LongCompare.class.getName()); + is2.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, LessThan.class.getName()); + is2.addOption(PrimitiveComparisonFilter.CONST_VAL, + new String(Base64.encodeBase64(parseLongBytes("778")))); + is2.addOption(PrimitiveComparisonFilter.COLUMN, "cf:mills"); + + scan.addScanIterator(is2); + + boolean foundDennis = false; + int totalCount = 0; + for (Map.Entry kv : scan) { + boolean foundName = false; + boolean foundSid = false; + boolean foundDegrees = false; + boolean foundMillis = false; + SortedMap items = PrimitiveComparisonFilter.decodeRow(kv.getKey(), kv.getValue()); + for (Map.Entry item : items.entrySet()) { + SortedMap nestedItems = PrimitiveComparisonFilter.decodeRow(item.getKey(), + item.getValue()); + for (Map.Entry nested : nestedItems.entrySet()) { + if (nested.getKey().getRow().toString().equals("r3")) { + foundDennis = true; + } + if (nested.getKey().getColumnQualifier().equals(NAME)) { + foundName = true; + } else if (nested.getKey().getColumnQualifier().equals(SID)) { + foundSid = true; + } else if (nested.getKey().getColumnQualifier().equals(DEGREES)) { + 
foundDegrees = true; + } else if (nested.getKey().getColumnQualifier().equals(MILLIS)) { + foundMillis = true; + } + } + } + totalCount++; + assertTrue(foundDegrees & foundMillis & foundName & foundSid); + } + assertTrue(foundDennis); + assertEquals(totalCount, 1); + } + + @Test + public void testGreaterThan1Sid() throws Exception { + Connector con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes())); + Scanner scan = con.createScanner(TEST_TABLE, new Authorizations("blah")); + IteratorSetting is = new IteratorSetting(1, PrimitiveComparisonFilter.FILTER_PREFIX + 1, + PrimitiveComparisonFilter.class); + + is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName()); + is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, GreaterThan.class.getName()); + is.addOption(PrimitiveComparisonFilter.CONST_VAL, + new String(Base64.encodeBase64(parseIntBytes("1")))); + is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:sid"); + scan.addScanIterator(is); + boolean foundMark = false; + boolean foundDennis = false; + int totalCount = 0; + for (Map.Entry kv : scan) { + boolean foundName = false; + boolean foundSid = false; + boolean foundDegrees = false; + boolean foundMillis = false; + SortedMap items = PrimitiveComparisonFilter.decodeRow(kv.getKey(), kv.getValue()); + for (Map.Entry item : items.entrySet()) { + if (item.getKey().getRow().toString().equals("r2")) { + foundMark = true; + } else if (item.getKey().getRow().toString().equals("r3")) { + foundDennis = true; + } + if (item.getKey().getColumnQualifier().equals(NAME)) { + foundName = true; + } else if (item.getKey().getColumnQualifier().equals(SID)) { + foundSid = true; + } else if (item.getKey().getColumnQualifier().equals(DEGREES)) { + foundDegrees = true; + } else if (item.getKey().getColumnQualifier().equals(MILLIS)) { + foundMillis = true; + } + } + totalCount++; + assertTrue(foundDegrees & foundMillis & foundName & foundSid); + } + assertTrue(foundDennis & foundMark); + assertEquals(totalCount, 2); + } + + @Test + public void testNameEqualBrian() throws Exception { + Connector con = mockInstance.getConnector(USER, new PasswordToken(PASS.getBytes())); + Scanner scan = con.createScanner(TEST_TABLE, new Authorizations("blah")); + IteratorSetting is = new IteratorSetting(1, PrimitiveComparisonFilter.FILTER_PREFIX + 1, + PrimitiveComparisonFilter.class); + + is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName()); + is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName()); + is.addOption(PrimitiveComparisonFilter.CONST_VAL, + new String(Base64.encodeBase64("brian".getBytes()))); + is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:name"); + scan.addScanIterator(is); + boolean foundName = false; + boolean foundSid = false; + boolean foundDegrees = false; + boolean foundMillis = false; + for (Map.Entry kv : scan) { + SortedMap items = PrimitiveComparisonFilter.decodeRow(kv.getKey(), kv.getValue()); + for (Map.Entry item : items.entrySet()) { + assertEquals(item.getKey().getRow().toString(), "r1"); + if (item.getKey().getColumnQualifier().equals(NAME)) { + foundName = true; + assertArrayEquals(item.getValue().get(), "brian".getBytes()); + } else if (item.getKey().getColumnQualifier().equals(SID)) { + foundSid = true; + assertArrayEquals(item.getValue().get(), parseIntBytes("1")); + } else if (item.getKey().getColumnQualifier().equals(DEGREES)) { + foundDegrees = true; + assertArrayEquals(item.getValue().get(), parseDoubleBytes("44.5")); + } else if 
(item.getKey().getColumnQualifier().equals(MILLIS)) { + foundMillis = true; + assertArrayEquals(item.getValue().get(), parseLongBytes("555")); + } + } + } + assertTrue(foundDegrees & foundMillis & foundName & foundSid); + } + + @Test + public void testGetNone() throws Exception { + FileInputFormat.addInputPath(conf, new Path("unused")); + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:f1"); + InputSplit[] splits = inputformat.getSplits(conf, 0); + assertEquals(splits.length, 1); + RecordReader reader = inputformat.getRecordReader(splits[0], conf, null); + Text rowId = new Text("r1"); + AccumuloHiveRow row = new AccumuloHiveRow(); + row.setRowId("r1"); + assertFalse(reader.next(rowId, row)); + } + + @Test + public void testIteratorNotInSplitsCompensation() throws Exception { + FileInputFormat.addInputPath(conf, new Path("unused")); + InputSplit[] splits = inputformat.getSplits(conf, 0); + + assertEquals(1, splits.length); + InputSplit split = splits[0]; + + IteratorSetting is = new IteratorSetting(1, PrimitiveComparisonFilter.FILTER_PREFIX + 1, + PrimitiveComparisonFilter.class); + + is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName()); + is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName()); + is.addOption(PrimitiveComparisonFilter.CONST_VAL, + new String(Base64.encodeBase64(new byte[] {'0'}))); + is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:cq"); + + // Mock out the predicate handler because it's just easier + AccumuloPredicateHandler predicateHandler = Mockito.mock(AccumuloPredicateHandler.class); + Mockito.when( + predicateHandler.getIterators(Mockito.any(JobConf.class), Mockito.any(ColumnMapper.class))) + .thenReturn(Arrays.asList(is)); + + // Set it on our inputformat + inputformat.predicateHandler = predicateHandler; + + inputformat.getRecordReader(split, conf, null); + + // The code should account for the bug and update the iterators on the split + List settingsOnSplit = ((HiveAccumuloSplit) split).getSplit().getIterators(); + assertEquals(1, settingsOnSplit.size()); + assertEquals(is, settingsOnSplit.get(0)); + } + + @Test + public void testColumnMappingsToPairs() { + List mappings = new ArrayList(); + Set> columns = new HashSet>(); + + // Row ID + mappings.add(new HiveAccumuloRowIdColumnMapping(AccumuloHiveConstants.ROWID, + ColumnEncoding.STRING, "row", TypeInfoFactory.stringTypeInfo.toString())); + + // Some cf:cq + mappings.add(new HiveAccumuloColumnMapping("person", "name", ColumnEncoding.STRING, "col1", + TypeInfoFactory.stringTypeInfo.toString())); + mappings.add(new HiveAccumuloColumnMapping("person", "age", ColumnEncoding.STRING, "col2", + TypeInfoFactory.stringTypeInfo.toString())); + mappings.add(new HiveAccumuloColumnMapping("person", "height", ColumnEncoding.STRING, "col3", + TypeInfoFactory.stringTypeInfo.toString())); + + // Bare cf + mappings.add(new HiveAccumuloColumnMapping("city", "name", ColumnEncoding.STRING, "col4", + TypeInfoFactory.stringTypeInfo.toString())); + + columns.add(new Pair(new Text("person"), new Text("name"))); + columns.add(new Pair(new Text("person"), new Text("age"))); + columns.add(new Pair(new Text("person"), new Text("height"))); + // Null qualifier would mean all qualifiers in that family, want an empty qualifier + columns.add(new Pair(new Text("city"), new Text("name"))); + + assertEquals(columns, inputformat.getPairCollection(mappings)); + } + + @Test + public void testConfigureMockAccumuloInputFormat() throws Exception { + AccumuloConnectionParameters 
accumuloParams = new AccumuloConnectionParameters(conf); + ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), + conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes); + Set> cfCqPairs = inputformat + .getPairCollection(columnMapper.getColumnMappings()); + List iterators = Collections.emptyList(); + Set ranges = Collections.singleton(new Range()); + + HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class); + + // Call out to the real configure method + Mockito.doCallRealMethod().when(mockInputFormat) + .configure(conf, mockInstance, con, accumuloParams, columnMapper, iterators, ranges); + + // Also compute the correct cf:cq pairs so we can assert the right argument was passed + Mockito.doCallRealMethod().when(mockInputFormat) + .getPairCollection(columnMapper.getColumnMappings()); + + mockInputFormat.configure(conf, mockInstance, con, accumuloParams, columnMapper, iterators, + ranges); + + // Verify that the correct methods are invoked on AccumuloInputFormat + Mockito.verify(mockInputFormat).setMockInstance(conf, mockInstance.getInstanceName()); + Mockito.verify(mockInputFormat).setConnectorInfo(conf, USER, new PasswordToken(PASS)); + Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE); + Mockito.verify(mockInputFormat).setScanAuthorizations(conf, + con.securityOperations().getUserAuthorizations(USER)); + Mockito.verify(mockInputFormat).addIterators(conf, iterators); + Mockito.verify(mockInputFormat).setRanges(conf, ranges); + Mockito.verify(mockInputFormat).fetchColumns(conf, cfCqPairs); + } + + @Test + public void testConfigureAccumuloInputFormat() throws Exception { + AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf); + ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), + conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes); + Set> cfCqPairs = inputformat + .getPairCollection(columnMapper.getColumnMappings()); + List iterators = Collections.emptyList(); + Set ranges = Collections.singleton(new Range()); + String instanceName = "realInstance"; + String zookeepers = "host1:2181,host2:2181,host3:2181"; + + ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class); + HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class); + + // Stub out the ZKI mock + Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName); + Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers); + + // Call out to the real configure method + Mockito.doCallRealMethod().when(mockInputFormat) + .configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges); + + // Also compute the correct cf:cq pairs so we can assert the right argument was passed + Mockito.doCallRealMethod().when(mockInputFormat) + .getPairCollection(columnMapper.getColumnMappings()); + + mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, + ranges); + + // Verify that the correct methods are invoked on AccumuloInputFormat + Mockito.verify(mockInputFormat).setZooKeeperInstance(conf, instanceName, zookeepers); + Mockito.verify(mockInputFormat).setConnectorInfo(conf, USER, new PasswordToken(PASS)); + Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE); + Mockito.verify(mockInputFormat).setScanAuthorizations(conf, + con.securityOperations().getUserAuthorizations(USER)); + 
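The Mockito verifications in these configure tests pin down which static setters the handler delegates to; a minimal sketch of the equivalent direct configuration, assuming the Accumulo 1.6 org.apache.accumulo.core.client.mapred API (instance, credentials, and column values below are the illustrative ones used by this test):

import java.util.Collections;

import org.apache.accumulo.core.client.mapred.AccumuloInputFormat;
import org.apache.accumulo.core.client.security.tokens.PasswordToken;
import org.apache.accumulo.core.data.Range;
import org.apache.accumulo.core.security.Authorizations;
import org.apache.accumulo.core.util.Pair;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;

// Illustrative sketch: configuring AccumuloInputFormat by hand, mirroring the calls
// these tests verify on HiveAccumuloTableInputFormat.
class DirectInputFormatSetupSketch {
  static void configure(JobConf job) throws Exception {
    // Newer Accumulo releases prefer a ClientConfiguration-based overload here.
    AccumuloInputFormat.setZooKeeperInstance(job, "realInstance",
        "host1:2181,host2:2181,host3:2181");
    AccumuloInputFormat.setConnectorInfo(job, "user", new PasswordToken("password"));
    AccumuloInputFormat.setInputTableName(job, "table1");
    AccumuloInputFormat.setScanAuthorizations(job, new Authorizations("blah"));
    AccumuloInputFormat.setRanges(job, Collections.singleton(new Range()));
    AccumuloInputFormat.fetchColumns(job,
        Collections.singleton(new Pair<Text, Text>(new Text("cf"), new Text("name"))));
  }
}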
Mockito.verify(mockInputFormat).addIterators(conf, iterators); + Mockito.verify(mockInputFormat).setRanges(conf, ranges); + Mockito.verify(mockInputFormat).fetchColumns(conf, cfCqPairs); + } + + @Test + public void testConfigureAccumuloInputFormatWithAuthorizations() throws Exception { + AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf); + conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo,bar"); + ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), + conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes); + Set> cfCqPairs = inputformat + .getPairCollection(columnMapper.getColumnMappings()); + List iterators = Collections.emptyList(); + Set ranges = Collections.singleton(new Range()); + String instanceName = "realInstance"; + String zookeepers = "host1:2181,host2:2181,host3:2181"; + + ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class); + HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class); + + // Stub out the ZKI mock + Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName); + Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers); + + // Call out to the real configure method + Mockito.doCallRealMethod().when(mockInputFormat) + .configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges); + + // Also compute the correct cf:cq pairs so we can assert the right argument was passed + Mockito.doCallRealMethod().when(mockInputFormat) + .getPairCollection(columnMapper.getColumnMappings()); + + mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, + ranges); + + // Verify that the correct methods are invoked on AccumuloInputFormat + Mockito.verify(mockInputFormat).setZooKeeperInstance(conf, instanceName, zookeepers); + Mockito.verify(mockInputFormat).setConnectorInfo(conf, USER, new PasswordToken(PASS)); + Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE); + Mockito.verify(mockInputFormat).setScanAuthorizations(conf, new Authorizations("foo,bar")); + Mockito.verify(mockInputFormat).addIterators(conf, iterators); + Mockito.verify(mockInputFormat).setRanges(conf, ranges); + Mockito.verify(mockInputFormat).fetchColumns(conf, cfCqPairs); + } + + @Test + public void testConfigureAccumuloInputFormatWithIterators() throws Exception { + AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf); + ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), + conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes); + Set> cfCqPairs = inputformat + .getPairCollection(columnMapper.getColumnMappings()); + List iterators = new ArrayList(); + Set ranges = Collections.singleton(new Range()); + String instanceName = "realInstance"; + String zookeepers = "host1:2181,host2:2181,host3:2181"; + + IteratorSetting cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class); + cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "dave"); + cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:name"); + iterators.add(cfg); + + cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class); + cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName()); + 
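Note that the CONST_VAL option in this test carries the constant as a plain string, whereas the scan-based tests above (for example testGreaterThan1Sid) Base64-encode the binary form; a short sketch of that binary packing, drawn directly from those tests (priority and column are the illustrative values they use):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

import org.apache.accumulo.core.client.IteratorSetting;
import org.apache.commons.codec.binary.Base64;
import org.apache.hadoop.hive.accumulo.predicate.PrimitiveComparisonFilter;
import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThan;
import org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare;

// Illustrative only: a PrimitiveComparisonFilter setting for "sid > 1" with the
// constant serialized and Base64-encoded the way testGreaterThan1Sid does it.
class PredicateIteratorSketch {
  static IteratorSetting sidGreaterThanOne() throws IOException {
    ByteArrayOutputStream baos = new ByteArrayOutputStream(4);
    DataOutputStream out = new DataOutputStream(baos);
    out.writeInt(1);
    out.close();

    IteratorSetting is = new IteratorSetting(1, PrimitiveComparisonFilter.FILTER_PREFIX + 1,
        PrimitiveComparisonFilter.class);
    is.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName());
    is.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, GreaterThan.class.getName());
    is.addOption(PrimitiveComparisonFilter.CONST_VAL,
        new String(Base64.encodeBase64(baos.toByteArray())));
    is.addOption(PrimitiveComparisonFilter.COLUMN, "cf:sid");
    return is;
  }
}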
cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "50"); + cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:age"); + iterators.add(cfg); + + ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class); + HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class); + + // Stub out the ZKI mock + Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName); + Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers); + + // Call out to the real configure method + Mockito.doCallRealMethod().when(mockInputFormat) + .configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges); + + // Also compute the correct cf:cq pairs so we can assert the right argument was passed + Mockito.doCallRealMethod().when(mockInputFormat) + .getPairCollection(columnMapper.getColumnMappings()); + + mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, + ranges); + + // Verify that the correct methods are invoked on AccumuloInputFormat + Mockito.verify(mockInputFormat).setZooKeeperInstance(conf, instanceName, zookeepers); + Mockito.verify(mockInputFormat).setConnectorInfo(conf, USER, new PasswordToken(PASS)); + Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE); + Mockito.verify(mockInputFormat).setScanAuthorizations(conf, + con.securityOperations().getUserAuthorizations(USER)); + Mockito.verify(mockInputFormat).addIterators(conf, iterators); + Mockito.verify(mockInputFormat).setRanges(conf, ranges); + Mockito.verify(mockInputFormat).fetchColumns(conf, cfCqPairs); + } + + @Test + public void testConfigureAccumuloInputFormatWithEmptyColumns() throws Exception { + AccumuloConnectionParameters accumuloParams = new AccumuloConnectionParameters(conf); + ColumnMapper columnMapper = new ColumnMapper(conf.get(AccumuloSerDeParameters.COLUMN_MAPPINGS), + conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), columnNames, columnTypes); + HashSet> cfCqPairs = Sets.newHashSet(); + List iterators = new ArrayList(); + Set ranges = Collections.singleton(new Range()); + String instanceName = "realInstance"; + String zookeepers = "host1:2181,host2:2181,host3:2181"; + + IteratorSetting cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class); + cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, StringCompare.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "dave"); + cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:name"); + iterators.add(cfg); + + cfg = new IteratorSetting(50, PrimitiveComparisonFilter.class); + cfg.addOption(PrimitiveComparisonFilter.P_COMPARE_CLASS, IntCompare.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.COMPARE_OPT_CLASS, Equal.class.getName()); + cfg.addOption(PrimitiveComparisonFilter.CONST_VAL, "50"); + cfg.addOption(PrimitiveComparisonFilter.COLUMN, "person:age"); + iterators.add(cfg); + + ZooKeeperInstance zkInstance = Mockito.mock(ZooKeeperInstance.class); + HiveAccumuloTableInputFormat mockInputFormat = Mockito.mock(HiveAccumuloTableInputFormat.class); + + // Stub out the ZKI mock + Mockito.when(zkInstance.getInstanceName()).thenReturn(instanceName); + Mockito.when(zkInstance.getZooKeepers()).thenReturn(zookeepers); + Mockito.when(mockInputFormat.getPairCollection(columnMapper.getColumnMappings())).thenReturn( + cfCqPairs); + + // Call out to the 
real configure method + Mockito.doCallRealMethod().when(mockInputFormat) + .configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, ranges); + + // Also compute the correct cf:cq pairs so we can assert the right argument was passed + Mockito.doCallRealMethod().when(mockInputFormat) + .getPairCollection(columnMapper.getColumnMappings()); + + mockInputFormat.configure(conf, zkInstance, con, accumuloParams, columnMapper, iterators, + ranges); + + // Verify that the correct methods are invoked on AccumuloInputFormat + Mockito.verify(mockInputFormat).setZooKeeperInstance(conf, instanceName, zookeepers); + Mockito.verify(mockInputFormat).setConnectorInfo(conf, USER, new PasswordToken(PASS)); + Mockito.verify(mockInputFormat).setInputTableName(conf, TEST_TABLE); + Mockito.verify(mockInputFormat).setScanAuthorizations(conf, + con.securityOperations().getUserAuthorizations(USER)); + Mockito.verify(mockInputFormat).addIterators(conf, iterators); + Mockito.verify(mockInputFormat).setRanges(conf, ranges); + + // fetchColumns is not called because we had no columns to fetch + } + + @Test + public void testGetProtectedField() throws Exception { + FileInputFormat.addInputPath(conf, new Path("unused")); + + BatchWriterConfig writerConf = new BatchWriterConfig(); + BatchWriter writer = con.createBatchWriter(TEST_TABLE, writerConf); + + Authorizations origAuths = con.securityOperations().getUserAuthorizations(USER); + con.securityOperations().changeUserAuthorizations(USER, + new Authorizations(origAuths.toString() + ",foo")); + + Mutation m = new Mutation("r4"); + m.put(COLUMN_FAMILY, NAME, new ColumnVisibility("foo"), new Value("frank".getBytes())); + m.put(COLUMN_FAMILY, SID, new ColumnVisibility("foo"), new Value(parseIntBytes("4"))); + m.put(COLUMN_FAMILY, DEGREES, new ColumnVisibility("foo"), new Value(parseDoubleBytes("60.6"))); + m.put(COLUMN_FAMILY, MILLIS, new ColumnVisibility("foo"), new Value(parseLongBytes("777"))); + + writer.addMutation(m); + writer.close(); + + conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo"); + + InputSplit[] splits = inputformat.getSplits(conf, 0); + assertEquals(splits.length, 1); + RecordReader reader = inputformat.getRecordReader(splits[0], conf, null); + Text rowId = new Text("r1"); + AccumuloHiveRow row = new AccumuloHiveRow(); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "brian".getBytes()); + + rowId = new Text("r2"); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "mark".getBytes()); + + rowId = new Text("r3"); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "dennis".getBytes()); + + rowId = new Text("r4"); + assertTrue(reader.next(rowId, row)); + assertEquals(row.getRowId(), rowId.toString()); + assertTrue(row.hasFamAndQual(COLUMN_FAMILY, NAME)); + assertArrayEquals(row.getValue(COLUMN_FAMILY, NAME), "frank".getBytes()); + + assertFalse(reader.next(rowId, row)); + } + + @Test + public void testMapColumnPairs() throws TooManyAccumuloColumnsException { + ColumnMapper columnMapper = new ColumnMapper(":rowID,cf:*", + conf.get(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE), Arrays.asList("row", 
"col"), + Arrays. asList(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.getMapTypeInfo( + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo))); + Set> pairs = inputformat.getPairCollection(columnMapper.getColumnMappings()); + + Assert.assertEquals(1, pairs.size()); + + Pair cfCq = pairs.iterator().next(); + Assert.assertEquals("cf", cfCq.getFirst().toString()); + Assert.assertNull(cfCq.getSecond()); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableOutputFormat.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableOutputFormat.java new file mode 100644 index 0000000..706b26e --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTableOutputFormat.java @@ -0,0 +1,492 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.mr; + +import java.io.IOException; +import java.util.Arrays; +import java.util.Iterator; +import java.util.List; +import java.util.Map.Entry; +import java.util.Properties; + +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.Instance; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.fs.FileSystem; +import org.apache.hadoop.hive.accumulo.AccumuloConnectionParameters; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.serde.AccumuloRowSerializer; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import 
org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordWriter; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; +import org.mockito.Mockito; + +import com.google.common.base.Joiner; + +/** + * + */ +public class TestHiveAccumuloTableOutputFormat { + + protected JobConf conf; + protected String user = "root"; + protected String password = "password"; + protected String instanceName = "instance"; + protected String zookeepers = "host1:2181,host2:2181,host3:2181"; + protected String outputTable = "output"; + + @Rule + public TestName test = new TestName(); + + @Before + public void setup() throws IOException { + conf = new JobConf(); + + conf.set(AccumuloConnectionParameters.USER_NAME, user); + conf.set(AccumuloConnectionParameters.USER_PASS, password); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, instanceName); + conf.set(AccumuloConnectionParameters.ZOOKEEPERS, zookeepers); + conf.set(AccumuloConnectionParameters.TABLE_NAME, outputTable); + } + + @Test + public void testBasicConfiguration() throws IOException, AccumuloSecurityException { + HiveAccumuloTableOutputFormat outputFormat = Mockito.mock(HiveAccumuloTableOutputFormat.class); + + Mockito.doCallRealMethod().when(outputFormat).configureAccumuloOutputFormat(conf); + + outputFormat.configureAccumuloOutputFormat(conf); + + Mockito.verify(outputFormat).setAccumuloConnectorInfo(conf, user, new PasswordToken(password)); + Mockito.verify(outputFormat).setAccumuloZooKeeperInstance(conf, instanceName, zookeepers); + Mockito.verify(outputFormat).setDefaultAccumuloTableName(conf, outputTable); + } + + @Test + public void testMockInstance() throws IOException, AccumuloSecurityException { + HiveAccumuloTableOutputFormat outputFormat = Mockito.mock(HiveAccumuloTableOutputFormat.class); + conf.setBoolean(AccumuloConnectionParameters.USE_MOCK_INSTANCE, true); + conf.unset(AccumuloConnectionParameters.ZOOKEEPERS); + + Mockito.doCallRealMethod().when(outputFormat).configureAccumuloOutputFormat(conf); + + outputFormat.configureAccumuloOutputFormat(conf); + + Mockito.verify(outputFormat).setAccumuloConnectorInfo(conf, user, new PasswordToken(password)); + Mockito.verify(outputFormat).setAccumuloMockInstance(conf, instanceName); + Mockito.verify(outputFormat).setDefaultAccumuloTableName(conf, outputTable); + } + + @Test + public void testWriteToMockInstance() throws Exception { + Instance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + + HiveAccumuloTableOutputFormat outputFormat = new HiveAccumuloTableOutputFormat(); + String table = test.getMethodName(); + conn.tableOperations().create(table); + + JobConf conf = new JobConf(); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, inst.getInstanceName()); + conf.set(AccumuloConnectionParameters.USER_NAME, "root"); + conf.set(AccumuloConnectionParameters.USER_PASS, ""); + conf.setBoolean(AccumuloConnectionParameters.USE_MOCK_INSTANCE, true); + conf.set(AccumuloConnectionParameters.TABLE_NAME, test.getMethodName()); + + FileSystem local = FileSystem.getLocal(conf); + outputFormat.checkOutputSpecs(local, conf); + + RecordWriter recordWriter = outputFormat + .getRecordWriter(local, conf, null, null); + + List names = Arrays.asList("row", "col1", "col2"); + List types = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo); + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq1,cf:cq2"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(names)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), AccumuloSerDeParameters.DEFAULT_VISIBILITY_LABEL, + accumuloSerDeParams.getRowIdFactory()); + + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(Arrays.asList("row", "cq1", "cq2"), + Arrays.asList(stringTypeInfo, stringTypeInfo, stringTypeInfo), + serDeParams.getSeparators(), serDeParams.getNullSequence(), + serDeParams.isLastColumnTakesRest(), serDeParams.isEscaped(), + serDeParams.getEscapeChar()); + + LazyStruct struct = (LazyStruct) LazyFactory.createLazyObject(structOI); + + ByteArrayRef bytes = new ByteArrayRef(); + bytes.setData("row value1 value2".getBytes()); + struct.init(bytes, 0, bytes.getData().length); + + // Serialize the struct into a mutation + Mutation m = serializer.serialize(struct, structOI); + + // Write the mutation + recordWriter.write(new Text(table), m); + + // Close the writer + recordWriter.close(null); + + Iterator> iter = conn.createScanner(table, new Authorizations()).iterator(); + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + Entry entry = iter.next(); + Key k = entry.getKey(); + Value v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq1", k.getColumnQualifier().toString()); + Assert.assertEquals("", k.getColumnVisibility().toString()); + Assert.assertEquals("value1", new String(v.get())); + + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + entry = iter.next(); + k = entry.getKey(); + v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq2", k.getColumnQualifier().toString()); + Assert.assertEquals("", k.getColumnVisibility().toString()); + Assert.assertEquals("value2", new String(v.get())); + + Assert.assertFalse("Iterator unexpectedly had more data", iter.hasNext()); + } + + @Test + public void testWriteToMockInstanceWithVisibility() throws Exception { + Instance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + Authorizations auths = new Authorizations("foo"); + conn.securityOperations().changeUserAuthorizations("root", auths); + + HiveAccumuloTableOutputFormat outputFormat = new HiveAccumuloTableOutputFormat(); + String table = test.getMethodName(); + conn.tableOperations().create(table); + + JobConf conf = new JobConf(); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, 
inst.getInstanceName()); + conf.set(AccumuloConnectionParameters.USER_NAME, "root"); + conf.set(AccumuloConnectionParameters.USER_PASS, ""); + conf.setBoolean(AccumuloConnectionParameters.USE_MOCK_INSTANCE, true); + conf.set(AccumuloConnectionParameters.TABLE_NAME, test.getMethodName()); + + FileSystem local = FileSystem.getLocal(conf); + outputFormat.checkOutputSpecs(local, conf); + + RecordWriter recordWriter = outputFormat + .getRecordWriter(local, conf, null, null); + + List names = Arrays.asList("row", "col1", "col2"); + List types = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo); + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq1,cf:cq2"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(names)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), new ColumnVisibility("foo"), + accumuloSerDeParams.getRowIdFactory()); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(Arrays.asList("row", "cq1", "cq2"), Arrays. asList( + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo), serDeParams.getSeparators(), serDeParams + .getNullSequence(), serDeParams.isLastColumnTakesRest(), serDeParams.isEscaped(), + serDeParams.getEscapeChar()); + + LazyStruct struct = (LazyStruct) LazyFactory.createLazyObject(structOI); + + ByteArrayRef bytes = new ByteArrayRef(); + bytes.setData("row value1 value2".getBytes()); + struct.init(bytes, 0, bytes.getData().length); + + // Serialize the struct into a mutation + Mutation m = serializer.serialize(struct, structOI); + + // Write the mutation + recordWriter.write(new Text(table), m); + + // Close the writer + recordWriter.close(null); + + Iterator> iter = conn.createScanner(table, auths).iterator(); + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + Entry entry = iter.next(); + Key k = entry.getKey(); + Value v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq1", k.getColumnQualifier().toString()); + Assert.assertEquals("foo", k.getColumnVisibility().toString()); + Assert.assertEquals("value1", new String(v.get())); + + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + entry = iter.next(); + k = entry.getKey(); + v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq2", k.getColumnQualifier().toString()); + Assert.assertEquals("foo", k.getColumnVisibility().toString()); + Assert.assertEquals("value2", new String(v.get())); + + Assert.assertFalse("Iterator unexpectedly had more data", iter.hasNext()); + } + + @Test + public void testWriteMap() throws Exception { + Instance inst = new MockInstance(test.getMethodName()); + Connector 
conn = inst.getConnector("root", new PasswordToken("")); + + HiveAccumuloTableOutputFormat outputFormat = new HiveAccumuloTableOutputFormat(); + String table = test.getMethodName(); + conn.tableOperations().create(table); + + JobConf conf = new JobConf(); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, inst.getInstanceName()); + conf.set(AccumuloConnectionParameters.USER_NAME, "root"); + conf.set(AccumuloConnectionParameters.USER_PASS, ""); + conf.setBoolean(AccumuloConnectionParameters.USE_MOCK_INSTANCE, true); + conf.set(AccumuloConnectionParameters.TABLE_NAME, test.getMethodName()); + + FileSystem local = FileSystem.getLocal(conf); + outputFormat.checkOutputSpecs(local, conf); + + RecordWriter recordWriter = outputFormat + .getRecordWriter(local, conf, null, null); + + List names = Arrays.asList("row", "col1"); + List types = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:*"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(names)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), AccumuloSerDeParameters.DEFAULT_VISIBILITY_LABEL, + accumuloSerDeParams.getRowIdFactory()); + + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + LazyStringObjectInspector stringOI = (LazyStringObjectInspector) LazyFactory + .createLazyObjectInspector(stringTypeInfo, new byte[] {0}, 0, + serDeParams.getNullSequence(), serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazyMapObjectInspector mapOI = LazyObjectInspectorFactory.getLazySimpleMapObjectInspector( + stringOI, stringOI, (byte) ',', (byte) ':', serDeParams.getNullSequence(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) LazyObjectInspectorFactory + .getLazySimpleStructObjectInspector(Arrays.asList("row", "data"), + Arrays.asList(stringOI, mapOI), (byte) ' ', serDeParams.getNullSequence(), + serDeParams.isLastColumnTakesRest(), serDeParams.isEscaped(), + serDeParams.getEscapeChar()); + + LazyStruct struct = (LazyStruct) LazyFactory.createLazyObject(structOI); + + ByteArrayRef bytes = new ByteArrayRef(); + bytes.setData("row cq1:value1,cq2:value2".getBytes()); + struct.init(bytes, 0, bytes.getData().length); + + // Serialize the struct into a mutation + Mutation m = serializer.serialize(struct, structOI); + + // Write the mutation + recordWriter.write(new Text(table), m); + + // Close the writer + recordWriter.close(null); + + Iterator> iter = conn.createScanner(table, new Authorizations()).iterator(); + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + Entry entry = iter.next(); + Key k = entry.getKey(); + Value v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq1", k.getColumnQualifier().toString()); + 
Assert.assertEquals(AccumuloSerDeParameters.DEFAULT_VISIBILITY_LABEL, + k.getColumnVisibilityParsed()); + Assert.assertEquals("value1", new String(v.get())); + + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + entry = iter.next(); + k = entry.getKey(); + v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq2", k.getColumnQualifier().toString()); + Assert.assertEquals(AccumuloSerDeParameters.DEFAULT_VISIBILITY_LABEL, + k.getColumnVisibilityParsed()); + Assert.assertEquals("value2", new String(v.get())); + + Assert.assertFalse("Iterator unexpectedly had more data", iter.hasNext()); + } + + @Test + public void testBinarySerializationOnStringFallsBackToUtf8() throws Exception { + Instance inst = new MockInstance(test.getMethodName()); + Connector conn = inst.getConnector("root", new PasswordToken("")); + + HiveAccumuloTableOutputFormat outputFormat = new HiveAccumuloTableOutputFormat(); + String table = test.getMethodName(); + conn.tableOperations().create(table); + + JobConf conf = new JobConf(); + conf.set(AccumuloConnectionParameters.INSTANCE_NAME, inst.getInstanceName()); + conf.set(AccumuloConnectionParameters.USER_NAME, "root"); + conf.set(AccumuloConnectionParameters.USER_PASS, ""); + conf.setBoolean(AccumuloConnectionParameters.USE_MOCK_INSTANCE, true); + conf.set(AccumuloConnectionParameters.TABLE_NAME, test.getMethodName()); + + FileSystem local = FileSystem.getLocal(conf); + outputFormat.checkOutputSpecs(local, conf); + + RecordWriter recordWriter = outputFormat + .getRecordWriter(local, conf, null, null); + + List names = Arrays.asList("row", "col1", "col2"); + List types = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo); + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq1,cf:cq2"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(names)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + tableProperties.setProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, ColumnEncoding.BINARY.getName()); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), AccumuloSerDeParameters.DEFAULT_VISIBILITY_LABEL, + accumuloSerDeParams.getRowIdFactory()); + + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(Arrays.asList("row", "cq1", "cq2"), + Arrays.asList(stringTypeInfo, stringTypeInfo, stringTypeInfo), + serDeParams.getSeparators(), serDeParams.getNullSequence(), + serDeParams.isLastColumnTakesRest(), serDeParams.isEscaped(), + serDeParams.getEscapeChar()); + + LazyStruct struct = (LazyStruct) LazyFactory.createLazyObject(structOI); + + ByteArrayRef bytes = new ByteArrayRef(); + bytes.setData("row value1 value2".getBytes()); + struct.init(bytes, 0, bytes.getData().length); + + // Serialize the struct into a 
mutation + Mutation m = serializer.serialize(struct, structOI); + + // Write the mutation + recordWriter.write(new Text(table), m); + + // Close the writer + recordWriter.close(null); + + Iterator> iter = conn.createScanner(table, new Authorizations()).iterator(); + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + Entry entry = iter.next(); + Key k = entry.getKey(); + Value v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq1", k.getColumnQualifier().toString()); + Assert.assertEquals("", k.getColumnVisibility().toString()); + Assert.assertEquals("value1", new String(v.get())); + + Assert.assertTrue("Iterator did not have an element as expected", iter.hasNext()); + + entry = iter.next(); + k = entry.getKey(); + v = entry.getValue(); + + Assert.assertEquals("row", k.getRow().toString()); + Assert.assertEquals("cf", k.getColumnFamily().toString()); + Assert.assertEquals("cq2", k.getColumnQualifier().toString()); + Assert.assertEquals("", k.getColumnVisibility().toString()); + Assert.assertEquals("value2", new String(v.get())); + + Assert.assertFalse("Iterator unexpectedly had more data", iter.hasNext()); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTypes.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTypes.java new file mode 100644 index 0000000..a378535 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/mr/TestHiveAccumuloTypes.java @@ -0,0 +1,826 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.mr; + +import static org.junit.Assert.assertEquals; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.sql.Date; +import java.sql.Timestamp; +import java.util.Map.Entry; + +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.mock.MockInstance; +import org.apache.accumulo.core.client.security.tokens.PasswordToken; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.data.Value; +import org.apache.accumulo.core.security.Authorizations; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.common.type.HiveChar; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.common.type.HiveVarchar; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.ByteStream; +import org.apache.hadoop.hive.serde2.io.DateWritable; +import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.io.TimestampWritable; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyBoolean; +import org.apache.hadoop.hive.serde2.lazy.LazyByte; +import org.apache.hadoop.hive.serde2.lazy.LazyDate; +import org.apache.hadoop.hive.serde2.lazy.LazyDouble; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyFloat; +import org.apache.hadoop.hive.serde2.lazy.LazyHiveChar; +import org.apache.hadoop.hive.serde2.lazy.LazyHiveDecimal; +import org.apache.hadoop.hive.serde2.lazy.LazyHiveVarchar; +import org.apache.hadoop.hive.serde2.lazy.LazyInteger; +import org.apache.hadoop.hive.serde2.lazy.LazyLong; +import org.apache.hadoop.hive.serde2.lazy.LazyShort; +import org.apache.hadoop.hive.serde2.lazy.LazyString; +import org.apache.hadoop.hive.serde2.lazy.LazyTimestamp; +import org.apache.hadoop.hive.serde2.lazy.LazyUtils; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyBooleanObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyByteObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyDateObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyDoubleObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyFloatObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveCharObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveDecimalObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyHiveVarcharObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyIntObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyLongObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyPrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyShortObjectInspector; +import 
org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyTimestampObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaBooleanObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaByteObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaDateObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaDoubleObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaFloatObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveCharObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveDecimalObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaHiveVarcharObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaIntObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaLongObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaShortObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaStringObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaTimestampObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.FileInputFormat; +import org.apache.hadoop.mapred.InputSplit; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.mapred.RecordReader; +import org.junit.Assert; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TestName; + +/** + * + */ +public class TestHiveAccumuloTypes { + + @Rule + public TestName test = new TestName(); + + @Test + public void testBinaryTypes() throws Exception { + final String tableName = test.getMethodName(), user = "root", pass = ""; + + MockInstance mockInstance = new MockInstance(test.getMethodName()); + Connector conn = mockInstance.getConnector(user, new PasswordToken(pass)); + HiveAccumuloTableInputFormat inputformat = new HiveAccumuloTableInputFormat(); + JobConf conf = new JobConf(); + + conf.set(AccumuloSerDeParameters.TABLE_NAME, tableName); + conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true"); + conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName()); + conf.set(AccumuloSerDeParameters.USER_NAME, user); + conf.set(AccumuloSerDeParameters.USER_PASS, pass); + conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181"); // not used for mock, but + // required by input format. 
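+ // The mapping below pairs the Accumulo row ID (:rowID) with 13 typed cf:qualifier columns; with DEFAULT_STORAGE_TYPE set to "binary", each value is written and read back in its binary encoding.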
+ + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, AccumuloHiveConstants.ROWID + + ",cf:string,cf:boolean,cf:tinyint,cf:smallint,cf:int,cf:bigint" + + ",cf:float,cf:double,cf:decimal,cf:date,cf:timestamp,cf:char,cf:varchar"); + conf.set( + serdeConstants.LIST_COLUMNS, + "string,string,boolean,tinyint,smallint,int,bigint,float,double,decimal,date,timestamp,char(4),varchar(7)"); + conf.set( + serdeConstants.LIST_COLUMN_TYPES, + "string,string,boolean,tinyint,smallint,int,bigint,float,double,decimal,date,timestamp,char(4),varchar(7)"); + conf.set(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, "binary"); + + conn.tableOperations().create(tableName); + BatchWriterConfig writerConf = new BatchWriterConfig(); + BatchWriter writer = conn.createBatchWriter(tableName, writerConf); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + + String cf = "cf"; + byte[] cfBytes = cf.getBytes(); + + Mutation m = new Mutation("row1"); + + // string + String stringValue = "string"; + JavaStringObjectInspector stringOI = (JavaStringObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, stringOI.create(stringValue), stringOI, false, (byte) 0, + null); + m.put(cfBytes, "string".getBytes(), baos.toByteArray()); + + // boolean + boolean booleanValue = true; + baos.reset(); + JavaBooleanObjectInspector booleanOI = (JavaBooleanObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME)); + LazyUtils.writePrimitive(baos, booleanOI.create(booleanValue), booleanOI); + m.put(cfBytes, "boolean".getBytes(), baos.toByteArray()); + + // tinyint + byte tinyintValue = -127; + baos.reset(); + JavaByteObjectInspector byteOI = (JavaByteObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.TINYINT_TYPE_NAME)); + LazyUtils.writePrimitive(baos, tinyintValue, byteOI); + m.put(cfBytes, "tinyint".getBytes(), baos.toByteArray()); + + // smallint + short smallintValue = Short.MAX_VALUE; + baos.reset(); + JavaShortObjectInspector shortOI = (JavaShortObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME)); + LazyUtils.writePrimitive(baos, smallintValue, shortOI); + m.put(cfBytes, "smallint".getBytes(), baos.toByteArray()); + + // int + int intValue = Integer.MAX_VALUE; + baos.reset(); + JavaIntObjectInspector intOI = (JavaIntObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.INT_TYPE_NAME)); + LazyUtils.writePrimitive(baos, intValue, intOI); + m.put(cfBytes, "int".getBytes(), baos.toByteArray()); + + // bigint + long bigintValue = Long.MAX_VALUE; + baos.reset(); + JavaLongObjectInspector longOI = (JavaLongObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BIGINT_TYPE_NAME)); + LazyUtils.writePrimitive(baos, bigintValue, longOI); + m.put(cfBytes, "bigint".getBytes(), baos.toByteArray()); + + // float + float floatValue = Float.MAX_VALUE; + baos.reset(); + JavaFloatObjectInspector floatOI = (JavaFloatObjectInspector) PrimitiveObjectInspectorFactory + 
.getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.FLOAT_TYPE_NAME)); + LazyUtils.writePrimitive(baos, floatValue, floatOI); + m.put(cfBytes, "float".getBytes(), baos.toByteArray()); + + // double + double doubleValue = Double.MAX_VALUE; + baos.reset(); + JavaDoubleObjectInspector doubleOI = (JavaDoubleObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.DOUBLE_TYPE_NAME)); + LazyUtils.writePrimitive(baos, doubleValue, doubleOI); + m.put(cfBytes, "double".getBytes(), baos.toByteArray()); + + // decimal + baos.reset(); + HiveDecimal decimalValue = HiveDecimal.create(65536l); + HiveDecimalWritable decimalWritable = new HiveDecimalWritable(decimalValue); + decimalWritable.write(out); + m.put(cfBytes, "decimal".getBytes(), baos.toByteArray()); + + // date + baos.reset(); + Date now = new Date(System.currentTimeMillis()); + DateWritable dateWritable = new DateWritable(now); + Date dateValue = dateWritable.get(); + dateWritable.write(out); + m.put(cfBytes, "date".getBytes(), baos.toByteArray()); + + // timestamp + baos.reset(); + Timestamp timestampValue = new Timestamp(now.getTime()); + ByteStream.Output output = new ByteStream.Output(); + TimestampWritable timestampWritable = new TimestampWritable(new Timestamp(now.getTime())); + timestampWritable.write(output); + output.close(); + m.put(cfBytes, "timestamp".getBytes(), output.toByteArray()); + + // char + baos.reset(); + HiveChar charValue = new HiveChar("char", 4); + JavaHiveCharObjectInspector charOI = (JavaHiveCharObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(new CharTypeInfo(4)); + LazyUtils.writePrimitiveUTF8(baos, charOI.create(charValue), charOI, false, (byte) 0, null); + m.put(cfBytes, "char".getBytes(), baos.toByteArray()); + + baos.reset(); + HiveVarchar varcharValue = new HiveVarchar("varchar", 7); + JavaHiveVarcharObjectInspector varcharOI = (JavaHiveVarcharObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(new VarcharTypeInfo(7)); + LazyUtils.writePrimitiveUTF8(baos, varcharOI.create(varcharValue), varcharOI, false, (byte) 0, + null); + m.put(cfBytes, "varchar".getBytes(), baos.toByteArray()); + + writer.addMutation(m); + + writer.close(); + + for (Entry e : conn.createScanner(tableName, new Authorizations())) { + System.out.println(e); + } + + // Create the RecordReader + FileInputFormat.addInputPath(conf, new Path("unused")); + InputSplit[] splits = inputformat.getSplits(conf, 0); + assertEquals(splits.length, 1); + RecordReader reader = inputformat.getRecordReader(splits[0], conf, null); + + Text key = reader.createKey(); + AccumuloHiveRow value = reader.createValue(); + + reader.next(key, value); + + Assert.assertEquals(13, value.getTuples().size()); + + ByteArrayRef byteRef = new ByteArrayRef(); + + // string + Text cfText = new Text(cf), cqHolder = new Text(); + cqHolder.set("string"); + byte[] valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyStringObjectInspector lazyStringOI = LazyPrimitiveObjectInspectorFactory + .getLazyStringObjectInspector(false, (byte) 0); + LazyString lazyString = (LazyString) LazyFactory.createLazyObject(lazyStringOI); + lazyString.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(stringValue, lazyString.getWritableObject().toString()); + + // boolean + cqHolder.set("boolean"); + valueBytes = value.getValue(cfText, 
cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyBooleanObjectInspector lazyBooleanOI = (LazyBooleanObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME)); + LazyBoolean lazyBoolean = (LazyBoolean) LazyFactory + .createLazyPrimitiveBinaryClass(lazyBooleanOI); + lazyBoolean.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(booleanValue, lazyBoolean.getWritableObject().get()); + + // tinyint + cqHolder.set("tinyint"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyByteObjectInspector lazyByteOI = (LazyByteObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.TINYINT_TYPE_NAME)); + LazyByte lazyByte = (LazyByte) LazyFactory.createLazyPrimitiveBinaryClass(lazyByteOI); + lazyByte.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(tinyintValue, lazyByte.getWritableObject().get()); + + // smallint + cqHolder.set("smallint"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyShortObjectInspector lazyShortOI = (LazyShortObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME)); + LazyShort lazyShort = (LazyShort) LazyFactory.createLazyPrimitiveBinaryClass(lazyShortOI); + lazyShort.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(smallintValue, lazyShort.getWritableObject().get()); + + // int + cqHolder.set("int"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyIntObjectInspector lazyIntOI = (LazyIntObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INT_TYPE_NAME)); + LazyInteger lazyInt = (LazyInteger) LazyFactory.createLazyPrimitiveBinaryClass(lazyIntOI); + lazyInt.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(intValue, lazyInt.getWritableObject().get()); + + // bigint + cqHolder.set("bigint"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyLongObjectInspector lazyLongOI = (LazyLongObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BIGINT_TYPE_NAME)); + LazyLong lazyLong = (LazyLong) LazyFactory.createLazyPrimitiveBinaryClass(lazyLongOI); + lazyLong.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(bigintValue, lazyLong.getWritableObject().get()); + + // float + cqHolder.set("float"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyFloatObjectInspector lazyFloatOI = (LazyFloatObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.FLOAT_TYPE_NAME)); + LazyFloat lazyFloat = (LazyFloat) LazyFactory.createLazyPrimitiveBinaryClass(lazyFloatOI); + lazyFloat.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(floatValue, lazyFloat.getWritableObject().get(), 0); + + // double + cqHolder.set("double"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + 
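+ // Binary-encoded primitives are decoded with LazyFactory.createLazyPrimitiveBinaryClass rather than the UTF-8 lazy objects used in testUtf8Types.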
LazyDoubleObjectInspector lazyDoubleOI = (LazyDoubleObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.DOUBLE_TYPE_NAME)); + LazyDouble lazyDouble = (LazyDouble) LazyFactory.createLazyPrimitiveBinaryClass(lazyDoubleOI); + lazyDouble.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(doubleValue, lazyDouble.getWritableObject().get(), 0); + + // decimal + cqHolder.set("decimal"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + ByteArrayInputStream bais = new ByteArrayInputStream(valueBytes); + DataInputStream in = new DataInputStream(bais); + decimalWritable.readFields(in); + + Assert.assertEquals(decimalValue, decimalWritable.getHiveDecimal()); + + // date + cqHolder.set("date"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + bais = new ByteArrayInputStream(valueBytes); + in = new DataInputStream(bais); + dateWritable.readFields(in); + + Assert.assertEquals(dateValue, dateWritable.get()); + + // timestamp + cqHolder.set("timestamp"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + bais = new ByteArrayInputStream(valueBytes); + in = new DataInputStream(bais); + timestampWritable.readFields(in); + + Assert.assertEquals(timestampValue, timestampWritable.getTimestamp()); + + // char + cqHolder.set("char"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyHiveCharObjectInspector lazyCharOI = (LazyHiveCharObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(new CharTypeInfo(4)); + LazyHiveChar lazyChar = (LazyHiveChar) LazyFactory.createLazyObject(lazyCharOI); + lazyChar.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(charValue, lazyChar.getWritableObject().getHiveChar()); + + // varchar + cqHolder.set("varchar"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyHiveVarcharObjectInspector lazyVarcharOI = (LazyHiveVarcharObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(new VarcharTypeInfo(7)); + LazyHiveVarchar lazyVarchar = (LazyHiveVarchar) LazyFactory.createLazyObject(lazyVarcharOI); + lazyVarchar.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(varcharValue.toString(), lazyVarchar.getWritableObject().getHiveVarchar() + .toString()); + } + + @Test + public void testUtf8Types() throws Exception { + final String tableName = test.getMethodName(), user = "root", pass = ""; + + MockInstance mockInstance = new MockInstance(test.getMethodName()); + Connector conn = mockInstance.getConnector(user, new PasswordToken(pass)); + HiveAccumuloTableInputFormat inputformat = new HiveAccumuloTableInputFormat(); + JobConf conf = new JobConf(); + + conf.set(AccumuloSerDeParameters.TABLE_NAME, tableName); + conf.set(AccumuloSerDeParameters.USE_MOCK_INSTANCE, "true"); + conf.set(AccumuloSerDeParameters.INSTANCE_NAME, test.getMethodName()); + conf.set(AccumuloSerDeParameters.USER_NAME, user); + conf.set(AccumuloSerDeParameters.USER_PASS, pass); + conf.set(AccumuloSerDeParameters.ZOOKEEPERS, "localhost:2181"); // not used for mock, but + // required by input format. 
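+ // Unlike testBinaryTypes, no DEFAULT_STORAGE_TYPE is set below, so every value round-trips through the default UTF-8 string encoding via LazyUtils.writePrimitiveUTF8.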
+ + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, AccumuloHiveConstants.ROWID + + ",cf:string,cf:boolean,cf:tinyint,cf:smallint,cf:int,cf:bigint" + + ",cf:float,cf:double,cf:decimal,cf:date,cf:timestamp,cf:char,cf:varchar"); + conf.set( + serdeConstants.LIST_COLUMNS, + "string,string,boolean,tinyint,smallint,int,bigint,float,double,decimal,date,timestamp,char(4),varchar(7)"); + conf.set( + serdeConstants.LIST_COLUMN_TYPES, + "string,string,boolean,tinyint,smallint,int,bigint,float,double,decimal,date,timestamp,char(4),varchar(7)"); + + conn.tableOperations().create(tableName); + BatchWriterConfig writerConf = new BatchWriterConfig(); + BatchWriter writer = conn.createBatchWriter(tableName, writerConf); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + + String cf = "cf"; + byte[] cfBytes = cf.getBytes(); + ByteArrayRef byteRef = new ByteArrayRef(); + + Mutation m = new Mutation("row1"); + + // string + String stringValue = "string"; + baos.reset(); + JavaStringObjectInspector stringOI = (JavaStringObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, stringOI.create(stringValue), stringOI, false, (byte) 0, + null); + m.put(cfBytes, "string".getBytes(), baos.toByteArray()); + + // boolean + boolean booleanValue = true; + baos.reset(); + JavaBooleanObjectInspector booleanOI = (JavaBooleanObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, booleanOI.create(booleanValue), booleanOI, false, (byte) 0, + null); + m.put(cfBytes, "boolean".getBytes(), baos.toByteArray()); + + // tinyint + byte tinyintValue = -127; + baos.reset(); + JavaByteObjectInspector byteOI = (JavaByteObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.TINYINT_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, tinyintValue, byteOI, false, (byte) 0, null); + m.put(cfBytes, "tinyint".getBytes(), baos.toByteArray()); + + // smallint + short smallintValue = Short.MAX_VALUE; + baos.reset(); + JavaShortObjectInspector shortOI = (JavaShortObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, smallintValue, shortOI, false, (byte) 0, null); + m.put(cfBytes, "smallint".getBytes(), baos.toByteArray()); + + // int + int intValue = Integer.MAX_VALUE; + baos.reset(); + JavaIntObjectInspector intOI = (JavaIntObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.INT_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, intValue, intOI, false, (byte) 0, null); + m.put(cfBytes, "int".getBytes(), baos.toByteArray()); + + // bigint + long bigintValue = Long.MAX_VALUE; + baos.reset(); + JavaLongObjectInspector longOI = (JavaLongObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BIGINT_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, bigintValue, longOI, false, (byte) 0, null); + m.put(cfBytes, "bigint".getBytes(), baos.toByteArray()); + + // float + float floatValue = Float.MAX_VALUE; + baos.reset(); + JavaFloatObjectInspector floatOI = 
(JavaFloatObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.FLOAT_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, floatValue, floatOI, false, (byte) 0, null); + m.put(cfBytes, "float".getBytes(), baos.toByteArray()); + + // double + double doubleValue = Double.MAX_VALUE; + baos.reset(); + JavaDoubleObjectInspector doubleOI = (JavaDoubleObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.DOUBLE_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, doubleValue, doubleOI, false, (byte) 0, null); + m.put(cfBytes, "double".getBytes(), baos.toByteArray()); + + // decimal + HiveDecimal decimalValue = HiveDecimal.create("1.23"); + baos.reset(); + JavaHiveDecimalObjectInspector decimalOI = (JavaHiveDecimalObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(new DecimalTypeInfo(5, 2)); + LazyUtils.writePrimitiveUTF8(baos, decimalOI.create(decimalValue), decimalOI, false, (byte) 0, + null); + m.put(cfBytes, "decimal".getBytes(), baos.toByteArray()); + + // date + Date now = new Date(System.currentTimeMillis()); + DateWritable dateWritable = new DateWritable(now); + Date dateValue = dateWritable.get(); + baos.reset(); + JavaDateObjectInspector dateOI = (JavaDateObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.DATE_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, dateOI.create(dateValue), dateOI, false, (byte) 0, null); + m.put(cfBytes, "date".getBytes(), baos.toByteArray()); + + // timestamp + Timestamp timestampValue = new Timestamp(now.getTime()); + baos.reset(); + JavaTimestampObjectInspector timestampOI = (JavaTimestampObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME)); + LazyUtils.writePrimitiveUTF8(baos, timestampOI.create(timestampValue), timestampOI, false, + (byte) 0, null); + m.put(cfBytes, "timestamp".getBytes(), baos.toByteArray()); + + // char + baos.reset(); + HiveChar charValue = new HiveChar("char", 4); + JavaHiveCharObjectInspector charOI = (JavaHiveCharObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(new CharTypeInfo(4)); + LazyUtils.writePrimitiveUTF8(baos, charOI.create(charValue), charOI, false, (byte) 0, null); + m.put(cfBytes, "char".getBytes(), baos.toByteArray()); + + // varchar + baos.reset(); + HiveVarchar varcharValue = new HiveVarchar("varchar", 7); + JavaHiveVarcharObjectInspector varcharOI = (JavaHiveVarcharObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(new VarcharTypeInfo(7)); + LazyUtils.writePrimitiveUTF8(baos, varcharOI.create(varcharValue), varcharOI, false, (byte) 0, + null); + m.put(cfBytes, "varchar".getBytes(), baos.toByteArray()); + + writer.addMutation(m); + + writer.close(); + + for (Entry e : conn.createScanner(tableName, new Authorizations())) { + System.out.println(e); + } + + // Create the RecordReader + FileInputFormat.addInputPath(conf, new Path("unused")); + InputSplit[] splits = inputformat.getSplits(conf, 0); + assertEquals(splits.length, 1); + RecordReader reader = inputformat.getRecordReader(splits[0], conf, null); + + Text key = reader.createKey(); + AccumuloHiveRow value = reader.createValue(); + + reader.next(key, value); + + Assert.assertEquals(13, value.getTuples().size()); 
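+ // Verify that each of the 13 mapped columns decodes back to its original value through the lazy UTF-8 object inspectors.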
+ + // string + Text cfText = new Text(cf), cqHolder = new Text(); + cqHolder.set("string"); + byte[] valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyStringObjectInspector lazyStringOI = LazyPrimitiveObjectInspectorFactory + .getLazyStringObjectInspector(false, (byte) 0); + LazyString lazyString = (LazyString) LazyFactory.createLazyObject(lazyStringOI); + lazyString.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(new Text(stringValue), lazyString.getWritableObject()); + + // boolean + cqHolder.set("boolean"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyBooleanObjectInspector lazyBooleanOI = (LazyBooleanObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BOOLEAN_TYPE_NAME)); + LazyBoolean lazyBoolean = (LazyBoolean) LazyFactory.createLazyObject(lazyBooleanOI); + lazyBoolean.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(booleanValue, lazyBoolean.getWritableObject().get()); + + // tinyint + cqHolder.set("tinyint"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyByteObjectInspector lazyByteOI = (LazyByteObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.TINYINT_TYPE_NAME)); + LazyByte lazyByte = (LazyByte) LazyFactory.createLazyObject(lazyByteOI); + lazyByte.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(tinyintValue, lazyByte.getWritableObject().get()); + + // smallint + cqHolder.set("smallint"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyShortObjectInspector lazyShortOI = (LazyShortObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.SMALLINT_TYPE_NAME)); + LazyShort lazyShort = (LazyShort) LazyFactory.createLazyObject(lazyShortOI); + lazyShort.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(smallintValue, lazyShort.getWritableObject().get()); + + // int + cqHolder.set("int"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyIntObjectInspector lazyIntOI = (LazyIntObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.INT_TYPE_NAME)); + LazyInteger lazyInt = (LazyInteger) LazyFactory.createLazyObject(lazyIntOI); + lazyInt.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(intValue, lazyInt.getWritableObject().get()); + + // bigint + cqHolder.set("bigint"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyLongObjectInspector lazyLongOI = (LazyLongObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.BIGINT_TYPE_NAME)); + LazyLong lazyLong = (LazyLong) LazyFactory.createLazyObject(lazyLongOI); + lazyLong.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(bigintValue, lazyLong.getWritableObject().get()); + + // float + cqHolder.set("float"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + 
LazyFloatObjectInspector lazyFloatOI = (LazyFloatObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.FLOAT_TYPE_NAME)); + LazyFloat lazyFloat = (LazyFloat) LazyFactory.createLazyObject(lazyFloatOI); + lazyFloat.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(floatValue, lazyFloat.getWritableObject().get(), 0); + + // double + cqHolder.set("double"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyDoubleObjectInspector lazyDoubleOI = (LazyDoubleObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.DOUBLE_TYPE_NAME)); + LazyDouble lazyDouble = (LazyDouble) LazyFactory.createLazyObject(lazyDoubleOI); + lazyDouble.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(doubleValue, lazyDouble.getWritableObject().get(), 0); + + // decimal + cqHolder.set("decimal"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyHiveDecimalObjectInspector lazyDecimalOI = (LazyHiveDecimalObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(new DecimalTypeInfo(5, 2)); + LazyHiveDecimal lazyDecimal = (LazyHiveDecimal) LazyFactory.createLazyObject(lazyDecimalOI); + lazyDecimal.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(decimalValue, lazyDecimal.getWritableObject().getHiveDecimal()); + + // date + cqHolder.set("date"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyDateObjectInspector lazyDateOI = (LazyDateObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.DATE_TYPE_NAME)); + LazyDate lazyDate = (LazyDate) LazyFactory.createLazyObject(lazyDateOI); + lazyDate.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(dateValue, lazyDate.getWritableObject().get()); + + // timestamp + cqHolder.set("timestamp"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyTimestampObjectInspector lazyTimestampOI = (LazyTimestampObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.TIMESTAMP_TYPE_NAME)); + LazyTimestamp lazyTimestamp = (LazyTimestamp) LazyFactory.createLazyObject(lazyTimestampOI); + lazyTimestamp.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(timestampValue, lazyTimestamp.getWritableObject().getTimestamp()); + + // char + cqHolder.set("char"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyHiveCharObjectInspector lazyCharOI = (LazyHiveCharObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(new CharTypeInfo(4)); + LazyHiveChar lazyChar = (LazyHiveChar) LazyFactory.createLazyObject(lazyCharOI); + lazyChar.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(charValue, lazyChar.getWritableObject().getHiveChar()); + + // varchar + cqHolder.set("varchar"); + valueBytes = value.getValue(cfText, cqHolder); + Assert.assertNotNull(valueBytes); + + byteRef.setData(valueBytes); + LazyHiveVarcharObjectInspector lazyVarcharOI = (LazyHiveVarcharObjectInspector) LazyPrimitiveObjectInspectorFactory + .getLazyObjectInspector(new 
VarcharTypeInfo(7)); + LazyHiveVarchar lazyVarchar = (LazyHiveVarchar) LazyFactory.createLazyObject(lazyVarcharOI); + lazyVarchar.init(byteRef, 0, valueBytes.length); + + Assert.assertEquals(varcharValue.toString(), lazyVarchar.getWritableObject().getHiveVarchar() + .toString()); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java new file mode 100644 index 0000000..c0b14e1 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloPredicateHandler.java @@ -0,0 +1,809 @@ +package org.apache.hadoop.hive.accumulo.predicate; + +import static org.junit.Assert.assertArrayEquals; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.io.ByteArrayOutputStream; +import java.nio.ByteBuffer; +import java.util.Arrays; +import java.util.Collection; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +import org.apache.accumulo.core.client.IteratorSetting; +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Range; +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.accumulo.predicate.compare.CompareOp; +import org.apache.hadoop.hive.accumulo.predicate.compare.DoubleCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.Equal; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.GreaterThanOrEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.IntCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThan; +import org.apache.hadoop.hive.accumulo.predicate.compare.LessThanOrEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.LongCompare; +import org.apache.hadoop.hive.accumulo.predicate.compare.NotEqual; +import org.apache.hadoop.hive.accumulo.predicate.compare.PrimitiveComparison; +import org.apache.hadoop.hive.accumulo.predicate.compare.StringCompare; +import org.apache.hadoop.hive.accumulo.serde.AccumuloSerDeParameters; +import org.apache.hadoop.hive.accumulo.serde.TooManyAccumuloColumnsException; +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.index.IndexSearchCondition; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqual; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPNotNull; +import org.apache.hadoop.hive.serde.serdeConstants; 
+import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyUtils; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaIntObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.apache.hadoop.mapred.JobConf; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; +import org.mockito.Mockito; + +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; + +public class TestAccumuloPredicateHandler { + @SuppressWarnings("unused") + private static final Logger log = Logger.getLogger(TestAccumuloPredicateHandler.class); + + private AccumuloPredicateHandler handler = AccumuloPredicateHandler.getInstance(); + private JobConf conf; + private ColumnMapper columnMapper; + + @Before + public void setup() throws TooManyAccumuloColumnsException { + FunctionRegistry.getFunctionNames(); + conf = new JobConf(); + List columnNames = Arrays.asList("field1", "rid"); + List columnTypes = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + conf.set(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columnNames)); + conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,string"); + + String columnMappingStr = "cf:f1,:rowID"; + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, columnMappingStr); + columnMapper = new ColumnMapper(columnMappingStr, ColumnEncoding.STRING.getName(), columnNames, + columnTypes); + } + + @Test + public void testGetRowIDSearchCondition() { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "hi"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqual(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + List sConditions = handler.getSearchConditions(conf); + assertEquals(sConditions.size(), 1); + } + + @Test() + public void testRangeEqual() throws SerDeException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqual(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + Collection ranges = handler.getRanges(conf, columnMapper); + assertEquals(ranges.size(), 1); + Range range = ranges.iterator().next(); + assertTrue(range.isStartKeyInclusive()); + assertFalse(range.isEndKeyInclusive()); + assertTrue(range.contains(new Key(new Text("aaa")))); + assertTrue(range.afterEndKey(new Key(new Text("aab")))); + assertTrue(range.beforeStartKey(new Key(new Text("aa")))); + } + + @Test() + public void testRangeGreaterThan() throws SerDeException { + 
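+ // For row > 'aaa' the handler should produce a range that begins just after 'aaa', so 'aaa' itself sorts before the start key while 'aab' is included.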
ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPGreaterThan(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + Collection ranges = handler.getRanges(conf, columnMapper); + assertEquals(ranges.size(), 1); + Range range = ranges.iterator().next(); + assertTrue(range.isStartKeyInclusive()); + assertFalse(range.isEndKeyInclusive()); + assertFalse(range.contains(new Key(new Text("aaa")))); + assertFalse(range.afterEndKey(new Key(new Text("ccccc")))); + assertTrue(range.contains(new Key(new Text("aab")))); + assertTrue(range.beforeStartKey(new Key(new Text("aa")))); + assertTrue(range.beforeStartKey(new Key(new Text("aaa")))); + } + + @Test + public void rangeGreaterThanOrEqual() throws SerDeException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + Collection ranges = handler.getRanges(conf, columnMapper); + assertEquals(ranges.size(), 1); + Range range = ranges.iterator().next(); + assertTrue(range.isStartKeyInclusive()); + assertFalse(range.isEndKeyInclusive()); + assertTrue(range.contains(new Key(new Text("aaa")))); + assertFalse(range.afterEndKey(new Key(new Text("ccccc")))); + assertTrue(range.contains(new Key(new Text("aab")))); + assertTrue(range.beforeStartKey(new Key(new Text("aa")))); + } + + @Test + public void rangeLessThan() throws SerDeException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPLessThan(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + Collection ranges = handler.getRanges(conf, columnMapper); + assertEquals(ranges.size(), 1); + Range range = ranges.iterator().next(); + assertTrue(range.isStartKeyInclusive()); + assertFalse(range.isEndKeyInclusive()); + assertFalse(range.contains(new Key(new Text("aaa")))); + assertTrue(range.afterEndKey(new Key(new Text("ccccc")))); + assertTrue(range.contains(new Key(new Text("aa")))); + assertTrue(range.afterEndKey(new Key(new Text("aab")))); + assertTrue(range.afterEndKey(new Key(new Text("aaa")))); + } + + @Test + public void rangeLessThanOrEqual() throws SerDeException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new 
ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + Collection ranges = handler.getRanges(conf, columnMapper); + assertEquals(ranges.size(), 1); + Range range = ranges.iterator().next(); + assertTrue(range.isStartKeyInclusive()); + assertFalse(range.isEndKeyInclusive()); + assertTrue(range.contains(new Key(new Text("aaa")))); + assertTrue(range.afterEndKey(new Key(new Text("ccccc")))); + assertTrue(range.contains(new Key(new Text("aa")))); + assertTrue(range.afterEndKey(new Key(new Text("aab")))); + assertFalse(range.afterEndKey(new Key(new Text("aaa")))); + } + + @Test + public void testDisjointRanges() throws SerDeException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children); + assertNotNull(node); + + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "bbb"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPGreaterThan(), children2); + assertNotNull(node2); + + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + String filterExpr = Utilities.serializeExpression(both); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + Collection ranges = handler.getRanges(conf, columnMapper); + + // Impossible to get ranges for row <= 'aaa' and row >= 'bbb' + assertEquals(0, ranges.size()); + } + + @Test + public void testMultipleRanges() throws SerDeException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "bbb"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPLessThan(), children2); + assertNotNull(node2); + + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + 
new GenericUDFOPAnd(), bothFilters); + + String filterExpr = Utilities.serializeExpression(both); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + List ranges = handler.getRanges(conf, columnMapper); + assertEquals(1, ranges.size()); + Range range = ranges.get(0); + assertEquals(new Range(new Key("aaa"), true, new Key("bbb"), false), range); + } + + @Test + public void testPushdownTuple() throws SerDeException, NoSuchPrimitiveComparisonException, + NoSuchCompareOpException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "field1", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 5); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqual(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + List sConditions = handler.getSearchConditions(conf); + assertEquals(sConditions.size(), 1); + IndexSearchCondition sc = sConditions.get(0); + PushdownTuple tuple = new PushdownTuple(sConditions.get(0), handler.getPrimitiveComparison(sc + .getColumnDesc().getTypeString(), sc), handler.getCompareOp(sc.getComparisonOp(), sc)); + byte[] expectedVal = new byte[4]; + ByteBuffer.wrap(expectedVal).putInt(5); + assertArrayEquals(tuple.getConstVal(), expectedVal); + assertEquals(tuple.getcOpt().getClass(), Equal.class); + assertEquals(tuple.getpCompare().getClass(), IntCompare.class); + } + + @Test(expected = NoSuchPrimitiveComparisonException.class) + public void testPushdownColumnTypeNotSupported() throws SerDeException, + NoSuchPrimitiveComparisonException, NoSuchCompareOpException { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.floatTypeInfo, "field1", null, + false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.floatTypeInfo, 5.5f); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqual(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + List sConditions = handler.getSearchConditions(conf); + assertEquals(sConditions.size(), 1); + IndexSearchCondition sc = sConditions.get(0); + + handler.getPrimitiveComparison(sc.getColumnDesc().getTypeString(), sc); + } + + @Test + public void testPushdownComparisonOptNotSupported() { + try { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "field1", null, + false); + List children = Lists.newArrayList(); + children.add(column); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPNotNull(), children); + assertNotNull(node); + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + List sConditions = handler.getSearchConditions(conf); + assertEquals(sConditions.size(), 1); + IndexSearchCondition sc = sConditions.get(0); + new PushdownTuple(sc, handler.getPrimitiveComparison(sc.getColumnDesc().getTypeString(), sc), + handler.getCompareOp(sc.getComparisonOp(), sc)); + fail("Should fail: compare op not registered for index analyzer. 
Should leave undesirable residual predicate"); + } catch (RuntimeException e) { + assertTrue(e.getMessage().contains("Unexpected residual predicate: field1 is not null")); + } catch (Exception e) { + fail(StringUtils.stringifyException(e)); + } + } + + @Test + public void testIteratorIgnoreRowIDFields() { + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List<ExprNodeDesc> children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children); + assertNotNull(node); + + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "bbb"); + List<ExprNodeDesc> children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPGreaterThan(), children2); + assertNotNull(node2); + + List<ExprNodeDesc> bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + String filterExpr = Utilities.serializeExpression(both); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + try { + List<IteratorSetting> iterators = handler.getIterators(conf, columnMapper); + assertEquals(iterators.size(), 0); + } catch (SerDeException e) { + // Fail the test if iterator generation throws, matching the other tests in this class + fail(StringUtils.stringifyException(e)); + } + } + + @Test + public void testIgnoreIteratorPushdown() throws TooManyAccumuloColumnsException { + // Override what's placed in the Configuration by setup() + conf = new JobConf(); + List<String> columnNames = Arrays.asList("field1", "field2", "rid"); + List<TypeInfo> columnTypes = Arrays.<TypeInfo>
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo); + conf.set(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columnNames)); + conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,string"); + + String columnMappingStr = "cf:f1,cf:f2,:rowID"; + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, columnMappingStr); + columnMapper = new ColumnMapper(columnMappingStr, ColumnEncoding.STRING.getName(), columnNames, + columnTypes); + + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "field1", null, + false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children); + assertNotNull(node); + + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "field2", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 5); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPGreaterThan(), children2); + assertNotNull(node2); + + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + String filterExpr = Utilities.serializeExpression(both); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + conf.setBoolean(AccumuloSerDeParameters.ITERATOR_PUSHDOWN_KEY, false); + try { + List iterators = handler.getIterators(conf, columnMapper); + assertEquals(iterators.size(), 0); + } catch (Exception e) { + fail(StringUtils.stringifyException(e)); + } + } + + @Test + public void testCreateIteratorSettings() throws Exception { + // Override what's placed in the Configuration by setup() + conf = new JobConf(); + List columnNames = Arrays.asList("field1", "field2", "rid"); + List columnTypes = Arrays. 
asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo); + conf.set(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columnNames)); + conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,int,string"); + String columnMappingStr = "cf:f1,cf:f2,:rowID"; + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, columnMappingStr); + columnMapper = new ColumnMapper(columnMappingStr, ColumnEncoding.STRING.getName(), columnNames, + columnTypes); + + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "field1", null, + false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "aaa"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children); + assertNotNull(node); + + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "field2", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 5); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPGreaterThan(), children2); + assertNotNull(node2); + + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + String filterExpr = Utilities.serializeExpression(both); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + List iterators = handler.getIterators(conf, columnMapper); + assertEquals(iterators.size(), 2); + IteratorSetting is1 = iterators.get(0); + IteratorSetting is2 = iterators.get(1); + + boolean foundQual = false; + boolean foundPCompare = false; + boolean foundCOpt = false; + boolean foundConst = false; + for (Map.Entry option : is1.getOptions().entrySet()) { + String optKey = option.getKey(); + if (optKey.equals(PrimitiveComparisonFilter.COLUMN)) { + foundQual = true; + assertEquals(option.getValue(), "cf:f1"); + } else if (optKey.equals(PrimitiveComparisonFilter.CONST_VAL)) { + foundConst = true; + assertEquals(option.getValue(), new String(Base64.encodeBase64("aaa".getBytes()))); + } else if (optKey.equals(PrimitiveComparisonFilter.COMPARE_OPT_CLASS)) { + foundCOpt = true; + assertEquals(option.getValue(), LessThanOrEqual.class.getName()); + } else if (optKey.equals(PrimitiveComparisonFilter.P_COMPARE_CLASS)) { + foundPCompare = true; + assertEquals(option.getValue(), StringCompare.class.getName()); + } + + } + assertTrue(foundConst & foundCOpt & foundPCompare & foundQual); + + foundQual = false; + foundPCompare = false; + foundCOpt = false; + foundConst = false; + for (Map.Entry option : is2.getOptions().entrySet()) { + String optKey = option.getKey(); + if (optKey.equals(PrimitiveComparisonFilter.COLUMN)) { + foundQual = true; + assertEquals(option.getValue(), "cf:f2"); + } else if (optKey.equals(PrimitiveComparisonFilter.CONST_VAL)) { + foundConst = true; + byte[] intVal = new byte[4]; + ByteBuffer.wrap(intVal).putInt(5); + assertEquals(option.getValue(), new String(Base64.encodeBase64(intVal))); + } else if (optKey.equals(PrimitiveComparisonFilter.COMPARE_OPT_CLASS)) { + foundCOpt = true; + assertEquals(option.getValue(), GreaterThan.class.getName()); + } else if 
(optKey.equals(PrimitiveComparisonFilter.P_COMPARE_CLASS)) { + foundPCompare = true; + assertEquals(option.getValue(), IntCompare.class.getName()); + } + + } + assertTrue(foundConst & foundCOpt & foundPCompare & foundQual); + } + + @Test + public void testBasicOptLookup() throws NoSuchCompareOpException { + boolean foundEqual = false; + boolean foundNotEqual = false; + boolean foundGreaterThanOrEqual = false; + boolean foundGreaterThan = false; + boolean foundLessThanOrEqual = false; + boolean foundLessThan = false; + for (String opt : handler.cOpKeyset()) { + Class compOpt = handler.getCompareOpClass(opt); + if (compOpt.getName().equals(Equal.class.getName())) { + foundEqual = true; + } else if (compOpt.getName().equals(NotEqual.class.getName())) { + foundNotEqual = true; + } else if (compOpt.getName().equals(GreaterThan.class.getName())) { + foundGreaterThan = true; + } else if (compOpt.getName().equals(GreaterThanOrEqual.class.getName())) { + foundGreaterThanOrEqual = true; + } else if (compOpt.getName().equals(LessThan.class.getName())) { + foundLessThan = true; + } else if (compOpt.getName().equals(LessThanOrEqual.class.getName())) { + foundLessThanOrEqual = true; + } + } + assertTrue("Did not find Equal comparison op", foundEqual); + assertTrue("Did not find NotEqual comparison op", foundNotEqual); + assertTrue("Did not find GreaterThan comparison op", foundGreaterThan); + assertTrue("Did not find GreaterThanOrEqual comparison op", foundGreaterThanOrEqual); + assertTrue("Did not find LessThan comparison op", foundLessThan); + assertTrue("Did not find LessThanOrEqual comparison op", foundLessThanOrEqual); + } + + @Test(expected = NoSuchCompareOpException.class) + public void testNoOptFound() throws NoSuchCompareOpException { + handler.getCompareOpClass("blah"); + } + + @Test + public void testPrimitiveComparsionLookup() throws NoSuchPrimitiveComparisonException { + boolean foundLong = false; + boolean foundString = false; + boolean foundInt = false; + boolean foundDouble = false; + for (String type : handler.pComparisonKeyset()) { + Class pCompare = handler.getPrimitiveComparisonClass(type); + if (pCompare.getName().equals(DoubleCompare.class.getName())) { + foundDouble = true; + } else if (pCompare.getName().equals(LongCompare.class.getName())) { + foundLong = true; + } else if (pCompare.getName().equals(IntCompare.class.getName())) { + foundInt = true; + } else if (pCompare.getName().equals(StringCompare.class.getName())) { + foundString = true; + } + } + assertTrue("Did not find DoubleCompare op", foundDouble); + assertTrue("Did not find LongCompare op", foundLong); + assertTrue("Did not find IntCompare op", foundInt); + assertTrue("Did not find StringCompare op", foundString); + } + + @Test + public void testRowRangeIntersection() throws SerDeException { + // rowId >= 'f' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // rowId <= 'm' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "m"); + List children2 = Lists.newArrayList(); + children2.add(column2); + 
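+ // Building the rowId <= 'm' predicate; ANDed with the rowId >= 'f' predicate above, the
+ // handler is expected to intersect the two bounds into a single Range ['f','m'], which the
+ // assertion below expresses as an inclusive start key "f" and an exclusive end key "m\0".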
children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children2); + assertNotNull(node2); + + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + String filterExpr = Utilities.serializeExpression(both); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + // Should make ['f', 'm\0') + List ranges = handler.getRanges(conf, columnMapper); + assertEquals(1, ranges.size()); + assertEquals(new Range(new Key("f"), true, new Key("m\0"), false), ranges.get(0)); + } + + @Test + public void testRowRangeGeneration() throws SerDeException { + List columnNames = Arrays.asList("key", "column"); + List columnTypes = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + conf.set(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columnNames)); + conf.set(serdeConstants.LIST_COLUMN_TYPES, "string,string"); + + String columnMappingStr = ":rowID,cf:f1"; + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, columnMappingStr); + columnMapper = new ColumnMapper(columnMappingStr, ColumnEncoding.STRING.getName(), columnNames, + columnTypes); + + // 100 < key + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "key", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 100); + List children = Lists.newArrayList(); + children.add(constant); + children.add(column); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPLessThan(), children); + assertNotNull(node); + + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + // Should make (100, +inf) + List ranges = handler.getRanges(conf, columnMapper); + Assert.assertEquals(1, ranges.size()); + Assert.assertEquals(new Range(new Text("100"), false, null, false), ranges.get(0)); + } + + @Test + public void testBinaryRangeGeneration() throws Exception { + List columnNames = Arrays.asList("key", "column"); + List columnTypes = Arrays. 
asList(TypeInfoFactory.intTypeInfo, + TypeInfoFactory.stringTypeInfo); + conf.set(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columnNames)); + conf.set(serdeConstants.LIST_COLUMN_TYPES, "int,string"); + + String columnMappingStr = ":rowID#b,cf:f1"; + conf.set(AccumuloSerDeParameters.COLUMN_MAPPINGS, columnMappingStr); + columnMapper = new ColumnMapper(columnMappingStr, ColumnEncoding.STRING.getName(), columnNames, + columnTypes); + + int intValue = 100; + + // Make binary integer value in the bytearray + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + JavaIntObjectInspector intOI = (JavaIntObjectInspector) PrimitiveObjectInspectorFactory + .getPrimitiveJavaObjectInspector(TypeInfoFactory + .getPrimitiveTypeInfo(serdeConstants.INT_TYPE_NAME)); + LazyUtils.writePrimitive(baos, intValue, intOI); + + // 100 < key + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.intTypeInfo, "key", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, intValue); + List children = Lists.newArrayList(); + children.add(constant); + children.add(column); + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPLessThan(), children); + assertNotNull(node); + + String filterExpr = Utilities.serializeExpression(node); + conf.set(TableScanDesc.FILTER_EXPR_CONF_STR, filterExpr); + + // Should make (100, +inf) + List ranges = handler.getRanges(conf, columnMapper); + Assert.assertEquals(1, ranges.size()); + Assert.assertEquals(new Range(new Text(baos.toByteArray()), false, null, false), ranges.get(0)); + } + + @Test + public void testNullRangeGeneratorOutput() throws SerDeException { + // The AccumuloRangeGenerator produces an Object (due to the limitations of the + // traversal interface) which requires interpretation of that Object into Ranges. + // Changes in the return object from the AccumuloRangeGenerator must also represent + // a change in the AccumuloPredicateHandler. + AccumuloPredicateHandler mockHandler = Mockito.mock(AccumuloPredicateHandler.class); + ExprNodeDesc root = Mockito.mock(ExprNodeDesc.class); + String hiveRowIdColumnName = "rid"; + + Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod(); + Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(null); + Mockito.when(mockHandler.getExpression(conf)).thenReturn(root); + + // A null result from AccumuloRangeGenerator is all ranges + Assert.assertEquals(Arrays.asList(new Range()), mockHandler.getRanges(conf, columnMapper)); + } + + @Test + public void testEmptyListRangeGeneratorOutput() throws SerDeException { + // The AccumuloRangeGenerator produces an Object (due to the limitations of the + // traversal interface) which requires interpretation of that Object into Ranges. + // Changes in the return object from the AccumuloRangeGenerator must also represent + // a change in the AccumuloPredicateHandler. 
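+ // Unlike the null case above (which maps to a single, full-table Range), an empty List from
+ // generateRanges() means the predicate cannot be satisfied, so getRanges() is expected to
+ // return an empty list, i.e. no ranges to scan.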
+ AccumuloPredicateHandler mockHandler = Mockito.mock(AccumuloPredicateHandler.class); + ExprNodeDesc root = Mockito.mock(ExprNodeDesc.class); + String hiveRowIdColumnName = "rid"; + + Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod(); + Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(Collections.emptyList()); + Mockito.when(mockHandler.getExpression(conf)).thenReturn(root); + + // A null result from AccumuloRangeGenerator is all ranges + Assert.assertEquals(Collections.emptyList(), mockHandler.getRanges(conf, columnMapper)); + } + + @Test + public void testSingleRangeGeneratorOutput() throws SerDeException { + // The AccumuloRangeGenerator produces an Object (due to the limitations of the + // traversal interface) which requires interpretation of that Object into Ranges. + // Changes in the return object from the AccumuloRangeGenerator must also represent + // a change in the AccumuloPredicateHandler. + AccumuloPredicateHandler mockHandler = Mockito.mock(AccumuloPredicateHandler.class); + ExprNodeDesc root = Mockito.mock(ExprNodeDesc.class); + String hiveRowIdColumnName = "rid"; + Range r = new Range("a"); + + Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod(); + Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(r); + Mockito.when(mockHandler.getExpression(conf)).thenReturn(root); + + // A null result from AccumuloRangeGenerator is all ranges + Assert.assertEquals(Collections.singletonList(r), mockHandler.getRanges(conf, columnMapper)); + } + + @Test + public void testManyRangesGeneratorOutput() throws SerDeException { + // The AccumuloRangeGenerator produces an Object (due to the limitations of the + // traversal interface) which requires interpretation of that Object into Ranges. + // Changes in the return object from the AccumuloRangeGenerator must also represent + // a change in the AccumuloPredicateHandler. + AccumuloPredicateHandler mockHandler = Mockito.mock(AccumuloPredicateHandler.class); + ExprNodeDesc root = Mockito.mock(ExprNodeDesc.class); + String hiveRowIdColumnName = "rid"; + Range r1 = new Range("a"), r2 = new Range("z"); + + Mockito.when(mockHandler.getRanges(conf, columnMapper)).thenCallRealMethod(); + Mockito.when(mockHandler.generateRanges(columnMapper, hiveRowIdColumnName, root)).thenReturn(Arrays.asList(r1, r2)); + Mockito.when(mockHandler.getExpression(conf)).thenReturn(root); + + // A null result from AccumuloRangeGenerator is all ranges + Assert.assertEquals(Arrays.asList(r1, r2), mockHandler.getRanges(conf, columnMapper)); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java new file mode 100644 index 0000000..339da07 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestAccumuloRangeGenerator.java @@ -0,0 +1,467 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.predicate; + +import static org.junit.Assert.assertNotNull; + +import java.sql.Date; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.List; + +import org.apache.accumulo.core.data.Key; +import org.apache.accumulo.core.data.Range; +import org.apache.hadoop.hive.accumulo.AccumuloHiveConstants; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.HiveAccumuloRowIdColumnMapping; +import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker; +import org.apache.hadoop.hive.ql.lib.DefaultRuleDispatcher; +import org.apache.hadoop.hive.ql.lib.Dispatcher; +import org.apache.hadoop.hive.ql.lib.GraphWalker; +import org.apache.hadoop.hive.ql.lib.Node; +import org.apache.hadoop.hive.ql.lib.NodeProcessor; +import org.apache.hadoop.hive.ql.lib.Rule; +import org.apache.hadoop.hive.ql.parse.SemanticException; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.udf.UDFLike; +import org.apache.hadoop.hive.ql.udf.UDFToString; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPAnd; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrGreaterThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPOr; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPPlus; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.collect.Lists; + +/** + * + */ +public class TestAccumuloRangeGenerator { + + private AccumuloPredicateHandler handler; + private HiveAccumuloRowIdColumnMapping rowIdMapping; + + @Before + public void setup() { + handler = AccumuloPredicateHandler.getInstance(); + rowIdMapping = new HiveAccumuloRowIdColumnMapping(AccumuloHiveConstants.ROWID, + ColumnEncoding.STRING, "row", TypeInfoFactory.stringTypeInfo.toString()); + } + + @Test + public void testRangeConjunction() throws Exception { + // rowId >= 'f' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // rowId <= 'm' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new 
ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "m"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children2); + assertNotNull(node2); + + // And UDF + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + // Should generate [f,m] + List expectedRanges = Arrays + .asList(new Range(new Key("f"), true, new Key("m\0"), false)); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(both); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + Object result = nodeOutput.get(both); + Assert.assertNotNull(result); + Assert.assertTrue("Result from graph walk was not a List", result instanceof List); + @SuppressWarnings("unchecked") + List actualRanges = (List) result; + Assert.assertEquals(expectedRanges, actualRanges); + } + + @Test + public void testRangeDisjunction() throws Exception { + // rowId >= 'f' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // rowId <= 'm' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "m"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children2); + assertNotNull(node2); + + // Or UDF + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPOr(), bothFilters); + + // Should generate (-inf,+inf) + List expectedRanges = Arrays.asList(new Range()); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. 
emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(both); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + Object result = nodeOutput.get(both); + Assert.assertNotNull(result); + Assert.assertTrue("Result from graph walk was not a List", result instanceof List); + @SuppressWarnings("unchecked") + List actualRanges = (List) result; + Assert.assertEquals(expectedRanges, actualRanges); + } + + @Test + public void testRangeConjunctionWithDisjunction() throws Exception { + // rowId >= 'h' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "h"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // rowId <= 'd' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "d"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children2); + assertNotNull(node2); + + // rowId >= 'q' + ExprNodeDesc column3 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant3 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "q"); + List children3 = Lists.newArrayList(); + children3.add(column3); + children3.add(constant3); + ExprNodeDesc node3 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children3); + assertNotNull(node3); + + // Or UDF, (rowId <= 'd' or rowId >= 'q') + List orFilters = Lists.newArrayList(); + orFilters.add(node2); + orFilters.add(node3); + ExprNodeGenericFuncDesc orNode = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPOr(), orFilters); + + // And UDF, (rowId >= 'h' and (rowId <= 'd' or rowId >= 'q')) + List andFilters = Lists.newArrayList(); + andFilters.add(node); + andFilters.add(orNode); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), andFilters); + + // Should generate ['q', +inf) + List expectedRanges = Arrays.asList(new Range(new Key("q"), true, null, false)); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. 
emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(both); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + Object result = nodeOutput.get(both); + Assert.assertNotNull(result); + Assert.assertTrue("Result from graph walk was not a List", result instanceof List); + @SuppressWarnings("unchecked") + List actualRanges = (List) result; + Assert.assertEquals(expectedRanges, actualRanges); + } + + @Test + public void testPartialRangeConjunction() throws Exception { + // rowId >= 'f' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // anythingElse <= 'foo' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "anythingElse", + null, false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "foo"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children2); + assertNotNull(node2); + + // And UDF + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + // Should generate [f,+inf) + List expectedRanges = Arrays.asList(new Range(new Key("f"), true, null, false)); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. 
emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(both); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + Object result = nodeOutput.get(both); + Assert.assertNotNull(result); + Assert.assertTrue("Result from graph walk was not a List", result instanceof List); + @SuppressWarnings("unchecked") + List actualRanges = (List) result; + Assert.assertEquals(expectedRanges, actualRanges); + } + + @Test + public void testDateRangeConjunction() throws Exception { + // rowId >= '2014-01-01' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, + Date.valueOf("2014-01-01")); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // rowId <= '2014-07-01' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "rid", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.dateTypeInfo, + Date.valueOf("2014-07-01")); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPLessThan(), children2); + assertNotNull(node2); + + // And UDF + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + // Should generate [2014-01-01, 2014-07-01) + List expectedRanges = Arrays.asList(new Range(new Key("2014-01-01"), true, new Key( + "2014-07-01"), false)); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(both); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + Object result = nodeOutput.get(both); + Assert.assertNotNull(result); + Assert.assertTrue("Result from graph walk was not a List", result instanceof List); + @SuppressWarnings("unchecked") + List actualRanges = (List) result; + Assert.assertEquals(expectedRanges, actualRanges); + } + + @Test + public void testCastExpression() throws Exception { + // 40 and 50 + ExprNodeDesc fourty = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, + 40), fifty = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 50); + + // + + GenericUDFOPPlus plus = new GenericUDFOPPlus(); + + // 40 + 50 + ExprNodeGenericFuncDesc addition = new ExprNodeGenericFuncDesc(TypeInfoFactory.intTypeInfo, plus, Arrays.asList(fourty, fifty)); + + // cast(.... 
as string) + UDFToString stringCast = new UDFToString(); + GenericUDFBridge stringCastBridge = new GenericUDFBridge("cast", false, stringCast.getClass().getName()); + + // cast (40 + 50 as string) + ExprNodeGenericFuncDesc cast = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + stringCastBridge, "cast", Collections. singletonList(addition)); + + ExprNodeDesc key = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "key", null, + false); + + ExprNodeGenericFuncDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), Arrays.asList(key, cast)); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "key"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(node); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + // Don't fail -- would be better to actually compute a range of [90,+inf) + Object result = nodeOutput.get(node); + Assert.assertNull(result); + } + + @Test + public void testRangeOverNonRowIdField() throws Exception { + // foo >= 'f' + ExprNodeDesc column = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "foo", null, false); + ExprNodeDesc constant = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "f"); + List children = Lists.newArrayList(); + children.add(column); + children.add(constant); + ExprNodeDesc node = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrGreaterThan(), children); + assertNotNull(node); + + // foo <= 'm' + ExprNodeDesc column2 = new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "foo", null, + false); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, "m"); + List children2 = Lists.newArrayList(); + children2.add(column2); + children2.add(constant2); + ExprNodeDesc node2 = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPEqualOrLessThan(), children2); + assertNotNull(node2); + + // And UDF + List bothFilters = Lists.newArrayList(); + bothFilters.add(node); + bothFilters.add(node2); + ExprNodeGenericFuncDesc both = new ExprNodeGenericFuncDesc(TypeInfoFactory.stringTypeInfo, + new GenericUDFOPAnd(), bothFilters); + + AccumuloRangeGenerator rangeGenerator = new AccumuloRangeGenerator(handler, rowIdMapping, "rid"); + Dispatcher disp = new DefaultRuleDispatcher(rangeGenerator, + Collections. 
emptyMap(), null); + GraphWalker ogw = new DefaultGraphWalker(disp); + ArrayList topNodes = new ArrayList(); + topNodes.add(both); + HashMap nodeOutput = new HashMap(); + + try { + ogw.startWalking(topNodes, nodeOutput); + } catch (SemanticException ex) { + throw new RuntimeException(ex); + } + + // Filters are not over the rowid, therefore scan everything + Object result = nodeOutput.get(both); + Assert.assertNull(result); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestPrimitiveComparisonFilter.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestPrimitiveComparisonFilter.java new file mode 100644 index 0000000..95b6ba4 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/TestPrimitiveComparisonFilter.java @@ -0,0 +1,76 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.predicate; + +import java.io.ByteArrayInputStream; +import java.io.ByteArrayOutputStream; +import java.io.DataInputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.HashMap; +import java.util.Map; + +import org.apache.commons.codec.binary.Base64; +import org.apache.hadoop.io.IntWritable; +import org.junit.Assert; +import org.junit.Test; + +/** + * + */ +public class TestPrimitiveComparisonFilter { + + @Test + public void testBase64ConstantEncode() { + PrimitiveComparisonFilter filter = new PrimitiveComparisonFilter(); + Map options = new HashMap(); + + for (int i = 0; i < 500; i++) { + String constant = Integer.toString(i); + options.put(PrimitiveComparisonFilter.CONST_VAL, new String(Base64.encodeBase64(constant.getBytes()))); + + Assert.assertEquals(constant, new String(filter.getConstant(options))); + } + } + + @Test + public void testNumericBase64ConstantEncode() throws IOException { + PrimitiveComparisonFilter filter = new PrimitiveComparisonFilter(); + Map options = new HashMap(); + IntWritable writable = new IntWritable(); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + + for (int i = 0; i < 500; i++) { + writable.set(i); + writable.write(out); + + options.put(PrimitiveComparisonFilter.CONST_VAL, new String(Base64.encodeBase64(baos.toByteArray()))); + + byte[] bytes = filter.getConstant(options); + + ByteArrayInputStream bais = new ByteArrayInputStream(bytes); + DataInputStream in = new DataInputStream(bais); + writable.readFields(in); + + Assert.assertEquals(i, writable.get()); + + baos.reset(); + } + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java 
new file mode 100644 index 0000000..a6049c8 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestDoubleCompare.java @@ -0,0 +1,137 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.math.BigDecimal; +import java.nio.ByteBuffer; + +import org.junit.Before; +import org.junit.Test; + +public class TestDoubleCompare { + + private DoubleCompare doubleCompare; + + @Before + public void setup() { + doubleCompare = new DoubleCompare(); + byte[] db = new byte[8]; + ByteBuffer.wrap(db).putDouble(10.5d); + doubleCompare.init(db); + } + + public byte[] getBytes(double val) { + byte[] dBytes = new byte[8]; + ByteBuffer.wrap(dBytes).putDouble(val); + BigDecimal bd = doubleCompare.serialize(dBytes); + assertEquals(bd.doubleValue(), val, 0); + return dBytes; + } + + @Test + public void equal() { + Equal equalObj = new Equal(doubleCompare); + byte[] val = getBytes(10.5d); + assertTrue(equalObj.accept(val)); + } + + @Test + public void notEqual() { + NotEqual notEqualObj = new NotEqual(doubleCompare); + byte[] val = getBytes(11.0d); + assertTrue(notEqualObj.accept(val)); + + val = getBytes(10.5d); + assertFalse(notEqualObj.accept(val)); + + } + + @Test + public void greaterThan() { + GreaterThan greaterThanObj = new GreaterThan(doubleCompare); + byte[] val = getBytes(11.0d); + + assertTrue(greaterThanObj.accept(val)); + + val = getBytes(4.5d); + assertFalse(greaterThanObj.accept(val)); + + val = getBytes(10.5d); + assertFalse(greaterThanObj.accept(val)); + } + + @Test + public void greaterThanOrEqual() { + GreaterThanOrEqual greaterThanOrEqualObj = new GreaterThanOrEqual(doubleCompare); + + byte[] val = getBytes(11.0d); + + assertTrue(greaterThanOrEqualObj.accept(val)); + + val = getBytes(4.0d); + assertFalse(greaterThanOrEqualObj.accept(val)); + + val = getBytes(10.5d); + assertTrue(greaterThanOrEqualObj.accept(val)); + } + + @Test + public void lessThan() { + + LessThan lessThanObj = new LessThan(doubleCompare); + + byte[] val = getBytes(11.0d); + + assertFalse(lessThanObj.accept(val)); + + val = getBytes(4.0d); + assertTrue(lessThanObj.accept(val)); + + val = getBytes(10.5d); + assertFalse(lessThanObj.accept(val)); + + } + + @Test + public void lessThanOrEqual() { + + LessThanOrEqual lessThanOrEqualObj = new LessThanOrEqual(doubleCompare); + + byte[] val = getBytes(11.0d); + + assertFalse(lessThanOrEqualObj.accept(val)); + + val = getBytes(4.0d); + assertTrue(lessThanOrEqualObj.accept(val)); + + val = getBytes(10.5d); + assertTrue(lessThanOrEqualObj.accept(val)); + } + + @Test + public void like() { + try { + Like likeObj = new Like(doubleCompare); + assertTrue(likeObj.accept(new byte[] {})); + fail("should not accept"); + } catch (UnsupportedOperationException e) { + assertTrue(e.getMessage().contains( + "Like not supported for " + doubleCompare.getClass().getName())); + } + } + + @Test + public void invalidSerialization() { + try { + byte[] badVal = new byte[4]; + ByteBuffer.wrap(badVal).putInt(1); + doubleCompare.serialize(badVal); + fail("Should fail"); + } catch (RuntimeException e) { + assertTrue(e.getMessage().contains(" occurred trying to build double value")); + } + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java 
b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java new file mode 100644 index 0000000..9847a18 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestIntCompare.java @@ -0,0 +1,123 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.nio.ByteBuffer; + +import org.junit.Before; +import org.junit.Test; + +public class TestIntCompare { + private IntCompare intCompare; + + @Before + public void setup() { + byte[] ibytes = new byte[4]; + ByteBuffer.wrap(ibytes).putInt(10); + intCompare = new IntCompare(); + intCompare.init(ibytes); + } + + public byte[] getBytes(int val) { + byte[] intBytes = new byte[4]; + ByteBuffer.wrap(intBytes).putInt(val); + int serializedVal = intCompare.serialize(intBytes); + assertEquals(serializedVal, val); + return intBytes; + } + + @Test + public void equal() { + Equal equalObj = new Equal(intCompare); + byte[] val = getBytes(10); + assertTrue(equalObj.accept(val)); + } + + @Test + public void notEqual() { + NotEqual notEqualObj = new NotEqual(intCompare); + byte[] val = getBytes(11); + assertTrue(notEqualObj.accept(val)); + + val = getBytes(10); + assertFalse(notEqualObj.accept(val)); + + } + + @Test + public void greaterThan() { + GreaterThan greaterThanObj = new GreaterThan(intCompare); + byte[] val = getBytes(11); + + assertTrue(greaterThanObj.accept(val)); + + val = getBytes(4); + assertFalse(greaterThanObj.accept(val)); + + val = getBytes(10); + assertFalse(greaterThanObj.accept(val)); + } + + @Test + public void greaterThanOrEqual() { + GreaterThanOrEqual greaterThanOrEqualObj = new GreaterThanOrEqual(intCompare); + + byte[] val = getBytes(11); + + assertTrue(greaterThanOrEqualObj.accept(val)); + + val = getBytes(4); + assertFalse(greaterThanOrEqualObj.accept(val)); + + val = getBytes(10); + assertTrue(greaterThanOrEqualObj.accept(val)); + } + + @Test + public void lessThan() { + + LessThan lessThanObj = new LessThan(intCompare); + + byte[] val = getBytes(11); + + assertFalse(lessThanObj.accept(val)); + + val = getBytes(4); + assertTrue(lessThanObj.accept(val)); + + val = getBytes(10); + assertFalse(lessThanObj.accept(val)); + + } + + @Test + public void lessThanOrEqual() { + + LessThanOrEqual lessThanOrEqualObj = new LessThanOrEqual(intCompare); + + byte[] val = getBytes(11); + + assertFalse(lessThanOrEqualObj.accept(val)); + + val = getBytes(4); + assertTrue(lessThanOrEqualObj.accept(val)); + + val = getBytes(10); + assertTrue(lessThanOrEqualObj.accept(val)); + } + + @Test + public void like() { + try { + Like likeObj = new Like(intCompare); + assertTrue(likeObj.accept(new byte[] {})); + fail("should not accept"); + } catch (UnsupportedOperationException e) { + assertTrue(e.getMessage().contains( + "Like not supported for " + intCompare.getClass().getName())); + } + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java new file mode 100644 index 0000000..2abd41b --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestLongComparison.java @@ -0,0 +1,136 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import static org.junit.Assert.assertEquals; 
+import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; + +import java.nio.ByteBuffer; + +import org.junit.Before; +import org.junit.Test; + +public class TestLongComparison { + + private LongCompare longComp; + + @Before + public void setup() { + byte[] lBytes = new byte[8]; + ByteBuffer.wrap(lBytes).putLong(10l); + longComp = new LongCompare(); + longComp.init(lBytes); + } + + public byte[] getBytes(long val) { + byte[] lonBytes = new byte[8]; + ByteBuffer.wrap(lonBytes).putLong(val); + long lon = longComp.serialize(lonBytes); + assertEquals(lon, val); + return lonBytes; + } + + @Test + public void equal() { + Equal equalObj = new Equal(longComp); + byte[] val = getBytes(10l); + assertTrue(equalObj.accept(val)); + } + + @Test + public void notEqual() { + NotEqual notEqualObj = new NotEqual(longComp); + byte[] val = getBytes(11l); + assertTrue(notEqualObj.accept(val)); + + val = getBytes(10l); + assertFalse(notEqualObj.accept(val)); + + } + + @Test + public void greaterThan() { + GreaterThan greaterThanObj = new GreaterThan(longComp); + byte[] val = getBytes(11l); + + assertTrue(greaterThanObj.accept(val)); + + val = getBytes(4l); + assertFalse(greaterThanObj.accept(val)); + + val = getBytes(10l); + assertFalse(greaterThanObj.accept(val)); + } + + @Test + public void greaterThanOrEqual() { + GreaterThanOrEqual greaterThanOrEqualObj = new GreaterThanOrEqual(longComp); + + byte[] val = getBytes(11l); + + assertTrue(greaterThanOrEqualObj.accept(val)); + + val = getBytes(4l); + assertFalse(greaterThanOrEqualObj.accept(val)); + + val = getBytes(10l); + assertTrue(greaterThanOrEqualObj.accept(val)); + } + + @Test + public void lessThan() { + + LessThan lessThanObj = new LessThan(longComp); + + byte[] val = getBytes(11l); + + assertFalse(lessThanObj.accept(val)); + + val = getBytes(4l); + assertTrue(lessThanObj.accept(val)); + + val = getBytes(10l); + assertFalse(lessThanObj.accept(val)); + + } + + @Test + public void lessThanOrEqual() { + + LessThanOrEqual lessThanOrEqualObj = new LessThanOrEqual(longComp); + + byte[] val = getBytes(11l); + + assertFalse(lessThanOrEqualObj.accept(val)); + + val = getBytes(4l); + assertTrue(lessThanOrEqualObj.accept(val)); + + val = getBytes(10l); + assertTrue(lessThanOrEqualObj.accept(val)); + } + + @Test + public void like() { + try { + Like likeObj = new Like(longComp); + assertTrue(likeObj.accept(new byte[] {})); + fail("should not accept"); + } catch (UnsupportedOperationException e) { + assertTrue(e.getMessage().contains("Like not supported for " + longComp.getClass().getName())); + } + } + + @Test + public void invalidSerialization() { + try { + byte[] badVal = new byte[4]; + ByteBuffer.wrap(badVal).putInt(1); + longComp.serialize(badVal); + fail("Should fail"); + } catch (RuntimeException e) { + assertTrue(e.getMessage().contains(" occurred trying to build long value")); + } + } + +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java new file mode 100644 index 0000000..08716bc --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/predicate/compare/TestStringCompare.java @@ -0,0 +1,122 @@ +package org.apache.hadoop.hive.accumulo.predicate.compare; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; + +import org.junit.Before; +import org.junit.Test; + +public class 
TestStringCompare { + + private StringCompare strCompare; + + @Before + public void setup() { + strCompare = new StringCompare(); + strCompare.init("aaa".getBytes()); + } + + @Test + public void equal() { + Equal equalObj = new Equal(strCompare); + byte[] val = "aaa".getBytes(); + assertTrue(equalObj.accept(val)); + } + + @Test + public void notEqual() { + NotEqual notEqualObj = new NotEqual(strCompare); + byte[] val = "aab".getBytes(); + assertTrue(notEqualObj.accept(val)); + + val = "aaa".getBytes(); + assertFalse(notEqualObj.accept(val)); + + } + + @Test + public void greaterThan() { + GreaterThan greaterThanObj = new GreaterThan(strCompare); + byte[] val = "aab".getBytes(); + + assertTrue(greaterThanObj.accept(val)); + + val = "aa".getBytes(); + assertFalse(greaterThanObj.accept(val)); + + val = "aaa".getBytes(); + assertFalse(greaterThanObj.accept(val)); + } + + @Test + public void greaterThanOrEqual() { + GreaterThanOrEqual greaterThanOrEqualObj = new GreaterThanOrEqual(strCompare); + byte[] val = "aab".getBytes(); + + assertTrue(greaterThanOrEqualObj.accept(val)); + + val = "aa".getBytes(); + assertFalse(greaterThanOrEqualObj.accept(val)); + + val = "aaa".getBytes(); + assertTrue(greaterThanOrEqualObj.accept(val)); + } + + @Test + public void lessThan() { + + LessThan lessThanObj = new LessThan(strCompare); + + byte[] val = "aab".getBytes(); + + assertFalse(lessThanObj.accept(val)); + + val = "aa".getBytes(); + assertTrue(lessThanObj.accept(val)); + + val = "aaa".getBytes(); + assertFalse(lessThanObj.accept(val)); + + } + + @Test + public void lessThanOrEqual() { + + LessThanOrEqual lessThanOrEqualObj = new LessThanOrEqual(strCompare); + + byte[] val = "aab".getBytes(); + + assertFalse(lessThanOrEqualObj.accept(val)); + + val = "aa".getBytes(); + assertTrue(lessThanOrEqualObj.accept(val)); + + val = "aaa".getBytes(); + assertTrue(lessThanOrEqualObj.accept(val)); + } + + @Test + public void like() { + Like likeObj = new Like(strCompare); + String condition = "%a"; + assertTrue(likeObj.accept(condition.getBytes())); + + condition = "%a%"; + assertTrue(likeObj.accept(condition.getBytes())); + + condition = "a%"; + assertTrue(likeObj.accept(condition.getBytes())); + + condition = "a%aa"; + assertFalse(likeObj.accept(condition.getBytes())); + + condition = "b%"; + assertFalse(likeObj.accept(condition.getBytes())); + + condition = "%ab%"; + assertFalse(likeObj.accept(condition.getBytes())); + + condition = "%ba"; + assertFalse(likeObj.accept(condition.getBytes())); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/DelimitedAccumuloRowIdFactory.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/DelimitedAccumuloRowIdFactory.java new file mode 100644 index 0000000..4bb5419 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/DelimitedAccumuloRowIdFactory.java @@ -0,0 +1,104 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.IOException; +import java.util.List; +import java.util.Properties; + +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.serde2.ByteStream; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.StructField; +import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.log4j.Logger; + +/** + * Example AccumuloRowIdFactory which accepts a delimiter that is used to separate the components of + * some struct to place in the rowId. + */ +public class DelimitedAccumuloRowIdFactory extends DefaultAccumuloRowIdFactory { + private static final Logger log = Logger.getLogger(DelimitedAccumuloRowIdFactory.class); + public static final String ACCUMULO_COMPOSITE_DELIMITER = "accumulo.composite.delimiter"; + + private byte separator; + + @Override + public void init(AccumuloSerDeParameters accumuloSerDeParams, Properties properties) + throws SerDeException { + super.init(accumuloSerDeParams, properties); + + String delimiter = properties.getProperty(ACCUMULO_COMPOSITE_DELIMITER); + if (null == delimiter || delimiter.isEmpty()) { + throw new SerDeException("Did not find expected delimiter in configuration: " + + ACCUMULO_COMPOSITE_DELIMITER); + } + + if (delimiter.length() != 1) { + log.warn("Configured delimiter is longer than one character, only using first character"); + } + + separator = (byte) delimiter.charAt(0); + + log.info("Initialized DelimitedAccumuloRowIdFactory with separator of '" + separator + "'"); + } + + @Override + public ObjectInspector createRowIdObjectInspector(TypeInfo type) throws SerDeException { + return LazyFactory.createLazyObjectInspector(type, new byte[] {separator}, 0, + serdeParams.getNullSequence(), serdeParams.isEscaped(), serdeParams.getEscapeChar()); + } + + @Override + public LazyObjectBase createRowId(ObjectInspector inspector) throws SerDeException { + LazyObjectBase lazyObj = LazyFactory.createLazyObject(inspector, + ColumnEncoding.BINARY == rowIdMapping.getEncoding()); + log.info("Created " + lazyObj.getClass() + " for rowId with inspector " + inspector.getClass()); + return lazyObj; + } + + @Override + public byte[] serializeRowId(Object object, StructField field, ByteStream.Output output) + throws IOException { + ObjectInspector inspector = field.getFieldObjectInspector(); + if (inspector.getCategory() != ObjectInspector.Category.STRUCT) { + throw new IllegalStateException("invalid type value " + inspector.getTypeName()); + } + + output.reset(); + + StructObjectInspector structOI = (StructObjectInspector) inspector; + List elements = structOI.getStructFieldsDataAsList(object); + List fields = structOI.getAllStructFieldRefs(); + for (int i = 0; i < elements.size(); i++) { + Object o = elements.get(i); + StructField 
structField = fields.get(i); + + if (output.getLength() > 0) { + output.write(separator); + } + + serializer.writeWithLevel(structField.getFieldObjectInspector(), o, output, rowIdMapping, 1); + } + + return output.toByteArray(); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java new file mode 100644 index 0000000..e047ae5 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/FirstCharAccumuloCompositeRowId.java @@ -0,0 +1,75 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.util.Arrays; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.util.StringUtils; +import org.apache.log4j.Logger; + +/** + * Gets the first character of each string in a struct + */ +public class FirstCharAccumuloCompositeRowId extends AccumuloCompositeRowId { + private static final Logger log = Logger.getLogger(FirstCharAccumuloCompositeRowId.class); + + private Properties tbl; + private Configuration conf; + private byte[] bytes; + private int start, length; + private String bytesAsString; + + public FirstCharAccumuloCompositeRowId(LazySimpleStructObjectInspector oi, Properties tbl, + Configuration conf) { + super(oi); + this.tbl = tbl; + this.conf = conf; + } + + @Override + public void init(ByteArrayRef bytes, int start, int length) { + this.bytes = bytes.getData(); + this.start = start; + this.length = length; + } + + @Override + public Object getField(int fieldID) { + if (bytesAsString == null) { + this.bytesAsString = new String(bytes, start, length); + } + + log.info("Data: " + bytesAsString + ", " + Arrays.toString(bytes)); + + // The separator for the hive row would be using \x02, so the separator for this struct would be + // \x02 + 1 = \x03 + char separator = (char) ((int) oi.getSeparator() + 1); + + log.info("Separator: " + String.format("%04x", (int) separator)); + + // Get the character/byte at the offset in the string equal to the fieldID + String[] fieldBytes = StringUtils.split(bytesAsString, separator); + + log.info("Fields: " + Arrays.toString(fieldBytes)); + + return toLazyObject(fieldID, new byte[] {(byte) fieldBytes[fieldID].charAt(0)}); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloRowSerializer.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloRowSerializer.java new file mode 100644 index 
0000000..f613a58 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloRowSerializer.java @@ -0,0 +1,311 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.io.ByteArrayOutputStream; +import java.io.DataOutputStream; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Properties; + +import org.apache.accumulo.core.data.ColumnUpdate; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapping; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.ByteStream; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.StringObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.junit.Assert; +import org.junit.Test; +import org.mockito.Mockito; + +import com.google.common.base.Joiner; + +/** + * + */ +public class TestAccumuloRowSerializer { + + @Test + public void testBufferResetBeforeUse() throws IOException { + ByteStream.Output output = new ByteStream.Output(); + PrimitiveObjectInspector fieldObjectInspector = Mockito.mock(StringObjectInspector.class); + ColumnMapping mapping = Mockito.mock(ColumnMapping.class); + + // Write some garbage to the buffer that should be erased + output.write("foobar".getBytes()); + + // Stub out the serializer + AccumuloRowSerializer serializer = Mockito.mock(AccumuloRowSerializer.class); + + String object = "hello"; + + Mockito.when( + 
serializer.getSerializedValue(Mockito.any(ObjectInspector.class), Mockito.any(), + Mockito.any(ByteStream.Output.class), Mockito.any(ColumnMapping.class))) + .thenCallRealMethod(); + + Mockito.when(fieldObjectInspector.getCategory()).thenReturn(ObjectInspector.Category.PRIMITIVE); + Mockito.when(fieldObjectInspector.getPrimitiveCategory()).thenReturn(PrimitiveCategory.STRING); + Mockito.when(fieldObjectInspector.getPrimitiveWritableObject(Mockito.any(Object.class))) + .thenReturn(new Text(object)); + Mockito.when(mapping.getEncoding()).thenReturn(ColumnEncoding.STRING); + + // Invoke the method + serializer.getSerializedValue(fieldObjectInspector, object, output, mapping); + + // Verify the buffer was reset (real output doesn't happen because it was mocked) + Assert.assertEquals(0, output.size()); + } + + @Test + public void testBinarySerialization() throws IOException, SerDeException { + List columns = Arrays.asList("row", "cq1", "cq2", "cq3"); + List types = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo); + List typeNames = new ArrayList(types.size()); + for (TypeInfo type : types) { + typeNames.add(type.getTypeName()); + } + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, + ":rowid,cf:cq1#b,cf:cq2#b,cf:cq3"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(typeNames)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + LazySimpleStructObjectInspector oi = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(columns, types, serDeParams.getSeparators(), + serDeParams.getNullSequence(), serDeParams.isLastColumnTakesRest(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), new ColumnVisibility(), + accumuloSerDeParams.getRowIdFactory()); + + // Create the LazyStruct from the LazyStruct...Inspector + LazyStruct obj = (LazyStruct) LazyFactory.createLazyObject(oi); + + ByteArrayRef byteRef = new ByteArrayRef(); + byteRef.setData(new byte[] {'r', 'o', 'w', '1', ' ', '1', '0', ' ', '2', '0', ' ', 'v', 'a', + 'l', 'u', 'e'}); + obj.init(byteRef, 0, byteRef.getData().length); + + Mutation m = (Mutation) serializer.serialize(obj, oi); + + Assert.assertArrayEquals("row1".getBytes(), m.getRow()); + + List updates = m.getUpdates(); + Assert.assertEquals(3, updates.size()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + + ColumnUpdate update = updates.get(0); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq1", new String(update.getColumnQualifier())); + + out.writeInt(10); + Assert.assertArrayEquals(baos.toByteArray(), update.getValue()); + + update = updates.get(1); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq2", new String(update.getColumnQualifier())); + + baos.reset(); + out.writeInt(20); + Assert.assertArrayEquals(baos.toByteArray(), update.getValue()); + + update 
= updates.get(2); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq3", new String(update.getColumnQualifier())); + + Assert.assertEquals("value", new String(update.getValue())); + } + + @Test + public void testVisibilityLabel() throws IOException, SerDeException { + List columns = Arrays.asList("row", "cq1", "cq2", "cq3"); + List types = Arrays. asList(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.intTypeInfo, TypeInfoFactory.intTypeInfo, TypeInfoFactory.stringTypeInfo); + List typeNames = new ArrayList(types.size()); + for (TypeInfo type : types) { + typeNames.add(type.getTypeName()); + } + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, + ":rowid,cf:cq1#b,cf:cq2#b,cf:cq3"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(typeNames)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + LazySimpleStructObjectInspector oi = (LazySimpleStructObjectInspector) LazyFactory + .createLazyStructInspector(columns, types, serDeParams.getSeparators(), + serDeParams.getNullSequence(), serDeParams.isLastColumnTakesRest(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), new ColumnVisibility("foo"), + accumuloSerDeParams.getRowIdFactory()); + + // Create the LazyStruct from the LazyStruct...Inspector + LazyStruct obj = (LazyStruct) LazyFactory.createLazyObject(oi); + + ByteArrayRef byteRef = new ByteArrayRef(); + byteRef.setData(new byte[] {'r', 'o', 'w', '1', ' ', '1', '0', ' ', '2', '0', ' ', 'v', 'a', + 'l', 'u', 'e'}); + obj.init(byteRef, 0, byteRef.getData().length); + + Mutation m = (Mutation) serializer.serialize(obj, oi); + + Assert.assertArrayEquals("row1".getBytes(), m.getRow()); + + List updates = m.getUpdates(); + Assert.assertEquals(3, updates.size()); + + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + DataOutputStream out = new DataOutputStream(baos); + + ColumnUpdate update = updates.get(0); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq1", new String(update.getColumnQualifier())); + Assert.assertEquals("foo", new String(update.getColumnVisibility())); + + out.writeInt(10); + Assert.assertArrayEquals(baos.toByteArray(), update.getValue()); + + update = updates.get(1); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq2", new String(update.getColumnQualifier())); + Assert.assertEquals("foo", new String(update.getColumnVisibility())); + + baos.reset(); + out.writeInt(20); + Assert.assertArrayEquals(baos.toByteArray(), update.getValue()); + + update = updates.get(2); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq3", new String(update.getColumnQualifier())); + Assert.assertEquals("foo", new String(update.getColumnVisibility())); + + Assert.assertEquals("value", new String(update.getValue())); + } + + @Test + public void testMapSerialization() throws IOException, SerDeException { + List columns = Arrays.asList("row", "col"); + List types = 
Arrays. asList(TypeInfoFactory.stringTypeInfo, TypeInfoFactory + .getMapTypeInfo(TypeInfoFactory.stringTypeInfo, TypeInfoFactory.stringTypeInfo)); + List typeNames = new ArrayList(types.size()); + for (TypeInfo type : types) { + typeNames.add(type.getTypeName()); + } + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:*"); + tableProperties.setProperty(serdeConstants.FIELD_DELIM, " "); + tableProperties.setProperty(serdeConstants.COLLECTION_DELIM, ","); + tableProperties.setProperty(serdeConstants.MAPKEY_DELIM, ":"); + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(typeNames)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + LazyStringObjectInspector stringOI = (LazyStringObjectInspector) LazyFactory + .createLazyObjectInspector(stringTypeInfo, new byte[] {0}, 0, + serDeParams.getNullSequence(), serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazyMapObjectInspector mapOI = LazyObjectInspectorFactory.getLazySimpleMapObjectInspector( + stringOI, stringOI, (byte) ',', (byte) ':', serDeParams.getNullSequence(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) LazyObjectInspectorFactory + .getLazySimpleStructObjectInspector(columns, Arrays.asList(stringOI, mapOI), (byte) ' ', + serDeParams.getNullSequence(), serDeParams.isLastColumnTakesRest(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), new ColumnVisibility(), + accumuloSerDeParams.getRowIdFactory()); + + // Create the LazyStruct from the LazyStruct...Inspector + LazyStruct obj = (LazyStruct) LazyFactory.createLazyObject(structOI); + + ByteArrayRef byteRef = new ByteArrayRef(); + byteRef.setData("row1 cq1:10,cq2:20,cq3:value".getBytes()); + obj.init(byteRef, 0, byteRef.getData().length); + + Mutation m = (Mutation) serializer.serialize(obj, structOI); + + Assert.assertArrayEquals("row1".getBytes(), m.getRow()); + + List updates = m.getUpdates(); + Assert.assertEquals(3, updates.size()); + + ColumnUpdate update = updates.get(0); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq1", new String(update.getColumnQualifier())); + Assert.assertEquals("10", new String(update.getValue())); + + update = updates.get(1); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq2", new String(update.getColumnQualifier())); + Assert.assertEquals("20", new String(update.getValue())); + + update = updates.get(2); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq3", new String(update.getColumnQualifier())); + Assert.assertEquals("value", new String(update.getValue())); + } + + @Test(expected = IllegalArgumentException.class) + public void testInvalidRowIdOffset() throws SerDeException { + ArrayList mappings = new ArrayList(); + + // Should fail because of the -1 + new AccumuloRowSerializer(-1, null, mappings, new 
ColumnVisibility(), null); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java new file mode 100644 index 0000000..bf3acd0 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDe.java @@ -0,0 +1,467 @@ +package org.apache.hadoop.hive.accumulo.serde; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; + +import java.io.IOException; +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Map.Entry; +import java.util.Properties; +import java.util.Set; + +import org.apache.accumulo.core.data.ColumnUpdate; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.AccumuloHiveRow; +import org.apache.hadoop.hive.accumulo.LazyAccumuloRow; +import org.apache.hadoop.hive.accumulo.columns.InvalidColumnMappingException; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; +import org.apache.hadoop.hive.serde2.lazy.LazyArray; +import org.apache.hadoop.hive.serde2.lazy.LazyFactory; +import org.apache.hadoop.hive.serde2.lazy.LazyMap; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.lazy.LazyString; +import org.apache.hadoop.hive.serde2.lazy.LazyStruct; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.Text; +import org.apache.log4j.Logger; +import org.junit.Assert; +import org.junit.Before; +import org.junit.Test; + +import com.google.common.base.Joiner; + +public class TestAccumuloSerDe { + @SuppressWarnings("unused") + private static final Logger log = Logger.getLogger(TestAccumuloSerDe.class); + + protected AccumuloSerDe serde; + + @Before + public void setup() { + serde = new AccumuloSerDe(); + } + + @Test(expected = TooManyHiveColumnsException.class) + public void moreHiveColumnsThanAccumuloColumns() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,field1,field2,field3,field4"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string,string,string"); + + serde.initialize(conf, properties); + serde.deserialize(new Text("fail")); + } + + @Test(expected = TooManyAccumuloColumnsException.class) + public void moreAccumuloColumnsThanHiveColumns() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + + 
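+ // Four Accumulo mappings (:rowID plus cf:f1, cf:f2, cf:f3) against only three declared Hive columns, so the test expects TooManyAccumuloColumnsException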
properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:f1,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,field1,field2"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string"); + + serde.initialize(conf, properties); + serde.deserialize(new Text("fail")); + } + + @Test(expected = NullPointerException.class) + public void emptyConfiguration() throws SerDeException { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + serde.initialize(conf, properties); + } + + @Test + public void simpleColumnMapping() throws SerDeException { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:f1,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,field1,field2,field3"); + + serde.initialize(conf, properties); + assertNotNull(serde.getCachedRow()); + } + + @Test + public void withRowID() throws SerDeException { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:f1,:rowID,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field1,field2,field3,field4"); + serde.initialize(conf, properties); + assertNotNull(serde.getCachedRow()); + } + + @Test(expected = InvalidColumnMappingException.class) + public void invalidColMapping() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field2,field3,field4"); + + serde.initialize(conf, properties); + AccumuloHiveRow row = new AccumuloHiveRow(); + row.setRowId("r1"); + Object obj = serde.deserialize(row); + assertTrue(obj instanceof LazyAccumuloRow); + LazyAccumuloRow lazyRow = (LazyAccumuloRow) obj; + lazyRow.getField(0); + } + + @Test(expected = TooManyAccumuloColumnsException.class) + public void deserializeWithTooFewHiveColumns() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:f1,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col1,col2"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string"); + + serde.initialize(conf, properties); + serde.deserialize(new Text("fail")); + } + + @Test + public void testArraySerialization() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:vals"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,values"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,array"); + properties.setProperty(serdeConstants.COLLECTION_DELIM, ":"); + + // Get one of the default separators to avoid having to set a custom separator + char separator = ':'; + + serde.initialize(conf, properties); + + AccumuloHiveRow row = new AccumuloHiveRow(); + row.setRowId("r1"); + row.add("cf", "vals", ("value1" + separator + "value2" + separator + "value3").getBytes()); + + Object obj = serde.deserialize(row); + + assertNotNull(obj); + assertTrue(obj instanceof LazyAccumuloRow); + + LazyAccumuloRow lazyRow = (LazyAccumuloRow) obj; + Object field0 = 
lazyRow.getField(0); + assertNotNull(field0); + assertTrue(field0 instanceof LazyString); + assertEquals(row.getRowId(), ((LazyString) field0).getWritableObject().toString()); + + Object field1 = lazyRow.getField(1); + assertNotNull(field1); + assertTrue(field1 instanceof LazyArray); + LazyArray array = (LazyArray) field1; + + List values = array.getList(); + assertEquals(3, values.size()); + for (int i = 0; i < 3; i++) { + Object o = values.get(i); + assertNotNull(o); + assertTrue(o instanceof LazyString); + assertEquals("value" + (i + 1), ((LazyString) o).getWritableObject().toString()); + } + } + + @Test + public void testMapSerialization() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:vals"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,values"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,map"); + properties.setProperty(serdeConstants.COLLECTION_DELIM, ":"); + properties.setProperty(serdeConstants.MAPKEY_DELIM, "="); + + // Get one of the default separators to avoid having to set a custom separator + char collectionSeparator = ':', kvSeparator = '='; + + serde.initialize(conf, properties); + + AccumuloHiveRow row = new AccumuloHiveRow(); + row.setRowId("r1"); + row.add("cf", "vals", ("k1" + kvSeparator + "v1" + collectionSeparator + "k2" + kvSeparator + + "v2" + collectionSeparator + "k3" + kvSeparator + "v3").getBytes()); + + Object obj = serde.deserialize(row); + + assertNotNull(obj); + assertTrue(obj instanceof LazyAccumuloRow); + + LazyAccumuloRow lazyRow = (LazyAccumuloRow) obj; + Object field0 = lazyRow.getField(0); + assertNotNull(field0); + assertTrue(field0 instanceof LazyString); + assertEquals(row.getRowId(), ((LazyString) field0).getWritableObject().toString()); + + Object field1 = lazyRow.getField(1); + assertNotNull(field1); + assertTrue(field1 instanceof LazyMap); + LazyMap map = (LazyMap) field1; + + Map untypedMap = map.getMap(); + assertEquals(3, map.getMapSize()); + Set expectedKeys = new HashSet(); + expectedKeys.add("k1"); + expectedKeys.add("k2"); + expectedKeys.add("k3"); + for (Entry entry : untypedMap.entrySet()) { + assertNotNull(entry.getKey()); + assertTrue(entry.getKey() instanceof LazyString); + LazyString key = (LazyString) entry.getKey(); + + assertNotNull(entry.getValue()); + assertTrue(entry.getValue() instanceof LazyString); + LazyString value = (LazyString) entry.getValue(); + + String strKey = key.getWritableObject().toString(), strValue = value.getWritableObject() + .toString(); + + assertTrue(expectedKeys.remove(strKey)); + + assertEquals(2, strValue.length()); + assertTrue(strValue.startsWith("v")); + assertTrue(strValue.endsWith(Character.toString(strKey.charAt(1)))); + } + + assertTrue("Did not find expected keys: " + expectedKeys, expectedKeys.isEmpty()); + } + + @Test + public void deserialization() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:f1,cf:f2,cf:f3"); + + properties.setProperty(serdeConstants.LIST_COLUMNS, "blah,field2,field3,field4"); + serde.initialize(conf, properties); + + AccumuloHiveRow row = new AccumuloHiveRow(); + row.setRowId("r1"); + row.add("cf", "f1", "v1".getBytes()); + row.add("cf", "f2", "v2".getBytes()); + + Object obj = serde.deserialize(row); + assertTrue(obj instanceof LazyAccumuloRow); + + 
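+ // Only cf:f1 and cf:f2 were populated above, so the checks that follow cover the rowId and those two fields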
LazyAccumuloRow lazyRow = (LazyAccumuloRow) obj; + Object field0 = lazyRow.getField(0); + assertNotNull(field0); + assertTrue(field0 instanceof LazyString); + assertEquals(field0.toString(), "r1"); + + Object field1 = lazyRow.getField(1); + assertNotNull(field1); + assertTrue("Expected instance of LazyString but was " + field1.getClass(), + field1 instanceof LazyString); + assertEquals(field1.toString(), "v1"); + + Object field2 = lazyRow.getField(2); + assertNotNull(field2); + assertTrue(field2 instanceof LazyString); + assertEquals(field2.toString(), "v2"); + } + + @Test + public void testNoVisibilitySetsEmptyVisibility() throws SerDeException { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:f1,:rowID"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field1,field2"); + + serde.initialize(conf, properties); + + AccumuloRowSerializer serializer = serde.getSerializer(); + + Assert.assertEquals(new ColumnVisibility(), serializer.getVisibility()); + } + + @Test + public void testColumnVisibilityForSerializer() throws SerDeException { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, "cf:f1,:rowID"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field1,field2"); + properties.setProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY, "foobar"); + + serde.initialize(conf, properties); + + AccumuloRowSerializer serializer = serde.getSerializer(); + + Assert.assertEquals(new ColumnVisibility("foobar"), serializer.getVisibility()); + } + + @Test + public void testCompositeKeyDeserialization() throws Exception { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:f1"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,field1"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, + "struct,string"); + properties.setProperty(DelimitedAccumuloRowIdFactory.ACCUMULO_COMPOSITE_DELIMITER, "_"); + properties.setProperty(AccumuloSerDeParameters.COMPOSITE_ROWID_FACTORY, + DelimitedAccumuloRowIdFactory.class.getName()); + + serde.initialize(conf, properties); + + AccumuloHiveRow row = new AccumuloHiveRow(); + row.setRowId("p1_p2_p3"); + row.add("cf", "f1", "v1".getBytes()); + + Object obj = serde.deserialize(row); + assertTrue(obj instanceof LazyAccumuloRow); + + LazyAccumuloRow lazyRow = (LazyAccumuloRow) obj; + Object field0 = lazyRow.getField(0); + assertNotNull(field0); + assertTrue(field0 instanceof LazyStruct); + LazyStruct struct = (LazyStruct) field0; + List fields = struct.getFieldsAsList(); + assertEquals(3, fields.size()); + for (int i = 0; i < fields.size(); i++) { + assertEquals(LazyString.class, fields.get(i).getClass()); + assertEquals("p" + (i + 1), fields.get(i).toString()); + } + + Object field1 = lazyRow.getField(1); + assertNotNull(field1); + assertTrue("Expected instance of LazyString but was " + field1.getClass(), + field1 instanceof LazyString); + assertEquals(field1.toString(), "v1"); + } + + @Test + public void testStructOfMapSerialization() throws IOException, SerDeException { + List columns = Arrays.asList("row", "col"); + List structColNames = Arrays.asList("map1", "map2"); + TypeInfo mapTypeInfo = TypeInfoFactory.getMapTypeInfo(TypeInfoFactory.stringTypeInfo, + TypeInfoFactory.stringTypeInfo); + + // struct,map2:map>,string + 
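+ // i.e. the row key is a struct of two string-to-string maps (map1, map2) and the value column is a plain string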
List types = Arrays. asList( + TypeInfoFactory.getStructTypeInfo(structColNames, Arrays.asList(mapTypeInfo, mapTypeInfo)), + TypeInfoFactory.stringTypeInfo); + + Properties tableProperties = new Properties(); + tableProperties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:cq"); + // Use the default separators [0, 1, 2, 3, ..., 7] + tableProperties.setProperty(serdeConstants.LIST_COLUMNS, Joiner.on(',').join(columns)); + tableProperties.setProperty(serdeConstants.LIST_COLUMN_TYPES, Joiner.on(',').join(types)); + AccumuloSerDeParameters accumuloSerDeParams = new AccumuloSerDeParameters(new Configuration(), + tableProperties, AccumuloSerDe.class.getSimpleName()); + SerDeParameters serDeParams = accumuloSerDeParams.getSerDeParameters(); + + byte[] seps = serDeParams.getSeparators(); + + // struct_map>> + + TypeInfo stringTypeInfo = TypeInfoFactory.getPrimitiveTypeInfo(serdeConstants.STRING_TYPE_NAME); + LazyStringObjectInspector stringOI = (LazyStringObjectInspector) LazyFactory + .createLazyObjectInspector(stringTypeInfo, new byte[] {0}, 0, + serDeParams.getNullSequence(), serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazyMapObjectInspector mapOI = LazyObjectInspectorFactory.getLazySimpleMapObjectInspector( + stringOI, stringOI, seps[3], seps[4], serDeParams.getNullSequence(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazySimpleStructObjectInspector rowStructOI = (LazySimpleStructObjectInspector) LazyObjectInspectorFactory + .getLazySimpleStructObjectInspector(structColNames, + Arrays. asList(mapOI, mapOI), (byte) seps[2], + serDeParams.getNullSequence(), serDeParams.isLastColumnTakesRest(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) LazyObjectInspectorFactory + .getLazySimpleStructObjectInspector(columns, Arrays.asList(rowStructOI, stringOI), seps[1], + serDeParams.getNullSequence(), serDeParams.isLastColumnTakesRest(), + serDeParams.isEscaped(), serDeParams.getEscapeChar()); + + AccumuloRowSerializer serializer = new AccumuloRowSerializer(0, serDeParams, + accumuloSerDeParams.getColumnMappings(), new ColumnVisibility(), + accumuloSerDeParams.getRowIdFactory()); + + Map map1 = new HashMap(), map2 = new HashMap(); + + map1.put("key10", "value10"); + map1.put("key11", "value11"); + + map2.put("key20", "value20"); + map2.put("key21", "value21"); + + ByteArrayRef byteRef = new ByteArrayRef(); + // Default separators are 1-indexed (instead of 0-indexed), thus the separator at offset 1 is + // (byte) 2 + // The separator for the hive row is \x02, for the row Id struct, \x03, and the maps \x04 and + // \x05 + String accumuloRow = "key10\5value10\4key11\5value11\3key20\5value20\4key21\5value21"; + LazyStruct entireStruct = (LazyStruct) LazyFactory.createLazyObject(structOI); + byteRef.setData((accumuloRow + "\2foo").getBytes()); + entireStruct.init(byteRef, 0, byteRef.getData().length); + + Mutation m = serializer.serialize(entireStruct, structOI); + Assert.assertArrayEquals(accumuloRow.getBytes(), m.getRow()); + Assert.assertEquals(1, m.getUpdates().size()); + ColumnUpdate update = m.getUpdates().get(0); + Assert.assertEquals("cf", new String(update.getColumnFamily())); + Assert.assertEquals("cq", new String(update.getColumnQualifier())); + Assert.assertEquals("foo", new String(update.getValue())); + + AccumuloHiveRow haRow = new AccumuloHiveRow(new String(m.getRow())); + haRow.add("cf", "cq", "foo".getBytes()); + + LazyAccumuloRow lazyAccumuloRow = new 
LazyAccumuloRow(structOI); + lazyAccumuloRow.init(haRow, accumuloSerDeParams.getColumnMappings(), + accumuloSerDeParams.getRowIdFactory()); + + List objects = lazyAccumuloRow.getFieldsAsList(); + Assert.assertEquals(2, objects.size()); + + Assert.assertEquals("foo", objects.get(1).toString()); + + LazyStruct rowStruct = (LazyStruct) objects.get(0); + List rowObjects = rowStruct.getFieldsAsList(); + Assert.assertEquals(2, rowObjects.size()); + + LazyMap rowMap = (LazyMap) rowObjects.get(0); + Map actualMap = rowMap.getMap(); + System.out.println("Actual map 1: " + actualMap); + Map actualStringMap = new HashMap(); + for (Entry entry : actualMap.entrySet()) { + actualStringMap.put(entry.getKey().toString(), entry.getValue().toString()); + } + + Assert.assertEquals(map1, actualStringMap); + + rowMap = (LazyMap) rowObjects.get(1); + actualMap = rowMap.getMap(); + System.out.println("Actual map 2: " + actualMap); + actualStringMap = new HashMap(); + for (Entry entry : actualMap.entrySet()) { + actualStringMap.put(entry.getKey().toString(), entry.getValue().toString()); + } + + Assert.assertEquals(map2, actualStringMap); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDeParameters.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDeParameters.java new file mode 100644 index 0000000..216f924 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestAccumuloSerDeParameters.java @@ -0,0 +1,101 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.util.Properties; + +import org.apache.accumulo.core.security.Authorizations; +import org.apache.accumulo.core.security.ColumnVisibility; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.junit.Assert; +import org.junit.Test; + +/** + * + */ +public class TestAccumuloSerDeParameters { + + @Test + public void testParseColumnVisibility() throws SerDeException { + Properties properties = new Properties(); + Configuration conf = new Configuration(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field1,field2,field3"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string"); + properties.setProperty(AccumuloSerDeParameters.VISIBILITY_LABEL_KEY, "foo&bar"); + + AccumuloSerDeParameters params = new AccumuloSerDeParameters(conf, properties, + AccumuloSerDe.class.getName()); + + ColumnVisibility cv = params.getTableVisibilityLabel(); + + Assert.assertEquals(new ColumnVisibility("foo&bar"), cv); + } + + @Test + public void testParseAuthorizationsFromConf() throws SerDeException { + Configuration conf = new Configuration(false); + conf.set(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo,bar"); + + Authorizations auths = AccumuloSerDeParameters.getAuthorizationsFromConf(conf); + Assert.assertEquals(new Authorizations("foo,bar"), auths); + } + + @Test + public void testParseAuthorizationsFromProperties() throws SerDeException { + Configuration conf = new Configuration(); + Properties properties = new Properties(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field1,field2,field3"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string"); + properties.setProperty(AccumuloSerDeParameters.AUTHORIZATIONS_KEY, "foo,bar"); + + AccumuloSerDeParameters params = new AccumuloSerDeParameters(conf, properties, + AccumuloSerDe.class.getName()); + + Authorizations auths = params.getAuthorizations(); + Assert.assertEquals(new Authorizations("foo,bar"), auths); + } + + @Test + public void testNullAuthsFromProperties() throws SerDeException { + Configuration conf = new Configuration(); + Properties properties = new Properties(); + + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowid,cf:f2,cf:f3"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "field1,field2,field3"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, "string,string,string"); + + AccumuloSerDeParameters params = new AccumuloSerDeParameters(conf, properties, + AccumuloSerDe.class.getName()); + + Authorizations auths = params.getAuthorizations(); + Assert.assertNull(auths); + } + + @Test + public void testNullAuthsFromConf() throws SerDeException { + Configuration conf = new Configuration(false); + + Authorizations auths = AccumuloSerDeParameters.getAuthorizationsFromConf(conf); + Assert.assertNull(auths); + } +} diff --git a/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java new file mode 100644 index 0000000..d464740 --- /dev/null +++ b/accumulo-handler/src/test/org/apache/hadoop/hive/accumulo/serde/TestDefaultAccumuloRowIdFactory.java @@
-0,0 +1,128 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo.serde; + +import java.util.List; +import java.util.Properties; + +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.accumulo.columns.ColumnEncoding; +import org.apache.hadoop.hive.accumulo.columns.ColumnMapper; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.SerDeException; +import org.apache.hadoop.hive.serde2.lazy.LazyObjectBase; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.SerDeParameters; +import org.apache.hadoop.hive.serde2.lazy.LazyString; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazyMapObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.LazySimpleStructObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyIntObjectInspector; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyPrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.lazy.objectinspector.primitive.LazyStringObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.JavaStringObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.junit.Assert; +import org.junit.Test; + +/** + * + */ +public class TestDefaultAccumuloRowIdFactory { + + @Test + public void testCorrectPrimitiveInspectors() throws SerDeException { + AccumuloSerDe accumuloSerDe = new AccumuloSerDe(); + + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, + "string,int"); + + accumuloSerDe.initialize(conf, properties); + + AccumuloRowIdFactory factory = accumuloSerDe.getParams().getRowIdFactory(); + List columnTypes = accumuloSerDe.getParams().getHiveColumnTypes(); + ColumnMapper mapper = accumuloSerDe.getParams().getColumnMapper(); + SerDeParameters serDeParams = accumuloSerDe.getParams().getSerDeParameters(); + + List OIs = accumuloSerDe.getColumnObjectInspectors(columnTypes, serDeParams, mapper.getColumnMappings(), factory); + + Assert.assertEquals(2, OIs.size()); + Assert.assertEquals(LazyStringObjectInspector.class, OIs.get(0).getClass()); + Assert.assertEquals(LazyIntObjectInspector.class, OIs.get(1).getClass()); + } + + @Test + public void testCorrectComplexInspectors() throws SerDeException { + AccumuloSerDe accumuloSerDe = new AccumuloSerDe(); + + 
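+ // A struct row key and a map value should resolve to lazy struct and map inspectors, using the auto-assigned separators asserted below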
Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, + "struct,map"); + + accumuloSerDe.initialize(conf, properties); + + AccumuloRowIdFactory factory = accumuloSerDe.getParams().getRowIdFactory(); + List columnTypes = accumuloSerDe.getParams().getHiveColumnTypes(); + ColumnMapper mapper = accumuloSerDe.getParams().getColumnMapper(); + SerDeParameters serDeParams = accumuloSerDe.getParams().getSerDeParameters(); + + List OIs = accumuloSerDe.getColumnObjectInspectors(columnTypes, serDeParams, mapper.getColumnMappings(), factory); + + // Expect the correct OIs + Assert.assertEquals(2, OIs.size()); + Assert.assertEquals(LazySimpleStructObjectInspector.class, OIs.get(0).getClass()); + Assert.assertEquals(LazyMapObjectInspector.class, OIs.get(1).getClass()); + + LazySimpleStructObjectInspector structOI = (LazySimpleStructObjectInspector) OIs.get(0); + Assert.assertEquals(2, (int) structOI.getSeparator()); + + LazyMapObjectInspector mapOI = (LazyMapObjectInspector) OIs.get(1); + Assert.assertEquals(2, (int) mapOI.getItemSeparator()); + Assert.assertEquals(3, (int) mapOI.getKeyValueSeparator()); + } + + @Test + public void testBinaryStringRowId() throws SerDeException { + AccumuloSerDe accumuloSerDe = new AccumuloSerDe(); + + Properties properties = new Properties(); + Configuration conf = new Configuration(); + properties.setProperty(AccumuloSerDeParameters.COLUMN_MAPPINGS, ":rowID,cf:cq"); + properties.setProperty(serdeConstants.LIST_COLUMNS, "row,col"); + properties.setProperty(serdeConstants.LIST_COLUMN_TYPES, + "string,string"); + properties.setProperty(AccumuloSerDeParameters.DEFAULT_STORAGE_TYPE, ColumnEncoding.BINARY.getName()); + + accumuloSerDe.initialize(conf, properties); + + DefaultAccumuloRowIdFactory rowIdFactory = new DefaultAccumuloRowIdFactory(); + rowIdFactory.init(accumuloSerDe.getParams(), properties); + + LazyStringObjectInspector oi = LazyPrimitiveObjectInspectorFactory.getLazyStringObjectInspector(false, (byte) '\\'); + LazyObjectBase lazyObj = rowIdFactory.createRowId(oi); + Assert.assertNotNull(lazyObj); + Assert.assertTrue(LazyString.class.isAssignableFrom(lazyObj.getClass())); + } + +} diff --git a/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q b/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q new file mode 100644 index 0000000..6684fd3 --- /dev/null +++ b/accumulo-handler/src/test/queries/positive/accumulo_custom_key.q @@ -0,0 +1,22 @@ +CREATE TABLE accumulo_ck_1(key struct, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom", + "accumulo.columns.mapping" = ":rowid,cf:string", + "accumulo.composite.rowid.factory"="org.apache.hadoop.hive.accumulo.serde.DelimitedAccumuloRowIdFactory", + "accumulo.composite.delimiter" = "$"); + +CREATE EXTERNAL TABLE accumulo_ck_2(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom", + "accumulo.columns.mapping" = ":rowid,cf:string"); + +insert overwrite table accumulo_ck_1 select struct('1000','2000','3000'),'value' +from src where key = 100; + +select * from accumulo_ck_1; +select * from accumulo_ck_2; + +DROP TABLE accumulo_ck_1; +DROP TABLE accumulo_ck_2; diff 
--git a/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q b/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q new file mode 100644 index 0000000..038633f --- /dev/null +++ b/accumulo-handler/src/test/queries/positive/accumulo_custom_key2.q @@ -0,0 +1,13 @@ +CREATE TABLE accumulo_ck_3(key struct, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom2", + "accumulo.columns.mapping" = ":rowid,cf:string", + "accumulo.composite.rowid"="org.apache.hadoop.hive.accumulo.serde.FirstCharAccumuloCompositeRowId"); + +insert overwrite table accumulo_ck_3 select struct('abcd','mnop','wxyz'),'value' +from src where key = 100; + +select * from accumulo_ck_3; + +DROP TABLE accumulo_ck_3; diff --git a/accumulo-handler/src/test/queries/positive/accumulo_joins.q b/accumulo-handler/src/test/queries/positive/accumulo_joins.q new file mode 100644 index 0000000..b72ec6b --- /dev/null +++ b/accumulo-handler/src/test/queries/positive/accumulo_joins.q @@ -0,0 +1,82 @@ +DROP TABLE users; +DROP TABLE states; +DROP TABLE countries; +DROP TABLE users_level; + +-- From HIVE-1257 + +CREATE TABLE users(key string, state string, country string, country_id int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id" +); + +CREATE TABLE states(key string, name string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,state:name" +); + +CREATE TABLE countries(key string, name string, country string, country_id int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id" +); + +INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 +FROM src WHERE key=100; + +INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa' +FROM src WHERE key=100; + +INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1 +FROM src WHERE key=100; + +set hive.input.format = org.apache.hadoop.hive.ql.io.HiveInputFormat; + +SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.key); + +SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.country); + +SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country_id = c.country_id); + +SELECT u.key, u.state, s.name FROM users u JOIN states s +ON (u.state = s.key); + +set hive.input.format = org.apache.hadoop.hive.ql.io.CombineHiveInputFormat; + +SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.key); + +SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.country); + +SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country_id = c.country_id); + +SELECT u.key, u.state, s.name FROM users u JOIN states s +ON (u.state = s.key); + +DROP TABLE users; +DROP TABLE states; +DROP TABLE countries; + +CREATE TABLE users(key int, userid int, username string, created int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created"); + +CREATE TABLE users_level(key int, userid int, level int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES 
("accumulo.columns.mapping" = ":rowID,f:userid,f:level"); + +-- HIVE-1903: the problem fixed here showed up even without any data, +-- so no need to load any to test it +SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num + FROM users JOIN users_level ON (users.userid = users_level.userid) + GROUP BY year(from_unixtime(users.created)), level; + +DROP TABLE users; +DROP TABLE users_level; diff --git a/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q b/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q new file mode 100644 index 0000000..0f064af --- /dev/null +++ b/accumulo-handler/src/test/queries/positive/accumulo_predicate_pushdown.q @@ -0,0 +1,70 @@ +CREATE TABLE accumulo_pushdown(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid,cf:string"); + +INSERT OVERWRITE TABLE accumulo_pushdown +SELECT cast(key as string), value +FROM src; + +-- with full pushdown +explain select * from accumulo_pushdown where key>'90'; + +select * from accumulo_pushdown where key>'90'; +select * from accumulo_pushdown where key<'1'; +select * from accumulo_pushdown where key<='2'; +select * from accumulo_pushdown where key>='90'; + +-- with constant expression +explain select * from accumulo_pushdown where key>=cast(40 + 50 as string); +select * from accumulo_pushdown where key>=cast(40 + 50 as string); + +-- with partial pushdown + +explain select * from accumulo_pushdown where key>'90' and value like '%9%'; + +select * from accumulo_pushdown where key>'90' and value like '%9%'; + +-- with two residuals + +explain select * from accumulo_pushdown +where key>='90' and value like '%9%' and key=cast(value as int); + +select * from accumulo_pushdown +where key>='90' and value like '%9%' and key=cast(value as int); + + +-- with contradictory pushdowns + +explain select * from accumulo_pushdown +where key<'80' and key>'90' and value like '%90%'; + +select * from accumulo_pushdown +where key<'80' and key>'90' and value like '%90%'; + +-- with nothing to push down + +explain select * from accumulo_pushdown; + +-- with a predicate which is not actually part of the filter, so +-- it should be ignored by pushdown + +explain select * from accumulo_pushdown +where (case when key<'90' then 2 else 4 end) > 3; + +-- with a predicate which is under an OR, so it should +-- be ignored by pushdown + +explain select * from accumulo_pushdown +where key<='80' or value like '%90%'; + +explain select * from accumulo_pushdown where key > '281' +and key < '287'; + +select * from accumulo_pushdown where key > '281' +and key < '287'; + +set hive.optimize.ppd.storage=false; + +-- with pushdown disabled + +explain select * from accumulo_pushdown where key<='90'; diff --git a/accumulo-handler/src/test/queries/positive/accumulo_queries.q b/accumulo-handler/src/test/queries/positive/accumulo_queries.q new file mode 100644 index 0000000..279b661 --- /dev/null +++ b/accumulo-handler/src/test/queries/positive/accumulo_queries.q @@ -0,0 +1,158 @@ +DROP TABLE accumulo_table_1; +CREATE TABLE accumulo_table_1(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0"); + +DESCRIBE EXTENDED accumulo_table_1; + +select * from accumulo_table_1; + +EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * 
WHERE (key%2)=0; +FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0; + +DROP TABLE accumulo_table_2; +CREATE EXTERNAL TABLE accumulo_table_2(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0"); + +EXPLAIN +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +ORDER BY key, value LIMIT 20; + +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +ORDER BY key, value LIMIT 20; + +EXPLAIN +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x +JOIN +(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y +ON (x.key = Y.key) +ORDER BY key, value; + +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x +JOIN +(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y +ON (x.key = Y.key) +ORDER BY key,value; + +DROP TABLE empty_accumulo_table; +CREATE TABLE empty_accumulo_table(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string"); + +DROP TABLE empty_normal_table; +CREATE TABLE empty_normal_table(key int, value string); + +select * from (select count(1) as c from empty_normal_table union all select count(1) as c from empty_accumulo_table) x order by c; +select * from (select count(1) c from empty_normal_table union all select count(1) as c from accumulo_table_1) x order by c; +select * from (select count(1) c from src union all select count(1) as c from empty_accumulo_table) x order by c; +select * from (select count(1) c from src union all select count(1) as c from accumulo_table_1) x order by c; + +CREATE TABLE accumulo_table_3(key int, value string, count int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,cf:val,cf2:count" +); + +EXPLAIN +INSERT OVERWRITE TABLE accumulo_table_3 +SELECT x.key, x.value, Y.count +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y +ON (x.key = Y.key); + +INSERT OVERWRITE TABLE accumulo_table_3 +SELECT x.key, x.value, Y.count +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y +ON (x.key = Y.key); + +select count(1) from accumulo_table_3; +select * from accumulo_table_3 order by key, value limit 5; +select key, count from accumulo_table_3 order by key, count desc limit 5; + +DROP TABLE accumulo_table_4; +CREATE TABLE accumulo_table_4(key int, value1 string, value2 int, value3 int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e" +); + +INSERT OVERWRITE TABLE accumulo_table_4 SELECT key, value, key+1, key+2 +FROM src WHERE key=98 OR key=100; + +SELECT * FROM accumulo_table_4 ORDER BY key; + +DROP TABLE accumulo_table_5; +CREATE EXTERNAL TABLE accumulo_table_5(key int, value map) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,a:*") +TBLPROPERTIES 
("accumulo.table.name" = "accumulo_table_4"); + +SELECT * FROM accumulo_table_5 ORDER BY key; + +DROP TABLE accumulo_table_6; +CREATE TABLE accumulo_table_6(key int, value map) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,cf:*" +); +INSERT OVERWRITE TABLE accumulo_table_6 SELECT key, map(value, key) FROM src +WHERE key=98 OR key=100; + +SELECT * FROM accumulo_table_6 ORDER BY key; + +DROP TABLE accumulo_table_7; +CREATE TABLE accumulo_table_7(value map, key int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = "cf:*,:rowID" +); +INSERT OVERWRITE TABLE accumulo_table_7 +SELECT map(value, key, upper(value), key+1), key FROM src +WHERE key=98 OR key=100; + +SELECT * FROM accumulo_table_7 ORDER BY key; + +DROP TABLE accumulo_table_8; +CREATE TABLE accumulo_table_8(key int, value1 string, value2 int, value3 int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e" +); + +INSERT OVERWRITE TABLE accumulo_table_8 SELECT key, value, key+1, key+2 +FROM src WHERE key=98 OR key=100; + +SELECT * FROM accumulo_table_8 ORDER BY key; + +DROP TABLE accumulo_table_1; +DROP TABLE accumulo_table_2; +DROP TABLE accumulo_table_3; +DROP TABLE accumulo_table_4; +DROP TABLE accumulo_table_5; +DROP TABLE accumulo_table_6; +DROP TABLE accumulo_table_7; +DROP TABLE accumulo_table_8; +DROP TABLE empty_accumulo_table; +DROP TABLE empty_normal_table; diff --git a/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q b/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q new file mode 100644 index 0000000..f904d3f --- /dev/null +++ b/accumulo-handler/src/test/queries/positive/accumulo_single_sourced_multi_insert.q @@ -0,0 +1,24 @@ +-- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE +CREATE TABLE src_x1(key string, value string); +CREATE TABLE src_x2(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid, cf:value"); + +explain +from src a +insert overwrite table src_x1 +select key,"" where a.key > 0 AND a.key < 50 +insert overwrite table src_x2 +select value,"" where a.key > 50 AND a.key < 100; + +from src a +insert overwrite table src_x1 +select key,"" where a.key > 0 AND a.key < 50 +insert overwrite table src_x2 +select value,"" where a.key > 50 AND a.key < 100; + +select * from src_x1 order by key; +select * from src_x2 order by key; + +DROP TABLE src_x1; +DROP TABLE src_x2; diff --git a/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out b/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out new file mode 100644 index 0000000..ca5f37b --- /dev/null +++ b/accumulo-handler/src/test/results/positive/accumulo_custom_key.q.out @@ -0,0 +1,80 @@ +PREHOOK: query: CREATE TABLE accumulo_ck_1(key struct, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom", + "accumulo.columns.mapping" = ":rowid,cf:string", + "accumulo.composite.rowid.factory"="org.apache.hadoop.hive.accumulo.serde.DelimitedAccumuloRowIdFactory", + "accumulo.composite.delimiter" = "$") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_ck_1 
+POSTHOOK: query: CREATE TABLE accumulo_ck_1(key struct<col1:string,col2:string,col3:string>, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom", + "accumulo.columns.mapping" = ":rowid,cf:string", + "accumulo.composite.rowid.factory"="org.apache.hadoop.hive.accumulo.serde.DelimitedAccumuloRowIdFactory", + "accumulo.composite.delimiter" = "$") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_ck_1 +PREHOOK: query: CREATE EXTERNAL TABLE accumulo_ck_2(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom", + "accumulo.columns.mapping" = ":rowid,cf:string") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_ck_2 +POSTHOOK: query: CREATE EXTERNAL TABLE accumulo_ck_2(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom", + "accumulo.columns.mapping" = ":rowid,cf:string") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_ck_2 +PREHOOK: query: insert overwrite table accumulo_ck_1 select struct('1000','2000','3000'),'value' +from src where key = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_ck_1 +POSTHOOK: query: insert overwrite table accumulo_ck_1 select struct('1000','2000','3000'),'value' +from src where key = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_ck_1 +PREHOOK: query: select * from accumulo_ck_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_ck_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_ck_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_ck_1 +#### A masked pattern was here #### +{"col1":"1000","col2":"2000","col3":"3000"} value +PREHOOK: query: select * from accumulo_ck_2 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_ck_2 +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_ck_2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_ck_2 +#### A masked pattern was here #### +1000$2000$3000 value +PREHOOK: query: DROP TABLE accumulo_ck_1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_ck_1 +PREHOOK: Output: default@accumulo_ck_1 +POSTHOOK: query: DROP TABLE accumulo_ck_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_ck_1 +POSTHOOK: Output: default@accumulo_ck_1 +PREHOOK: query: DROP TABLE accumulo_ck_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_ck_2 +PREHOOK: Output: default@accumulo_ck_2 +POSTHOOK: query: DROP TABLE accumulo_ck_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_ck_2 +POSTHOOK: Output: default@accumulo_ck_2 diff --git a/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out b/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out new file mode 100644 index 0000000..b9e1009 --- /dev/null +++ b/accumulo-handler/src/test/results/positive/accumulo_custom_key2.q.out @@ -0,0 +1,45 @@ +PREHOOK: query: CREATE TABLE accumulo_ck_3(key struct<col1:string,col2:string,col3:string>, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom2", + "accumulo.columns.mapping" = ":rowid,cf:string", +
"accumulo.composite.rowid"="org.apache.hadoop.hive.accumulo.serde.FirstCharAccumuloCompositeRowId") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_ck_3 +POSTHOOK: query: CREATE TABLE accumulo_ck_3(key struct, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( + "accumulo.table.name" = "accumulo_custom2", + "accumulo.columns.mapping" = ":rowid,cf:string", + "accumulo.composite.rowid"="org.apache.hadoop.hive.accumulo.serde.FirstCharAccumuloCompositeRowId") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_ck_3 +PREHOOK: query: insert overwrite table accumulo_ck_3 select struct('abcd','mnop','wxyz'),'value' +from src where key = 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_ck_3 +POSTHOOK: query: insert overwrite table accumulo_ck_3 select struct('abcd','mnop','wxyz'),'value' +from src where key = 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_ck_3 +PREHOOK: query: select * from accumulo_ck_3 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_ck_3 +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_ck_3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_ck_3 +#### A masked pattern was here #### +{"col1":"a","col2":"m","col3":"w"} value +PREHOOK: query: DROP TABLE accumulo_ck_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_ck_3 +PREHOOK: Output: default@accumulo_ck_3 +POSTHOOK: query: DROP TABLE accumulo_ck_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_ck_3 +POSTHOOK: Output: default@accumulo_ck_3 diff --git a/accumulo-handler/src/test/results/positive/accumulo_joins.q.out b/accumulo-handler/src/test/results/positive/accumulo_joins.q.out new file mode 100644 index 0000000..e0b6632 --- /dev/null +++ b/accumulo-handler/src/test/results/positive/accumulo_joins.q.out @@ -0,0 +1,282 @@ +PREHOOK: query: DROP TABLE users +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE users +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE states +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE states +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE countries +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE countries +POSTHOOK: type: DROPTABLE +PREHOOK: query: DROP TABLE users_level +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE users_level +POSTHOOK: type: DROPTABLE +PREHOOK: query: -- From HIVE-1257 + +CREATE TABLE users(key string, state string, country string, country_id int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@users +POSTHOOK: query: -- From HIVE-1257 + +CREATE TABLE users(key string, state string, country string, country_id int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,info:state,info:country,info:country_id" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@users +PREHOOK: query: CREATE TABLE states(key string, name string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,state:name" +) +PREHOOK: type: CREATETABLE +PREHOOK: 
Output: database:default +PREHOOK: Output: default@states +POSTHOOK: query: CREATE TABLE states(key string, name string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,state:name" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@states +PREHOOK: query: CREATE TABLE countries(key string, name string, country string, country_id int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@countries +POSTHOOK: query: CREATE TABLE countries(key string, name string, country string, country_id int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,info:name,info:country,info:country_id" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@countries +PREHOOK: query: INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 +FROM src WHERE key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@users +POSTHOOK: query: INSERT OVERWRITE TABLE users SELECT 'user1', 'IA', 'USA', 0 +FROM src WHERE key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@users +PREHOOK: query: INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa' +FROM src WHERE key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@states +POSTHOOK: query: INSERT OVERWRITE TABLE states SELECT 'IA', 'Iowa' +FROM src WHERE key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@states +PREHOOK: query: INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1 +FROM src WHERE key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@countries +POSTHOOK: query: INSERT OVERWRITE TABLE countries SELECT 'USA', 'United States', 'USA', 1 +FROM src WHERE key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@countries +PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@countries +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@countries +POSTHOOK: Input: default@users +#### A masked pattern was here #### +user1 USA United States USA +PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.country) +PREHOOK: type: QUERY +PREHOOK: Input: default@countries +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.country) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@countries +POSTHOOK: Input: default@users +#### A masked pattern was here #### +user1 USA United States USA +PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country_id = c.country_id) +PREHOOK: type: QUERY +PREHOOK: Input: default@countries +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.country, c.name, c.key 
FROM users u JOIN countries c +ON (u.country_id = c.country_id) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@countries +POSTHOOK: Input: default@users +#### A masked pattern was here #### +PREHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s +ON (u.state = s.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@states +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s +ON (u.state = s.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@states +POSTHOOK: Input: default@users +#### A masked pattern was here #### +user1 IA Iowa +PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@countries +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@countries +POSTHOOK: Input: default@users +#### A masked pattern was here #### +user1 USA United States USA +PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.country) +PREHOOK: type: QUERY +PREHOOK: Input: default@countries +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country = c.country) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@countries +POSTHOOK: Input: default@users +#### A masked pattern was here #### +user1 USA United States USA +PREHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country_id = c.country_id) +PREHOOK: type: QUERY +PREHOOK: Input: default@countries +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.country, c.name, c.key FROM users u JOIN countries c +ON (u.country_id = c.country_id) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@countries +POSTHOOK: Input: default@users +#### A masked pattern was here #### +PREHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s +ON (u.state = s.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@states +PREHOOK: Input: default@users +#### A masked pattern was here #### +POSTHOOK: query: SELECT u.key, u.state, s.name FROM users u JOIN states s +ON (u.state = s.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@states +POSTHOOK: Input: default@users +#### A masked pattern was here #### +user1 IA Iowa +PREHOOK: query: DROP TABLE users +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@users +PREHOOK: Output: default@users +POSTHOOK: query: DROP TABLE users +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@users +POSTHOOK: Output: default@users +PREHOOK: query: DROP TABLE states +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@states +PREHOOK: Output: default@states +POSTHOOK: query: DROP TABLE states +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@states +POSTHOOK: Output: default@states +PREHOOK: query: DROP TABLE countries +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@countries +PREHOOK: Output: default@countries +POSTHOOK: query: DROP TABLE countries +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@countries +POSTHOOK: Output: default@countries +PREHOOK: query: CREATE TABLE users(key int, userid int, username string, created int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH 
SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@users +POSTHOOK: query: CREATE TABLE users(key int, userid int, username string, created int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:nickname,f:created") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@users +PREHOOK: query: CREATE TABLE users_level(key int, userid int, level int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@users_level +POSTHOOK: query: CREATE TABLE users_level(key int, userid int, level int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,f:userid,f:level") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@users_level +PREHOOK: query: -- HIVE-1903: the problem fixed here showed up even without any data, +-- so no need to load any to test it +SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num + FROM users JOIN users_level ON (users.userid = users_level.userid) + GROUP BY year(from_unixtime(users.created)), level +PREHOOK: type: QUERY +PREHOOK: Input: default@users +PREHOOK: Input: default@users_level +#### A masked pattern was here #### +POSTHOOK: query: -- HIVE-1903: the problem fixed here showed up even without any data, +-- so no need to load any to test it +SELECT year(from_unixtime(users.created)) AS year, level, count(users.userid) AS num + FROM users JOIN users_level ON (users.userid = users_level.userid) + GROUP BY year(from_unixtime(users.created)), level +POSTHOOK: type: QUERY +POSTHOOK: Input: default@users +POSTHOOK: Input: default@users_level +#### A masked pattern was here #### +PREHOOK: query: DROP TABLE users +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@users +PREHOOK: Output: default@users +POSTHOOK: query: DROP TABLE users +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@users +POSTHOOK: Output: default@users +PREHOOK: query: DROP TABLE users_level +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@users_level +PREHOOK: Output: default@users_level +POSTHOOK: query: DROP TABLE users_level +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@users_level +POSTHOOK: Output: default@users_level diff --git a/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out b/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out new file mode 100644 index 0000000..309f2f7 --- /dev/null +++ b/accumulo-handler/src/test/results/positive/accumulo_predicate_pushdown.q.out @@ -0,0 +1,600 @@ +PREHOOK: query: CREATE TABLE accumulo_pushdown(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid,cf:string") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_pushdown +POSTHOOK: query: CREATE TABLE accumulo_pushdown(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid,cf:string") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: 
database:default +POSTHOOK: Output: default@accumulo_pushdown +PREHOOK: query: INSERT OVERWRITE TABLE accumulo_pushdown +SELECT cast(key as string), value +FROM src +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_pushdown +POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_pushdown +SELECT cast(key as string), value +FROM src +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_pushdown +PREHOOK: query: -- with full pushdown +explain select * from accumulo_pushdown where key>'90' +PREHOOK: type: QUERY +POSTHOOK: query: -- with full pushdown +explain select * from accumulo_pushdown where key>'90' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + filterExpr: (key > '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (key > '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from accumulo_pushdown where key>'90' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key>'90' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: select * from accumulo_pushdown where key<'1' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key<'1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +0 val_0 +PREHOOK: query: select * from accumulo_pushdown where key<='2' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key<='2' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +0 val_0 +10 val_10 +100 val_100 +103 val_103 +104 val_104 +105 val_105 +11 val_11 +111 val_111 +113 val_113 +114 val_114 +116 val_116 +118 val_118 +119 val_119 +12 val_12 +120 val_120 +125 val_125 +126 val_126 +128 val_128 +129 val_129 +131 val_131 +133 val_133 +134 val_134 +136 val_136 +137 val_137 +138 val_138 +143 val_143 +145 val_145 +146 val_146 +149 val_149 +15 val_15 +150 val_150 +152 val_152 +153 val_153 +155 val_155 +156 val_156 +157 val_157 +158 val_158 +160 val_160 +162 val_162 +163 val_163 +164 val_164 +165 val_165 +166 val_166 +167 val_167 +168 val_168 +169 val_169 +17 val_17 +170 val_170 +172 val_172 +174 val_174 +175 val_175 +176 val_176 +177 val_177 +178 val_178 +179 val_179 +18 val_18 +180 val_180 +181 val_181 +183 val_183 +186 
val_186 +187 val_187 +189 val_189 +19 val_19 +190 val_190 +191 val_191 +192 val_192 +193 val_193 +194 val_194 +195 val_195 +196 val_196 +197 val_197 +199 val_199 +2 val_2 +PREHOOK: query: select * from accumulo_pushdown where key>='90' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key>='90' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: -- with constant expression +explain select * from accumulo_pushdown where key>=cast(40 + 50 as string) +PREHOOK: type: QUERY +POSTHOOK: query: -- with constant expression +explain select * from accumulo_pushdown where key>=cast(40 + 50 as string) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + filterExpr: (key >= '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (key >= '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from accumulo_pushdown where key>=cast(40 + 50 as string) +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key>=cast(40 + 50 as string) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +90 val_90 +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: -- with partial pushdown + +explain select * from accumulo_pushdown where key>'90' and value like '%9%' +PREHOOK: type: QUERY +POSTHOOK: query: -- with partial pushdown + +explain select * from accumulo_pushdown where key>'90' and value like '%9%' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + filterExpr: (key > '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (value like '%9%') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from accumulo_pushdown where key>'90' and value like '%9%' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key>'90' and value like '%9%' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +92 val_92 +95 val_95 +96 val_96 +97 val_97 +98 val_98 +PREHOOK: query: -- with two residuals + +explain select * from accumulo_pushdown +where key>='90' and value like '%9%' and key=cast(value as int) +PREHOOK: type: QUERY +POSTHOOK: query: -- with two residuals + +explain select * from accumulo_pushdown +where key>='90' and value like '%9%' and key=cast(value as int) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + filterExpr: (key >= '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: ((value like '%9%') and (key = UDFToInteger(value))) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from accumulo_pushdown +where key>='90' and value like '%9%' and key=cast(value as int) +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown +where key>='90' and value like '%9%' and key=cast(value as int) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +PREHOOK: query: -- with contradictory pushdowns + +explain select * from accumulo_pushdown +where key<'80' and key>'90' and value like '%90%' +PREHOOK: type: QUERY +POSTHOOK: query: -- with contradictory pushdowns + +explain select * from accumulo_pushdown +where key<'80' and key>'90' and value like '%90%' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + filterExpr: ((key < '80') and (key > '90')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (value like '%90%') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: 
org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from accumulo_pushdown +where key<'80' and key>'90' and value like '%90%' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown +where key<'80' and key>'90' and value like '%90%' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +PREHOOK: query: -- with nothing to push down + +explain select * from accumulo_pushdown +PREHOOK: type: QUERY +POSTHOOK: query: -- with nothing to push down + +explain select * from accumulo_pushdown +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + TableScan + alias: accumulo_pushdown + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + ListSink + +PREHOOK: query: -- with a predicate which is not actually part of the filter, so +-- it should be ignored by pushdown + +explain select * from accumulo_pushdown +where (case when key<'90' then 2 else 4 end) > 3 +PREHOOK: type: QUERY +POSTHOOK: query: -- with a predicate which is not actually part of the filter, so +-- it should be ignored by pushdown + +explain select * from accumulo_pushdown +where (case when key<'90' then 2 else 4 end) > 3 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (CASE WHEN ((key < '90')) THEN (2) ELSE (4) END > 3) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- with a predicate which is under an OR, so it should +-- be ignored by pushdown + +explain select * from accumulo_pushdown +where key<='80' or value like '%90%' +PREHOOK: type: QUERY +POSTHOOK: query: -- with a predicate which is under an OR, so it should +-- be ignored by pushdown + +explain select * from accumulo_pushdown +where key<='80' or value like '%90%' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: ((key <= '80') or (value like 
'%90%')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain select * from accumulo_pushdown where key > '281' +and key < '287' +PREHOOK: type: QUERY +POSTHOOK: query: explain select * from accumulo_pushdown where key > '281' +and key < '287' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + filterExpr: ((key > '281') and (key < '287')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: ((key > '281') and (key < '287')) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select * from accumulo_pushdown where key > '281' +and key < '287' +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_pushdown where key > '281' +and key < '287' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_pushdown +#### A masked pattern was here #### +282 val_282 +283 val_283 +284 val_284 +285 val_285 +286 val_286 +PREHOOK: query: -- with pushdown disabled + +explain select * from accumulo_pushdown where key<='90' +PREHOOK: type: QUERY +POSTHOOK: query: -- with pushdown disabled + +explain select * from accumulo_pushdown where key<='90' +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_pushdown + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: (key <= '90') (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/accumulo-handler/src/test/results/positive/accumulo_queries.q.out b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out new file mode 100644 index 0000000..ac8b093 --- /dev/null +++ b/accumulo-handler/src/test/results/positive/accumulo_queries.q.out @@ -0,0 +1,909 @@ +PREHOOK: query: DROP TABLE accumulo_table_1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE accumulo_table_1(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_1 +POSTHOOK: query: CREATE TABLE accumulo_table_1(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_1 +PREHOOK: query: DESCRIBE EXTENDED accumulo_table_1 +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@accumulo_table_1 +POSTHOOK: query: DESCRIBE EXTENDED accumulo_table_1 +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@accumulo_table_1 +key int from deserializer +value string from deserializer + +#### A masked pattern was here #### +PREHOOK: query: select * from accumulo_table_1 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_1 +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_table_1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_1 +#### A masked pattern was here #### +PREHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-0 is a root stage + +STAGE PLANS: + Stage: Stage-0 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key % 2) = 0) (type: boolean) + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: UDFToInteger(key) (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 14 Data size: 2805 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat + output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat + serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe + name: default.accumulo_table_1 + +PREHOOK: query: FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_table_1 +POSTHOOK: query: FROM src INSERT OVERWRITE TABLE accumulo_table_1 SELECT * WHERE (key%2)=0 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_table_1 
+PREHOOK: query: DROP TABLE accumulo_table_2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE EXTERNAL TABLE accumulo_table_2(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_2 +POSTHOOK: query: CREATE EXTERNAL TABLE accumulo_table_2(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_0") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_2 +PREHOOK: query: EXPLAIN +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +ORDER BY key, value LIMIT 20 +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +ORDER BY key, value LIMIT 20 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_table_1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(_col0) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col0) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + TableScan + alias: src + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(_col0) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col0) (type: double) + Statistics: Num rows: 15 Data size: 3006 Basic stats: COMPLETE Column stats: NONE + value expressions: _col0 (type: string), _col1 (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 {VALUE._col0} {VALUE._col1} + outputColumnNames: _col2, _col3 + Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col2 (type: string), _col3 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: 
org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: string), _col1 (type: string) + sort order: ++ + Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: string), KEY.reducesinkkey1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE + Limit + Number of rows: 20 + Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: 20 + Processor Tree: + ListSink + +PREHOOK: query: SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +ORDER BY key, value LIMIT 20 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_1 +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.* FROM src) Y +ON (x.key = Y.key) +ORDER BY key, value LIMIT 20 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_1 +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 val_0 +0 val_0 +0 val_0 +10 val_10 +100 val_100 +100 val_100 +104 val_104 +104 val_104 +114 val_114 +116 val_116 +118 val_118 +118 val_118 +12 val_12 +12 val_12 +120 val_120 +120 val_120 +126 val_126 +128 val_128 +128 val_128 +128 val_128 +PREHOOK: query: EXPLAIN +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x +JOIN +(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y +ON (x.key = Y.key) +ORDER BY key, value +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x +JOIN +(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y +ON (x.key = Y.key) +ORDER BY key, value +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-2 depends on stages: Stage-1 + Stage-0 depends on stages: Stage-2 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_table_1 + filterExpr: (100 < key) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: int) + outputColumnNames: _col0 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + TableScan + alias: accumulo_table_2 + filterExpr: (key < 120) (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + 
Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: int) + sort order: + + Map-reduce partition columns: _col0 (type: int) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col1 (type: string) + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 {KEY.reducesinkkey0} {VALUE._col0} + outputColumnNames: _col2, _col3 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: _col2 (type: int), _col3 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-2 + Map Reduce + Map Operator Tree: + TableScan + Reduce Output Operator + key expressions: _col0 (type: int), _col1 (type: string) + sort order: ++ + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Operator Tree: + Select Operator + expressions: KEY.reducesinkkey0 (type: int), KEY.reducesinkkey1 (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x +JOIN +(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y +ON (x.key = Y.key) +ORDER BY key,value +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_1 +PREHOOK: Input: default@accumulo_table_2 +#### A masked pattern was here #### +POSTHOOK: query: SELECT Y.* +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1 WHERE 100 < accumulo_table_1.key) x +JOIN +(SELECT accumulo_table_2.* FROM accumulo_table_2 WHERE accumulo_table_2.key < 120) Y +ON (x.key = Y.key) +ORDER BY key,value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_1 +POSTHOOK: Input: default@accumulo_table_2 +#### A masked pattern was here #### +12 val_12 +104 val_104 +114 val_114 +116 val_116 +118 val_118 +PREHOOK: query: DROP TABLE empty_accumulo_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE empty_accumulo_table +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE empty_accumulo_table(key int, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@empty_accumulo_table +POSTHOOK: query: CREATE TABLE empty_accumulo_table(key int, value string) +STORED BY 
'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,cf:string") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@empty_accumulo_table +PREHOOK: query: DROP TABLE empty_normal_table +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE empty_normal_table +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE empty_normal_table(key int, value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@empty_normal_table +POSTHOOK: query: CREATE TABLE empty_normal_table(key int, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@empty_normal_table +PREHOOK: query: select * from (select count(1) as c from empty_normal_table union all select count(1) as c from empty_accumulo_table) x order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@empty_accumulo_table +PREHOOK: Input: default@empty_normal_table +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(1) as c from empty_normal_table union all select count(1) as c from empty_accumulo_table) x order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@empty_accumulo_table +POSTHOOK: Input: default@empty_normal_table +#### A masked pattern was here #### +0 +0 +PREHOOK: query: select * from (select count(1) c from empty_normal_table union all select count(1) as c from accumulo_table_1) x order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_1 +PREHOOK: Input: default@empty_normal_table +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(1) c from empty_normal_table union all select count(1) as c from accumulo_table_1) x order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_1 +POSTHOOK: Input: default@empty_normal_table +#### A masked pattern was here #### +0 +155 +PREHOOK: query: select * from (select count(1) c from src union all select count(1) as c from empty_accumulo_table) x order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@empty_accumulo_table +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(1) c from src union all select count(1) as c from empty_accumulo_table) x order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@empty_accumulo_table +POSTHOOK: Input: default@src +#### A masked pattern was here #### +0 +500 +PREHOOK: query: select * from (select count(1) c from src union all select count(1) as c from accumulo_table_1) x order by c +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_1 +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select * from (select count(1) c from src union all select count(1) as c from accumulo_table_1) x order by c +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_1 +POSTHOOK: Input: default@src +#### A masked pattern was here #### +155 +500 +PREHOOK: query: CREATE TABLE accumulo_table_3(key int, value string, count int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,cf:val,cf2:count" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_3 +POSTHOOK: query: CREATE TABLE accumulo_table_3(key int, value string, count int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = 
":rowID,cf:val,cf2:count" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_3 +PREHOOK: query: EXPLAIN +INSERT OVERWRITE TABLE accumulo_table_3 +SELECT x.key, x.value, Y.count +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y +ON (x.key = Y.key) +PREHOOK: type: QUERY +POSTHOOK: query: EXPLAIN +INSERT OVERWRITE TABLE accumulo_table_3 +SELECT x.key, x.value, Y.count +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y +ON (x.key = Y.key) +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: src + Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string) + outputColumnNames: key + Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Group By Operator + aggregations: count(key) + keys: key (type: string) + mode: hash + outputColumnNames: _col0, _col1 + Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: _col0 (type: string) + sort order: + + Map-reduce partition columns: _col0 (type: string) + Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reduce Operator Tree: + Group By Operator + aggregations: count(VALUE._col0) + keys: KEY._col0 (type: string) + mode: mergepartial + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 1402 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: string), _col1 (type: bigint) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 14 Data size: 1402 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.SequenceFileInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat + serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe + + Stage: Stage-0 + Map Reduce + Map Operator Tree: + TableScan + alias: accumulo_table_1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Filter Operator + predicate: UDFToDouble(key) is not null (type: boolean) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Select Operator + expressions: key (type: int), value (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + Reduce Output Operator + key expressions: UDFToDouble(_col0) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col0) (type: double) + Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE + value expressions: _col0 (type: int), _col1 (type: string) + TableScan + Reduce Output Operator + key expressions: UDFToDouble(_col0) (type: double) + sort order: + + Map-reduce partition columns: UDFToDouble(_col0) (type: double) + Statistics: Num rows: 14 Data size: 1402 Basic stats: COMPLETE Column stats: NONE + value expressions: _col1 (type: bigint) + Reduce Operator 
Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {VALUE._col0} {VALUE._col1} + 1 {VALUE._col1} + outputColumnNames: _col0, _col1, _col3 + Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col0 (type: int), _col1 (type: string), UDFToInteger(_col3) (type: int) + outputColumnNames: _col0, _col1, _col2 + Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat + output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat + serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe + name: default.accumulo_table_3 + +PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_3 +SELECT x.key, x.value, Y.count +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y +ON (x.key = Y.key) +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_1 +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_table_3 +POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_3 +SELECT x.key, x.value, Y.count +FROM +(SELECT accumulo_table_1.* FROM accumulo_table_1) x +JOIN +(SELECT src.key, count(src.key) as count FROM src GROUP BY src.key) Y +ON (x.key = Y.key) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_1 +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_table_3 +PREHOOK: query: select count(1) from accumulo_table_3 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_3 +#### A masked pattern was here #### +POSTHOOK: query: select count(1) from accumulo_table_3 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_3 +#### A masked pattern was here #### +155 +PREHOOK: query: select * from accumulo_table_3 order by key, value limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_3 +#### A masked pattern was here #### +POSTHOOK: query: select * from accumulo_table_3 order by key, value limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_3 +#### A masked pattern was here #### +0 val_0 3 +2 val_2 1 +4 val_4 1 +8 val_8 1 +10 val_10 1 +PREHOOK: query: select key, count from accumulo_table_3 order by key, count desc limit 5 +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_3 +#### A masked pattern was here #### +POSTHOOK: query: select key, count from accumulo_table_3 order by key, count desc limit 5 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_3 +#### A masked pattern was here #### +0 3 +2 1 +4 1 +8 1 +10 1 +PREHOOK: query: DROP TABLE accumulo_table_4 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_4 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE accumulo_table_4(key int, value1 string, value2 int, value3 int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_4 +POSTHOOK: query: CREATE TABLE accumulo_table_4(key int, value1 string, value2 int, value3 int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e" +) +POSTHOOK: type: 
CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_4 +PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_4 SELECT key, value, key+1, key+2 +FROM src WHERE key=98 OR key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_table_4 +POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_4 SELECT key, value, key+1, key+2 +FROM src WHERE key=98 OR key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_table_4 +PREHOOK: query: SELECT * FROM accumulo_table_4 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_4 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM accumulo_table_4 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_4 +#### A masked pattern was here #### +98 val_98 99 100 +100 val_100 101 102 +PREHOOK: query: DROP TABLE accumulo_table_5 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_5 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE EXTERNAL TABLE accumulo_table_5(key int, value map) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,a:*") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_4") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_5 +POSTHOOK: query: CREATE EXTERNAL TABLE accumulo_table_5(key int, value map) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowID,a:*") +TBLPROPERTIES ("accumulo.table.name" = "accumulo_table_4") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_5 +PREHOOK: query: SELECT * FROM accumulo_table_5 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_5 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM accumulo_table_5 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_5 +#### A masked pattern was here #### +98 {"b":"val_98","c":"99"} +100 {"b":"val_100","c":"101"} +PREHOOK: query: DROP TABLE accumulo_table_6 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_6 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE accumulo_table_6(key int, value map) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,cf:*" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_6 +POSTHOOK: query: CREATE TABLE accumulo_table_6(key int, value map) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,cf:*" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_6 +PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_6 SELECT key, map(value, key) FROM src +WHERE key=98 OR key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_table_6 +POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_6 SELECT key, map(value, key) FROM src +WHERE key=98 OR key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_table_6 +PREHOOK: query: SELECT * FROM accumulo_table_6 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_6 +#### A masked pattern was here #### 
+POSTHOOK: query: SELECT * FROM accumulo_table_6 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_6 +#### A masked pattern was here #### +98 {"val_98":"98"} +100 {"val_100":"100"} +PREHOOK: query: DROP TABLE accumulo_table_7 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_7 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE accumulo_table_7(value map, key int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = "cf:*,:rowID" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_7 +POSTHOOK: query: CREATE TABLE accumulo_table_7(value map, key int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = "cf:*,:rowID" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_7 +PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_7 +SELECT map(value, key, upper(value), key+1), key FROM src +WHERE key=98 OR key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_table_7 +POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_7 +SELECT map(value, key, upper(value), key+1), key FROM src +WHERE key=98 OR key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_table_7 +PREHOOK: query: SELECT * FROM accumulo_table_7 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_7 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM accumulo_table_7 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_7 +#### A masked pattern was here #### +{"VAL_98":"99.0","val_98":"98"} 98 +{"VAL_100":"101.0","val_100":"100"} 100 +PREHOOK: query: DROP TABLE accumulo_table_8 +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE accumulo_table_8 +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE accumulo_table_8(key int, value1 string, value2 int, value3 int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e" +) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@accumulo_table_8 +POSTHOOK: query: CREATE TABLE accumulo_table_8(key int, value1 string, value2 int, value3 int) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ( +"accumulo.columns.mapping" = ":rowID,a:b,a:c,d:e" +) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@accumulo_table_8 +PREHOOK: query: INSERT OVERWRITE TABLE accumulo_table_8 SELECT key, value, key+1, key+2 +FROM src WHERE key=98 OR key=100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@accumulo_table_8 +POSTHOOK: query: INSERT OVERWRITE TABLE accumulo_table_8 SELECT key, value, key+1, key+2 +FROM src WHERE key=98 OR key=100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@accumulo_table_8 +PREHOOK: query: SELECT * FROM accumulo_table_8 ORDER BY key +PREHOOK: type: QUERY +PREHOOK: Input: default@accumulo_table_8 +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM accumulo_table_8 ORDER BY key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@accumulo_table_8 +#### A masked pattern was here #### +98 val_98 99 100 +100 val_100 101 102 +PREHOOK: query: DROP TABLE accumulo_table_1 
+PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_1 +PREHOOK: Output: default@accumulo_table_1 +POSTHOOK: query: DROP TABLE accumulo_table_1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_1 +POSTHOOK: Output: default@accumulo_table_1 +PREHOOK: query: DROP TABLE accumulo_table_2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_2 +PREHOOK: Output: default@accumulo_table_2 +POSTHOOK: query: DROP TABLE accumulo_table_2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_2 +POSTHOOK: Output: default@accumulo_table_2 +PREHOOK: query: DROP TABLE accumulo_table_3 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_3 +PREHOOK: Output: default@accumulo_table_3 +POSTHOOK: query: DROP TABLE accumulo_table_3 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_3 +POSTHOOK: Output: default@accumulo_table_3 +PREHOOK: query: DROP TABLE accumulo_table_4 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_4 +PREHOOK: Output: default@accumulo_table_4 +POSTHOOK: query: DROP TABLE accumulo_table_4 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_4 +POSTHOOK: Output: default@accumulo_table_4 +PREHOOK: query: DROP TABLE accumulo_table_5 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_5 +PREHOOK: Output: default@accumulo_table_5 +POSTHOOK: query: DROP TABLE accumulo_table_5 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_5 +POSTHOOK: Output: default@accumulo_table_5 +PREHOOK: query: DROP TABLE accumulo_table_6 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_6 +PREHOOK: Output: default@accumulo_table_6 +POSTHOOK: query: DROP TABLE accumulo_table_6 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_6 +POSTHOOK: Output: default@accumulo_table_6 +PREHOOK: query: DROP TABLE accumulo_table_7 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_7 +PREHOOK: Output: default@accumulo_table_7 +POSTHOOK: query: DROP TABLE accumulo_table_7 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_7 +POSTHOOK: Output: default@accumulo_table_7 +PREHOOK: query: DROP TABLE accumulo_table_8 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@accumulo_table_8 +PREHOOK: Output: default@accumulo_table_8 +POSTHOOK: query: DROP TABLE accumulo_table_8 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@accumulo_table_8 +POSTHOOK: Output: default@accumulo_table_8 +PREHOOK: query: DROP TABLE empty_accumulo_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@empty_accumulo_table +PREHOOK: Output: default@empty_accumulo_table +POSTHOOK: query: DROP TABLE empty_accumulo_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@empty_accumulo_table +POSTHOOK: Output: default@empty_accumulo_table +PREHOOK: query: DROP TABLE empty_normal_table +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@empty_normal_table +PREHOOK: Output: default@empty_normal_table +POSTHOOK: query: DROP TABLE empty_normal_table +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@empty_normal_table +POSTHOOK: Output: default@empty_normal_table diff --git a/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out new file mode 100644 index 0000000..7038bdd --- /dev/null +++ b/accumulo-handler/src/test/results/positive/accumulo_single_sourced_multi_insert.q.out @@ -0,0 +1,255 @@ +PREHOOK: query: -- HIVE-4375 Single sourced 
multi insert consists of native and non-native table mixed throws NPE +CREATE TABLE src_x1(key string, value string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_x1 +POSTHOOK: query: -- HIVE-4375 Single sourced multi insert consists of native and non-native table mixed throws NPE +CREATE TABLE src_x1(key string, value string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_x1 +PREHOOK: query: CREATE TABLE src_x2(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid, cf:value") +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_x2 +POSTHOOK: query: CREATE TABLE src_x2(key string, value string) +STORED BY 'org.apache.hadoop.hive.accumulo.AccumuloStorageHandler' +WITH SERDEPROPERTIES ("accumulo.columns.mapping" = ":rowid, cf:value") +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_x2 +PREHOOK: query: explain +from src a +insert overwrite table src_x1 +select key,"" where a.key > 0 AND a.key < 50 +insert overwrite table src_x2 +select value,"" where a.key > 50 AND a.key < 100 +PREHOOK: type: QUERY +POSTHOOK: query: explain +from src a +insert overwrite table src_x1 +select key,"" where a.key > 0 AND a.key < 50 +insert overwrite table src_x2 +select value,"" where a.key > 50 AND a.key < 100 +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-7 depends on stages: Stage-1 , consists of Stage-4, Stage-3, Stage-5 + Stage-4 + Stage-0 depends on stages: Stage-4, Stage-3, Stage-6 + Stage-2 depends on stages: Stage-0 + Stage-3 + Stage-5 + Stage-6 depends on stages: Stage-5 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: a + Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: ((key > 0) and (key < 50)) (type: boolean) + Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: key (type: string), '' (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src_x1 + Filter Operator + predicate: ((key > 50) and (key < 100)) (type: boolean) + Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: value (type: string), '' (type: string) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 3 Data size: 601 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableInputFormat + output format: org.apache.hadoop.hive.accumulo.mr.HiveAccumuloTableOutputFormat + serde: org.apache.hadoop.hive.accumulo.serde.AccumuloSerDe + name: default.src_x2 + + Stage: Stage-7 + Conditional Operator + + Stage: Stage-4 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + + Stage: Stage-0 
+ Move Operator + tables: + replace: true + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src_x1 + + Stage: Stage-2 + Stats-Aggr Operator + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src_x1 + + Stage: Stage-5 + Map Reduce + Map Operator Tree: + TableScan + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + name: default.src_x1 + + Stage: Stage-6 + Move Operator + files: + hdfs directory: true +#### A masked pattern was here #### + +PREHOOK: query: from src a +insert overwrite table src_x1 +select key,"" where a.key > 0 AND a.key < 50 +insert overwrite table src_x2 +select value,"" where a.key > 50 AND a.key < 100 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@src_x1 +PREHOOK: Output: default@src_x2 +POSTHOOK: query: from src a +insert overwrite table src_x1 +select key,"" where a.key > 0 AND a.key < 50 +insert overwrite table src_x2 +select value,"" where a.key > 50 AND a.key < 100 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@src_x1 +POSTHOOK: Output: default@src_x2 +POSTHOOK: Lineage: src_x1.key SIMPLE [(src)a.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_x1.value SIMPLE [] +PREHOOK: query: select * from src_x1 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src_x1 +#### A masked pattern was here #### +POSTHOOK: query: select * from src_x1 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_x1 +#### A masked pattern was here #### +10 +11 +12 +12 +15 +15 +17 +18 +18 +19 +2 +20 +24 +24 +26 +26 +27 +28 +30 +33 +34 +35 +35 +35 +37 +37 +4 +41 +42 +42 +43 +44 +47 +5 +5 +5 +8 +9 +PREHOOK: query: select * from src_x2 order by key +PREHOOK: type: QUERY +PREHOOK: Input: default@src_x2 +#### A masked pattern was here #### +POSTHOOK: query: select * from src_x2 order by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_x2 +#### A masked pattern was here #### +val_51 +val_53 +val_54 +val_57 +val_58 +val_64 +val_65 +val_66 +val_67 +val_69 +val_70 +val_72 +val_74 +val_76 +val_77 +val_78 +val_80 +val_82 +val_83 +val_84 +val_85 +val_86 +val_87 +val_90 +val_92 +val_95 +val_96 +val_97 +val_98 +PREHOOK: query: DROP TABLE src_x1 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_x1 +PREHOOK: Output: default@src_x1 +POSTHOOK: query: DROP TABLE src_x1 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_x1 +POSTHOOK: Output: default@src_x1 +PREHOOK: query: DROP TABLE src_x2 +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@src_x2 +PREHOOK: Output: default@src_x2 +POSTHOOK: query: DROP TABLE src_x2 +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@src_x2 +POSTHOOK: Output: default@src_x2 diff --git a/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm b/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm new file mode 100644 index 0000000..dd1ee64 --- /dev/null +++ 
b/accumulo-handler/src/test/templates/TestAccumuloCliDriver.vm @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.cli; + +import junit.framework.Test; +import junit.framework.TestCase; +import junit.framework.TestSuite; + +import java.io.*; +import java.util.*; + +import org.apache.hadoop.hive.accumulo.AccumuloQTestUtil; +import org.apache.hadoop.hive.accumulo.AccumuloTestSetup; +import org.apache.hadoop.hive.ql.QTestUtil.MiniClusterType; +import org.apache.hadoop.hive.ql.session.SessionState; + +public class $className extends TestCase { + + private static final String HIVE_ROOT = AccumuloQTestUtil.ensurePathEndsInSlash(System.getProperty("hive.root")); + private AccumuloQTestUtil qt; + private AccumuloTestSetup setup; + + public $className(String name, AccumuloTestSetup setup) { + super(name); + qt = null; + this.setup = setup; + } + + @Override + protected void setUp() { + + MiniClusterType miniMR = MiniClusterType.valueForString("$clusterMode"); + String initScript = "$initScript"; + String cleanupScript = "$cleanupScript"; + + try { + qt = new AccumuloQTestUtil((HIVE_ROOT + "$resultsDir"), (HIVE_ROOT + "$logDir"), miniMR, + setup, initScript, cleanupScript); + } catch (Exception e) { + System.err.println("Exception: " + e.getMessage()); + e.printStackTrace(); + System.err.flush(); + fail("Unexpected exception in setup: " + e); + } + } + + @Override + protected void tearDown() { + try { + qt.shutdown(); + } + catch (Exception e) { + System.err.println("Exception: " + e.getMessage()); + e.printStackTrace(); + System.err.flush(); + fail("Unexpected exception in tearDown"); + } + } + + public static Test suite() { + Set qFilesToExecute = new HashSet(); + String qFiles = System.getProperty("qfile", "").trim(); + if(!qFiles.isEmpty()) { + for(String qFile : qFiles.split(",")) { + qFile = qFile.trim(); + if(!qFile.isEmpty()) { + qFilesToExecute.add(qFile); + } + } + } + TestSuite suite = new TestSuite(); + AccumuloTestSetup setup = new AccumuloTestSetup(suite); +#foreach ($qf in $qfiles) + #set ($fname = $qf.getName()) + #set ($eidx = $fname.indexOf('.')) + #set ($tname = $fname.substring(0, $eidx)) + if(qFilesToExecute.isEmpty() || qFilesToExecute.contains("$fname")) { + suite.addTest(new $className("testCliDriver_$tname", setup)); + } +#end + return setup; + } + +#foreach ($qf in $qfiles) + #set ($fname = $qf.getName()) + #set ($eidx = $fname.indexOf('.')) + #set ($tname = $fname.substring(0, $eidx)) + #set ($fpath = $qfilesMap.get($fname)) + public void testCliDriver_$tname() throws Exception { + runTest("$tname", "$fname", (HIVE_ROOT + "$fpath")); + } + +#end + + private void runTest(String tname, String fname, String fpath) throws Exception { + long startTime = 
System.currentTimeMillis(); + try { + System.err.println("Begin query: " + fname); + + qt.addFile(fpath); + + if (qt.shouldBeSkipped(fname)) { + System.err.println("Test " + fname + " skipped"); + return; + } + + qt.cliInit(fname); + qt.clearTestSideEffects(); + int ecode = qt.executeClient(fname); + if (ecode != 0) { + qt.failed(ecode, fname, null); + } + + ecode = qt.checkCliDriverResults(fname); + if (ecode != 0) { + qt.failedDiff(ecode, fname, null); + } + qt.clearPostTestEffects(); + + } catch (Throwable e) { + qt.failed(e, fname, null); + } + + long elapsedTime = System.currentTimeMillis() - startTime; + System.err.println("Done query: " + fname + " elapsedTime=" + elapsedTime/1000 + "s"); + assertTrue("Test passed", true); + } +} + diff --git a/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java b/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java index 00e12c4..4293b7c 100644 --- a/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java +++ b/common/src/java/org/apache/hadoop/hive/ant/GenHiveTemplate.java @@ -108,7 +108,7 @@ private Document generateTemplate() throws Exception { continue; } Element property = appendElement(root, "property", null); - appendElement(property, "key", confVars.varname); + appendElement(property, "name", confVars.varname); appendElement(property, "value", confVars.getDefaultExpr()); appendElement(property, "description", normalize(confVars.getDescription())); // wish to add new line here. diff --git a/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java b/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java index d4cc32d..bd3e997 100644 --- a/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java +++ b/common/src/java/org/apache/hadoop/hive/common/type/Decimal128.java @@ -103,6 +103,13 @@ private short scale; /** + * This is the actual scale detected from the value passed to this Decimal128. + * The value is always equals or less than #scale. It is used to return the correct + * decimal string from {@link #getHiveDecimalString()}. + */ + private short actualScale; + + /** * -1 means negative, 0 means zero, 1 means positive. * * @serial @@ -127,6 +134,7 @@ public Decimal128() { this.unscaledValue = new UnsignedInt128(); this.scale = 0; this.signum = 0; + this.actualScale = 0; } /** @@ -139,6 +147,7 @@ public Decimal128(Decimal128 o) { this.unscaledValue = new UnsignedInt128(o.unscaledValue); this.scale = o.scale; this.signum = o.signum; + this.actualScale = o.actualScale; } /** @@ -178,6 +187,7 @@ public Decimal128(UnsignedInt128 unscaledVal, short scale, boolean negative) { checkScaleRange(scale); this.unscaledValue = new UnsignedInt128(unscaledVal); this.scale = scale; + this.actualScale = scale; if (unscaledValue.isZero()) { this.signum = 0; } else { @@ -264,6 +274,7 @@ public Decimal128 update(Decimal128 o) { this.unscaledValue.update(o.unscaledValue); this.scale = o.scale; this.signum = o.signum; + this.actualScale = o.actualScale; return this; } @@ -292,7 +303,7 @@ public Decimal128 update(long val) { /** * Update the value of this object with the given {@code long} with the given - * scal. + * scale. * * @param val * {@code long} value to be set to {@code Decimal128}. 
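The actualScale field introduced in Decimal128 above records how many fractional digits were actually present in the value the object was built from, as opposed to the storage scale the column enforces, so that getHiveDecimalString() can print those digits back. For double inputs the patch starts the detection from BigDecimal.valueOf(val).scale() (see the update(double, short) hunk below), which goes through Double.toString. A minimal, illustrative sketch of that first step only — the class and helper names here are not part of the patch, and the patch additionally adjusts the result by the exponent it extracts from the double's bits before clamping negatives to zero:

import java.math.BigDecimal;

public class ActualScaleSketch {
  // Illustrative only: the number of fractional digits Double.toString
  // preserves for a given double, which is the starting point the patch
  // uses when computing actualScale in update(double, short).
  static short detectedScale(double val) {
    return (short) Math.max(0, BigDecimal.valueOf(val).scale());
  }

  public static void main(String[] args) {
    System.out.println(detectedScale(27.000));  // 1 -> later rendered as "27.0"
    System.out.println(detectedScale(1.33e-4)); // 6 -> later rendered as "0.000133"
    System.out.println(detectedScale(100.25));  // 2 -> later rendered as "100.25"
  }
}
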
@@ -314,6 +325,8 @@ public Decimal128 update(long val, short scale) { if (scale != 0) { changeScaleDestructive(scale); } + // set actualScale to 0 because there is no fractional digits on integer values + this.actualScale = 0; return this; } @@ -341,6 +354,11 @@ public Decimal128 update(double val, short scale) { checkScaleRange(scale); this.scale = scale; + // Obtains the scale of the double value to keep a record of the original + // scale. This will be used to print the HiveDecimal string with the + // correct value scale. + this.actualScale = (short) BigDecimal.valueOf(val).scale(); + // Translate the double into sign, exponent and significand, according // to the formulae in JLS, Section 20.10.22. long valBits = Double.doubleToLongBits(val); @@ -364,6 +382,10 @@ public Decimal128 update(double val, short scale) { exponent++; } + // Calculate the real number of fractional digits from the double value + this.actualScale -= (exponent > 0) ? exponent : 0; + this.actualScale = (this.actualScale < 0) ? 0 : this.actualScale; + // so far same as java.math.BigDecimal, but the scaling below is // specific to ANSI SQL Numeric. @@ -426,6 +448,7 @@ public Decimal128 update(double val, short scale) { public Decimal128 update(IntBuffer buf, int precision) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update(buf, precision); assert ((signum == 0) == unscaledValue.isZero()); @@ -442,6 +465,7 @@ public Decimal128 update(IntBuffer buf, int precision) { public Decimal128 update128(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update128(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -458,6 +482,7 @@ public Decimal128 update128(IntBuffer buf) { public Decimal128 update96(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update96(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -474,6 +499,7 @@ public Decimal128 update96(IntBuffer buf) { public Decimal128 update64(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update64(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -490,6 +516,7 @@ public Decimal128 update64(IntBuffer buf) { public Decimal128 update32(IntBuffer buf) { int scaleAndSignum = buf.get(); this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update32(buf); assert ((signum == 0) == unscaledValue.isZero()); @@ -510,6 +537,7 @@ public Decimal128 update32(IntBuffer buf) { public Decimal128 update(int[] array, int offset, int precision) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update(array, offset + 1, precision); return this; @@ -527,6 +555,7 @@ public Decimal128 update(int[] array, int offset, int precision) { public Decimal128 update128(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum 
= (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update128(array, offset + 1); return this; @@ -544,6 +573,7 @@ public Decimal128 update128(int[] array, int offset) { public Decimal128 update96(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update96(array, offset + 1); return this; @@ -561,6 +591,7 @@ public Decimal128 update96(int[] array, int offset) { public Decimal128 update64(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update64(array, offset + 1); return this; @@ -578,6 +609,7 @@ public Decimal128 update64(int[] array, int offset) { public Decimal128 update32(int[] array, int offset) { int scaleAndSignum = array[offset]; this.scale = (short) (scaleAndSignum >> 16); + this.actualScale = this.scale; this.signum = (byte) (scaleAndSignum & 0xFF); this.unscaledValue.update32(array, offset + 1); return this; @@ -600,7 +632,6 @@ public Decimal128 update(BigDecimal bigDecimal) { * @param scale */ public Decimal128 update(BigInteger bigInt, short scale) { - this.scale = scale; this.signum = (byte) bigInt.compareTo(BigInteger.ZERO); if (signum == 0) { update(0); @@ -609,6 +640,9 @@ public Decimal128 update(BigInteger bigInt, short scale) { } else { unscaledValue.update(bigInt); } + this.scale = scale; + this.actualScale = scale; + return this; } @@ -731,6 +765,9 @@ public Decimal128 update(char[] str, int offset, int length, short scale) { this.unscaledValue.addDestructive(accumulated); } + this.actualScale = (short) (fractionalDigits - exponent); + this.actualScale = (this.actualScale < 0) ? 0 : this.actualScale; + int scaleAdjust = scale - fractionalDigits + exponent; if (scaleAdjust > 0) { this.unscaledValue.scaleUpTenDestructive((short) scaleAdjust); @@ -924,6 +961,7 @@ public void changeScaleDestructive(short scale) { this.unscaledValue.scaleUpTenDestructive((short) -scaleDown); } this.scale = scale; + this.actualScale = scale; this.unscaledValue.throwIfExceedsTenToThirtyEight(); } @@ -1125,6 +1163,7 @@ public void multiplyDestructiveNativeDecimal128(Decimal128 right, short newScale if (this.signum == 0 || right.signum == 0) { this.zeroClear(); this.scale = newScale; + this.actualScale = newScale; return; } @@ -1154,6 +1193,7 @@ public void multiplyDestructiveNativeDecimal128(Decimal128 right, short newScale } this.scale = newScale; + this.actualScale = newScale; this.signum = (byte) (this.signum * right.signum); if (this.unscaledValue.isZero()) { this.signum = 0; // because of scaling down, this could happen @@ -1244,6 +1284,7 @@ public void divideDestructiveNativeDecimal128(Decimal128 right, short newScale, } if (this.signum == 0) { this.scale = newScale; + this.actualScale = newScale; remainder.update(this); return; } @@ -1271,6 +1312,7 @@ public void divideDestructiveNativeDecimal128(Decimal128 right, short newScale, } this.scale = newScale; + this.actualScale = newScale; this.signum = (byte) (this.unscaledValue.isZero() ? 0 : (this.signum * right.signum)); remainder.scale = scale; @@ -1731,17 +1773,13 @@ private static void checkScaleRange(short scale) { private int [] tmpArray = new int[2]; /** - * Returns the string representation of this value. It discards the trailing zeros - * in the fractional part to match the HiveDecimal's string representation. 
However, + * Returns the string representation of this value. It returns the original + * {@code actualScale} fractional part when this value was created. However, * don't use this string representation for the reconstruction of the object. * * @return string representation of this value */ public String getHiveDecimalString() { - if (this.signum == 0) { - return "0"; - } - StringBuilder buf = new StringBuilder(50); if (this.signum < 0) { buf.append('-'); @@ -1752,32 +1790,40 @@ public String getHiveDecimalString() { int trailingZeros = tmpArray[1]; int numIntegerDigits = unscaledLength - this.scale; if (numIntegerDigits > 0) { - // write out integer part first // then write out fractional part for (int i=0; i < numIntegerDigits; i++) { buf.append(unscaled[i]); } - if (this.scale > trailingZeros) { + if (this.actualScale > 0) { buf.append('.'); - for (int i = numIntegerDigits; i < (unscaledLength - trailingZeros); i++) { + + if (trailingZeros > this.actualScale) { + for (int i=0; i < (trailingZeros - this.scale); i++) { + buf.append("0"); + } + } + + for (int i = numIntegerDigits; i < (numIntegerDigits + this.actualScale); i++) { buf.append(unscaled[i]); } } } else { - // no integer part buf.append('0'); - if (this.scale > trailingZeros) { - + if (this.actualScale > 0) { // fractional part has, starting with zeros buf.append('.'); - for (int i = unscaledLength; i < this.scale; ++i) { - buf.append('0'); + + if (this.actualScale > trailingZeros) { + for (int i = unscaledLength; i < this.scale; ++i) { + buf.append('0'); + } } - for (int i = 0; i < (unscaledLength - trailingZeros); i++) { + + for (int i = 0; i < (numIntegerDigits + this.actualScale); i++) { buf.append(unscaled[i]); } } @@ -1836,9 +1882,10 @@ public String toFormalString() { @Override public String toString() { - return toFormalString() + "(Decimal128: scale=" + scale + ", signum=" - + signum + ", BigDecimal.toString=" + toBigDecimal().toString() - + ", unscaledValue=[" + unscaledValue.toString() + "])"; + return toFormalString() + "(Decimal128: scale=" + scale + ", actualScale=" + + this.actualScale + ", signum=" + signum + ", BigDecimal.toString=" + + toBigDecimal().toString() + ", unscaledValue=[" + unscaledValue.toString() + + "])"; } /** @@ -1956,6 +2003,7 @@ public Decimal128 updateVarianceDestructive( */ public Decimal128 fastUpdateFromInternalStorage(byte[] internalStorage, short scale) { this.scale = scale; + this.actualScale = scale; this.signum = this.unscaledValue.fastUpdateFromInternalStorage(internalStorage); return this; diff --git a/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java b/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java index ad09015..00ea481 100644 --- a/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java +++ b/common/src/java/org/apache/hadoop/hive/common/type/HiveDecimal.java @@ -30,7 +30,6 @@ public class HiveDecimal implements Comparable { public static final int MAX_PRECISION = 38; public static final int MAX_SCALE = 38; - /** * Default precision/scale when user doesn't specify in the column metadata, such as * decimal and decimal(8). 
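The rewritten getHiveDecimalString() above no longer strips trailing zeros; it renders exactly actualScale fractional digits, so a value created from "0.00923076000" now prints all eleven of them (the updated TestDecimal128 expectations below reflect this). In plain BigDecimal terms the before/after difference looks roughly like this — illustrative only, since Decimal128 does not go through BigDecimal on this code path:

import java.math.BigDecimal;

public class TrailingZeroSketch {
  public static void main(String[] args) {
    BigDecimal supplied = new BigDecimal("0.00923076000"); // scale 11, as written

    // Previous behaviour: trailing zeros were discarded from the output,
    // roughly equivalent to stripping them before printing.
    System.out.println(supplied.stripTrailingZeros().toPlainString()); // 0.00923076

    // New behaviour: the string keeps the scale the caller supplied,
    // which is what actualScale records.
    System.out.println(supplied.toPlainString()); // 0.00923076000
  }
}
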
@@ -113,7 +112,7 @@ public int compareTo(HiveDecimal dec) { @Override public int hashCode() { - return bd.hashCode(); + return trim(bd).hashCode(); } @Override @@ -169,7 +168,7 @@ public HiveDecimal subtract(HiveDecimal dec) { } public HiveDecimal multiply(HiveDecimal dec) { - return create(bd.multiply(dec.bd), false); + return create(bd.multiply(dec.bd), true); } public BigInteger unscaledValue() { @@ -202,7 +201,7 @@ public HiveDecimal remainder(HiveDecimal dec) { } public HiveDecimal divide(HiveDecimal dec) { - return create(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP), true); + return create(trim(bd.divide(dec.bd, MAX_SCALE, RoundingMode.HALF_UP)), true); } /** @@ -232,8 +231,6 @@ private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) { return null; } - bd = trim(bd); - int intDigits = bd.precision() - bd.scale(); if (intDigits > MAX_PRECISION) { @@ -244,8 +241,6 @@ private static BigDecimal normalize(BigDecimal bd, boolean allowRounding) { if (bd.scale() > maxScale ) { if (allowRounding) { bd = bd.setScale(maxScale, RoundingMode.HALF_UP); - // Trimming is again necessary, because rounding may introduce new trailing 0's. - bd = trim(bd); } else { bd = null; } @@ -259,8 +254,6 @@ public static BigDecimal enforcePrecisionScale(BigDecimal bd, int maxPrecision, return null; } - bd = trim(bd); - int maxIntDigits = maxPrecision - maxScale; int intDigits = bd.precision() - bd.scale(); if (intDigits > maxIntDigits) { diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java b/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java index 46236a5..0786cca 100644 --- a/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java +++ b/common/src/test/org/apache/hadoop/hive/common/type/TestDecimal128.java @@ -811,7 +811,7 @@ public void testToHiveDecimalString() { assertEquals("0.00923076923", d2.getHiveDecimalString()); Decimal128 d3 = new Decimal128("0.00923076000", (short) 15); - assertEquals("0.00923076", d3.getHiveDecimalString()); + assertEquals("0.00923076000", d3.getHiveDecimalString()); Decimal128 d4 = new Decimal128("4294967296.01", (short) 15); assertEquals("4294967296.01", d4.getHiveDecimalString()); @@ -849,15 +849,37 @@ public void testToHiveDecimalString() { d11.update(hd6.bigDecimalValue()); assertEquals(hd6.toString(), d11.getHiveDecimalString()); + // The trailing zeros from a double value are trimmed automatically + // by the double data type Decimal128 d12 = new Decimal128(27.000, (short)3); - HiveDecimal hd7 = HiveDecimal.create(new BigDecimal("27.000")); + HiveDecimal hd7 = HiveDecimal.create(new BigDecimal("27.0")); assertEquals(hd7.toString(), d12.getHiveDecimalString()); - assertEquals("27", d12.getHiveDecimalString()); + assertEquals("27.0", d12.getHiveDecimalString()); Decimal128 d13 = new Decimal128(1234123000, (short)3); HiveDecimal hd8 = HiveDecimal.create(new BigDecimal("1234123000")); assertEquals(hd8.toString(), d13.getHiveDecimalString()); assertEquals("1234123000", d13.getHiveDecimalString()); + + Decimal128 d14 = new Decimal128(1.33e4, (short)10); + HiveDecimal hd9 = HiveDecimal.create(new BigDecimal("1.33e4")); + assertEquals(hd9.toString(), d14.getHiveDecimalString()); + assertEquals("13300", d14.getHiveDecimalString()); + + Decimal128 d15 = new Decimal128(1.33e-4, (short)10); + HiveDecimal hd10 = HiveDecimal.create(new BigDecimal("1.33e-4")); + assertEquals(hd10.toString(), d15.getHiveDecimalString()); + assertEquals("0.000133", d15.getHiveDecimalString()); + + Decimal128 d16 = new 
Decimal128("1.33e4", (short)10); + HiveDecimal hd11 = HiveDecimal.create(new BigDecimal("1.33e4")); + assertEquals(hd11.toString(), d16.getHiveDecimalString()); + assertEquals("13300", d16.getHiveDecimalString()); + + Decimal128 d17 = new Decimal128("1.33e-4", (short)10); + HiveDecimal hd12 = HiveDecimal.create(new BigDecimal("1.33e-4")); + assertEquals(hd12.toString(), d17.getHiveDecimalString()); + assertEquals("0.000133", d17.getHiveDecimalString()); } @Test diff --git a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java index 1384a45..769410d 100644 --- a/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java +++ b/common/src/test/org/apache/hadoop/hive/common/type/TestHiveDecimal.java @@ -50,25 +50,35 @@ public void testPrecisionScaleEnforcement() { Assert.assertEquals("-1786135888657847525803324040144343378.1", dec.toString()); dec = HiveDecimal.create("005.34000"); - Assert.assertEquals(dec.precision(), 3); - Assert.assertEquals(dec.scale(), 2); + Assert.assertEquals(dec.precision(), 6); + Assert.assertEquals(dec.scale(), 5); dec = HiveDecimal.create("178613588865784752580332404014434337809799306448796128931113691624"); Assert.assertNull(dec); - } - @Test - public void testTrailingZeroRemovalAfterEnforcement() { - String decStr = "8.090000000000000000000000000000000000000123456"; - HiveDecimal dec = HiveDecimal.create(decStr); - Assert.assertEquals("8.09", dec.toString()); + // Leaving trailing zeros + Assert.assertEquals("0.0", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.0"), 2, 1).toString()); + Assert.assertEquals("0.00", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.00"), 3, 2).toString()); + Assert.assertEquals("0.0000", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.0000"), 10, 4).toString()); + Assert.assertEquals("100.00000", HiveDecimal.enforcePrecisionScale(new BigDecimal("100.00000"), 15, 5).toString()); + Assert.assertEquals("100.00", HiveDecimal.enforcePrecisionScale(new BigDecimal("100.00"), 15, 5).toString()); + + // Rounding numbers + Assert.assertEquals("0.01", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.012"), 3, 2).toString()); + Assert.assertEquals("0.02", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.015"), 3, 2).toString()); + Assert.assertEquals("0.01", HiveDecimal.enforcePrecisionScale(new BigDecimal("0.0145"), 3, 2).toString()); + + // Integers with no scale values are not modified (zeros are not null) + Assert.assertEquals("0", HiveDecimal.enforcePrecisionScale(new BigDecimal("0"), 1, 0).toString()); + Assert.assertEquals("30", HiveDecimal.enforcePrecisionScale(new BigDecimal("30"), 2, 0).toString()); + Assert.assertEquals("5", HiveDecimal.enforcePrecisionScale(new BigDecimal("5"), 3, 2).toString()); } @Test public void testMultiply() { HiveDecimal dec1 = HiveDecimal.create("0.00001786135888657847525803"); HiveDecimal dec2 = HiveDecimal.create("3.0000123456789"); - Assert.assertNull(dec1.multiply(dec2)); + Assert.assertNotNull(dec1.multiply(dec2)); dec1 = HiveDecimal.create("178613588865784752580323232232323444.4"); dec2 = HiveDecimal.create("178613588865784752580302323232.3"); @@ -77,6 +87,14 @@ public void testMultiply() { dec1 = HiveDecimal.create("47.324"); dec2 = HiveDecimal.create("9232.309"); Assert.assertEquals("436909.791116", dec1.multiply(dec2).toString()); + + dec1 = HiveDecimal.create("3.140"); + dec2 = HiveDecimal.create("1.00"); + Assert.assertEquals("3.14000", dec1.multiply(dec2).toString()); + + 
dec1 = HiveDecimal.create("43.010"); + dec2 = HiveDecimal.create("2"); + Assert.assertEquals("86.020", dec1.multiply(dec2).toString()); } @Test @@ -87,6 +105,9 @@ public void testPow() { HiveDecimal dec1 = HiveDecimal.create("0.000017861358882"); dec1 = dec1.pow(3); Assert.assertNull(dec1); + + dec1 = HiveDecimal.create("3.140"); + Assert.assertEquals("9.859600", dec1.pow(2).toString()); } @Test @@ -94,6 +115,14 @@ public void testDivide() { HiveDecimal dec1 = HiveDecimal.create("3.14"); HiveDecimal dec2 = HiveDecimal.create("3"); Assert.assertNotNull(dec1.divide(dec2)); + + dec1 = HiveDecimal.create("15"); + dec2 = HiveDecimal.create("5"); + Assert.assertEquals("3", dec1.divide(dec2).toString()); + + dec1 = HiveDecimal.create("3.140"); + dec2 = HiveDecimal.create("1.00"); + Assert.assertEquals("3.14", dec1.divide(dec2).toString()); } @Test @@ -101,6 +130,18 @@ public void testPlus() { HiveDecimal dec1 = HiveDecimal.create("99999999999999999999999999999999999"); HiveDecimal dec2 = HiveDecimal.create("1"); Assert.assertNotNull(dec1.add(dec2)); + + dec1 = HiveDecimal.create("3.140"); + dec2 = HiveDecimal.create("1.00"); + Assert.assertEquals("4.140", dec1.add(dec2).toString()); + } + + + @Test + public void testSubtract() { + HiveDecimal dec1 = HiveDecimal.create("3.140"); + HiveDecimal dec2 = HiveDecimal.create("1.00"); + Assert.assertEquals("2.140", dec1.subtract(dec2).toString()); } @Test @@ -112,6 +153,12 @@ public void testPosMod() { } @Test + public void testHashCode() { + Assert.assertEquals(HiveDecimal.create("9").hashCode(), HiveDecimal.create("9.00").hashCode()); + Assert.assertEquals(HiveDecimal.create("0").hashCode(), HiveDecimal.create("0.00").hashCode()); + } + + @Test public void testException() { HiveDecimal dec = HiveDecimal.create("3.1415.926"); Assert.assertNull(dec); @@ -121,7 +168,7 @@ public void testException() { @Test public void testBinaryConversion() { - testBinaryConversion("0.0"); + testBinaryConversion("0.00"); testBinaryConversion("-12.25"); testBinaryConversion("234.79"); } diff --git a/data/files/extrapolate_stats_full.txt b/data/files/extrapolate_stats_full.txt new file mode 100644 index 0000000..d7ad64e --- /dev/null +++ b/data/files/extrapolate_stats_full.txt @@ -0,0 +1,6 @@ +|1|94087|2000 +O|2|94086|2000 +|1|94087|2001 +H|2|94086|2001 +|3|94086|2001 +OH|4|94086|2001 diff --git a/data/files/extrapolate_stats_partial.txt b/data/files/extrapolate_stats_partial.txt new file mode 100644 index 0000000..fa92ed3 --- /dev/null +++ b/data/files/extrapolate_stats_partial.txt @@ -0,0 +1,20 @@ +|1|94087|2000 +O|2|94086|2000 +|1|94087|2001 +H|2|94086|2001 +|3|94086|2001 +OH|4|43201|2001 +oh1|1|94087|2002 +OH2|2|43201|2002 +oh3|3|94087|2002 +OH4|4|94086|2002 +oh5|4|43201|2002 +OH6|5|94087|2002 +|31|94087|2003 +OH33|1|43201|2003 +|3|94087|2003 +OH|1|94086|2003 +|4|43201|2003 +OH|1|94087|2003 +|1|43201|2003 +OH|5|94086|2003 diff --git a/data/files/kv10.txt b/data/files/kv10.txt new file mode 100644 index 0000000..88136f5 --- /dev/null +++ b/data/files/kv10.txt @@ -0,0 +1,30 @@ +0,000000000000000000000000000000000000000000000,000000000000000000000000000000000000000000000 +1,-000000000000000000000000000000000000000000000,-000000000000000000000000000000000000000000000 +2,1000000000.0000,100000000000000.00000000 +3,1.0000000000000,1.0000000000000000000000 +4,10.000000000000,10.000000000000000000000 +5,100.00000000000,100.00000000000000000000 +6,1000.0000000000,1000.0000000000000000000 +7,10000.000000000,10000.000000000000000000 +8,100000.00000000,100000.00000000000000000 
+9,1000000.0000000,1000000.0000000000000000 +10,10000000.000000,10000000.000000000000000 +11,100000000.00000,100000000.00000000000000 +12,1000000000.0000,1000000000.0000000000000 +13,10000000000.000,10000000000.000000000000 +14,100000000000.00,100000000000.00000000000 +15,1000000000000.0,1000000000000.0000000000 +16,10000000000000.0,100000000000000.00000000 +17,10000000000000.0,1000000000000000.0000000 +18,1.0000,1.00000000 +19,10.000,10.0000000 +20,100.00,100.000000 +21,1000.0,1000.00000 +22,100000,10000.0000 +23,0.0000,0.00000000 +24,00.000,00.0000000 +25,000.00,000.000000 +26,0000.0,0000.00000 +27,00000.,0000.00000 +28,12313.2000,134134.31252500 +29,99999.9990,134134.31242553 diff --git a/data/files/parquet_types.txt b/data/files/parquet_types.txt index 9d81c3c..750626e 100644 --- a/data/files/parquet_types.txt +++ b/data/files/parquet_types.txt @@ -1,21 +1,21 @@ -100|1|1|1.0|0.0|abc|2011-01-01 01:01:01.111111111 -101|2|2|1.1|0.3|def|2012-02-02 02:02:02.222222222 -102|3|3|1.2|0.6|ghi|2013-03-03 03:03:03.333333333 -103|1|4|1.3|0.9|jkl|2014-04-04 04:04:04.444444444 -104|2|5|1.4|1.2|mno|2015-05-05 05:05:05.555555555 -105|3|1|1.0|1.5|pqr|2016-06-06 06:06:06.666666666 -106|1|2|1.1|1.8|stu|2017-07-07 07:07:07.777777777 -107|2|3|1.2|2.1|vwx|2018-08-08 08:08:08.888888888 -108|3|4|1.3|2.4|yza|2019-09-09 09:09:09.999999999 -109|1|5|1.4|2.7|bcd|2020-10-10 10:10:10.101010101 -110|2|1|1.0|3.0|efg|2021-11-11 11:11:11.111111111 -111|3|2|1.1|3.3|hij|2022-12-12 12:12:12.121212121 -112|1|3|1.2|3.6|klm|2023-01-02 13:13:13.131313131 -113|2|4|1.3|3.9|nop|2024-02-02 14:14:14.141414141 -114|3|5|1.4|4.2|qrs|2025-03-03 15:15:15.151515151 -115|1|1|1.0|4.5|tuv|2026-04-04 16:16:16.161616161 -116|2|2|1.1|4.8|wxy|2027-05-05 17:17:17.171717171 -117|3|3|1.2|5.1|zab|2028-06-06 18:18:18.181818181 -118|1|4|1.3|5.4|cde|2029-07-07 19:19:19.191919191 -119|2|5|1.4|5.7|fgh|2030-08-08 20:20:20.202020202 -120|3|1|1.0|6.0|ijk|2031-09-09 21:21:21.212121212 +100|1|1|1.0|0.0|abc|2011-01-01 01:01:01.111111111|a |a +101|2|2|1.1|0.3|def|2012-02-02 02:02:02.222222222|ab |ab +102|3|3|1.2|0.6|ghi|2013-03-03 03:03:03.333333333|abc|abc +103|1|4|1.3|0.9|jkl|2014-04-04 04:04:04.444444444|abcd|abcd +104|2|5|1.4|1.2|mno|2015-05-05 05:05:05.555555555|abcde|abcde +105|3|1|1.0|1.5|pqr|2016-06-06 06:06:06.666666666|abcdef|abcdef +106|1|2|1.1|1.8|stu|2017-07-07 07:07:07.777777777|abcdefg|abcdefg +107|2|3|1.2|2.1|vwx|2018-08-08 08:08:08.888888888|bcdefg|abcdefgh +108|3|4|1.3|2.4|yza|2019-09-09 09:09:09.999999999|cdefg|abcdefghijklmnop +109|1|5|1.4|2.7|bcd|2020-10-10 10:10:10.101010101|klmno|abcdedef +110|2|1|1.0|3.0|efg|2021-11-11 11:11:11.111111111|pqrst|abcdede +111|3|2|1.1|3.3|hij|2022-12-12 12:12:12.121212121|nopqr|abcded +112|1|3|1.2|3.6|klm|2023-01-02 13:13:13.131313131|opqrs|abcdd +113|2|4|1.3|3.9|nop|2024-02-02 14:14:14.141414141|pqrst|abc +114|3|5|1.4|4.2|qrs|2025-03-03 15:15:15.151515151|qrstu|b +115|1|1|1.0|4.5|tuv|2026-04-04 16:16:16.161616161|rstuv|abcded +116|2|2|1.1|4.8|wxy|2027-05-05 17:17:17.171717171|stuvw|abcded +117|3|3|1.2|5.1|zab|2028-06-06 18:18:18.181818181|tuvwx|abcded +118|1|4|1.3|5.4|cde|2029-07-07 19:19:19.191919191|uvwzy|abcdede +119|2|5|1.4|5.7|fgh|2030-08-08 20:20:20.202020202|vwxyz|abcdede +120|3|1|1.0|6.0|ijk|2031-09-09 21:21:21.212121212|wxyza|abcde diff --git a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java index fa7a8f0..8c4bca0 100644 --- 
a/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java +++ b/hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/AbstractRecordWriter.java @@ -53,7 +53,7 @@ private int currentBucketId = 0; private final Path partitionPath; - final AcidOutputFormat outf; + final AcidOutputFormat outf; protected AbstractRecordWriter(HiveEndPoint endPoint, HiveConf conf) throws ConnectionError, StreamingException { @@ -70,7 +70,7 @@ protected AbstractRecordWriter(HiveEndPoint endPoint, HiveConf conf) + endPoint); } String outFormatName = this.tbl.getSd().getOutputFormat(); - outf = (AcidOutputFormat) ReflectionUtils.newInstance(Class.forName(outFormatName), conf); + outf = (AcidOutputFormat) ReflectionUtils.newInstance(Class.forName(outFormatName), conf); } catch (MetaException e) { throw new ConnectionError(endPoint, e); } catch (NoSuchObjectException e) { diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java index afa3054..c667382 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatAddPartitionDesc.java @@ -18,18 +18,8 @@ */ package org.apache.hive.hcatalog.api; -import java.util.ArrayList; -import java.util.List; import java.util.Map; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; -import org.apache.hadoop.hive.metastore.Warehouse; -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetaException; -import org.apache.hadoop.hive.metastore.api.Partition; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hive.hcatalog.common.HCatException; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -40,16 +30,33 @@ public class HCatAddPartitionDesc { private static final Logger LOG = LoggerFactory.getLogger(HCatAddPartitionDesc.class); - private String tableName; - private String dbName; - private String location; - private Map partSpec; + private HCatPartition hcatPartition; - private HCatAddPartitionDesc(String dbName, String tbl, String loc, Map spec) { + // The following data members are only required to support the deprecated constructor (and builder). 
+ String dbName, tableName, location; + Map partitionKeyValues; + + private HCatAddPartitionDesc(HCatPartition hcatPartition) { + this.hcatPartition = hcatPartition; + } + + private HCatAddPartitionDesc(String dbName, String tableName, String location, Map partitionKeyValues) { + this.hcatPartition = null; this.dbName = dbName; - this.tableName = tbl; - this.location = loc; - this.partSpec = spec; + this.tableName = tableName; + this.location = location; + this.partitionKeyValues = partitionKeyValues; + } + + HCatPartition getHCatPartition() { + return hcatPartition; + } + + HCatPartition getHCatPartition(HCatTable hcatTable) throws HCatException { + assert hcatPartition == null : "hcatPartition should have been null at this point."; + assert dbName.equalsIgnoreCase(hcatTable.getDbName()) : "DB names don't match."; + assert tableName.equalsIgnoreCase(hcatTable.getTableName()) : "Table names don't match."; + return new HCatPartition(hcatTable, partitionKeyValues, location); } /** @@ -57,18 +64,19 @@ private HCatAddPartitionDesc(String dbName, String tbl, String loc, Map getPartitionSpec() { - return this.partSpec; + return hcatPartition == null? partitionKeyValues : hcatPartition.getPartitionKeyValMap(); } /** @@ -76,8 +84,9 @@ public String getLocation() { * * @return the table name */ + @Deprecated // @deprecated in favour of {@link HCatPartition.#getTableName()}. To be removed in Hive 0.16. public String getTableName() { - return this.tableName; + return hcatPartition == null? tableName : hcatPartition.getTableName(); } /** @@ -85,17 +94,14 @@ public String getTableName() { * * @return the database name */ + @Deprecated // @deprecated in favour of {@link HCatPartition.#getDatabaseName()}. To be removed in Hive 0.16. public String getDatabaseName() { - return this.dbName; + return hcatPartition == null? dbName : hcatPartition.getDatabaseName(); } @Override public String toString() { - return "HCatAddPartitionDesc [" - + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null") - + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null") - + (location != null ? "location=" + location + ", " : "location=null") - + (partSpec != null ? "partSpec=" + partSpec : "partSpec=null") + "]"; + return "HCatAddPartitionDesc [" + hcatPartition + "]"; } /** @@ -108,61 +114,48 @@ public String toString() { * @return the builder * @throws HCatException */ - public static Builder create(String dbName, String tableName, String location, - Map partSpec) throws HCatException { + @Deprecated // @deprecated in favour of {@link HCatAddPartitionDesc.#create(HCatPartition)}. To be removed in Hive 0.16. + public static Builder create(String dbName, + String tableName, + String location, + Map partSpec + ) throws HCatException { + LOG.error("Unsupported! 
HCatAddPartitionDesc requires HCatTable to be specified explicitly."); return new Builder(dbName, tableName, location, partSpec); } - Partition toHivePartition(Table hiveTable) throws HCatException { - Partition hivePtn = new Partition(); - hivePtn.setDbName(this.dbName); - hivePtn.setTableName(this.tableName); - - List pvals = new ArrayList(); - for (FieldSchema field : hiveTable.getPartitionKeys()) { - String val = partSpec.get(field.getName()); - if (val == null || val.length() == 0) { - throw new HCatException("create partition: Value for key " - + field.getName() + " is null or empty"); - } - pvals.add(val); - } - - hivePtn.setValues(pvals); - StorageDescriptor sd = new StorageDescriptor(hiveTable.getSd()); - hivePtn.setSd(sd); - hivePtn.setParameters(hiveTable.getParameters()); - if (this.location != null) { - hivePtn.getSd().setLocation(this.location); - } else { - String partName; - try { - partName = Warehouse.makePartName( - hiveTable.getPartitionKeys(), pvals); - LOG.info("Setting partition location to :" + partName); - } catch (MetaException e) { - throw new HCatException("Exception while creating partition name.", e); - } - Path partPath = new Path(hiveTable.getSd().getLocation(), partName); - hivePtn.getSd().setLocation(partPath.toString()); - } - hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000)); - hivePtn.setLastAccessTimeIsSet(false); - return hivePtn; + /** + * Constructs a Builder instance, using an HCatPartition object. + * @param partition An HCatPartition instance. + * @return A Builder object that can build an appropriate HCatAddPartitionDesc. + * @throws HCatException + */ + public static Builder create(HCatPartition partition) throws HCatException { + return new Builder(partition); } + /** + * Builder class for constructing an HCatAddPartition instance. + */ public static class Builder { - private String tableName; - private String location; - private Map values; - private String dbName; + private HCatPartition hcatPartition; + + // The following data members are only required to support the deprecated constructor (and builder). + String dbName, tableName, location; + Map partitionSpec; + + private Builder(HCatPartition hcatPartition) { + this.hcatPartition = hcatPartition; + } - private Builder(String dbName, String tableName, String location, Map values) { + @Deprecated // To be removed in Hive 0.16. + private Builder(String dbName, String tableName, String location, Map partitionSpec) { + this.hcatPartition = null; this.dbName = dbName; this.tableName = tableName; this.location = location; - this.values = values; + this.partitionSpec = partitionSpec; } /** @@ -172,13 +165,9 @@ private Builder(String dbName, String tableName, String location, Map serializePartitions(List hcatPartitions) throws HCatException; + + /** + * Deserializer for an HCatPartition. + * @param hcatPartitionStringRep The String representation of the HCatPartition, presumably retrieved from {@link #serializePartition(HCatPartition)} + * @return HCatPartition instance reconstructed from the string. + * @throws HCatException, on failure to deserialze. + */ + public abstract HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException; + + /** + * Deserializer for a list of HCatPartition strings. + * @param hcatPartitionStringReps The list of HCatPartition strings to be deserialized. + * @return A list of HCatPartition instances, each reconstructed from an entry in the string-list. + * @throws HCatException, on failure to deserialize. 
+ */ + public abstract List deserializePartitions(List hcatPartitionStringReps) throws HCatException; + + /** * Creates the table like an existing table. * * @param dbName The name of the database. diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java index c4b5971..b3afa72 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatClientHMSImpl.java @@ -172,7 +172,7 @@ public HCatTable getTable(String dbName, String tableName) public void createTable(HCatCreateTableDesc createTableDesc) throws HCatException { try { - hmsClient.createTable(createTableDesc.toHiveTable(hiveConfig)); + hmsClient.createTable(createTableDesc.getHCatTable().toHiveTable()); } catch (AlreadyExistsException e) { if (!createTableDesc.getIfNotExists()) { throw new HCatException( @@ -220,6 +220,27 @@ public void updateTableSchema(String dbName, String tableName, List hcatPtns = new ArrayList(); try { + Table table = hmsClient.getTable(dbName, tblName); + HCatTable hcatTable = new HCatTable(table); List hivePtns = hmsClient.listPartitions( checkDB(dbName), tblName, (short) -1); for (Partition ptn : hivePtns) { - hcatPtns.add(new HCatPartition(ptn)); + hcatPtns.add(new HCatPartition(hcatTable, ptn)); } } catch (NoSuchObjectException e) { throw new ObjectNotFoundException( @@ -351,7 +374,8 @@ public HCatPartition getPartition(String dbName, String tableName, Map partitionSpec) throws HCatException { HCatPartition partition = null; try { - List partitionColumns = getTable(checkDB(dbName), tableName).getPartCols(); + HCatTable hcatTable = getTable(checkDB(dbName), tableName); + List partitionColumns = hcatTable.getPartCols(); if (partitionColumns.size() != partitionSpec.size()) { throw new HCatException("Partition-spec doesn't have the right number of partition keys."); } @@ -369,7 +393,7 @@ public HCatPartition getPartition(String dbName, String tableName, Partition hivePartition = hmsClient.getPartition(checkDB(dbName), tableName, ptnValues); if (hivePartition != null) { - partition = new HCatPartition(hivePartition); + partition = new HCatPartition(hcatTable, hivePartition); } } catch (MetaException e) { throw new HCatException( @@ -397,7 +421,17 @@ public void addPartition(HCatAddPartitionDesc partInfo) + " is not partitioned."); } - hmsClient.add_partition(partInfo.toHivePartition(tbl)); + HCatTable hcatTable = new HCatTable(tbl); + + HCatPartition hcatPartition = partInfo.getHCatPartition(); + + // TODO: Remove in Hive 0.16. + // This is only required to support the deprecated methods in HCatAddPartitionDesc.Builder. 
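To illustrate the direction of the HCatAddPartitionDesc and addPartition() changes above, here is a hedged usage sketch; the database, table, and partition-key names ("mydb", "mytable", "dt") are assumptions, not taken from the patch. A partition is now described by an HCatPartition built against the table's HCatTable spec, and the descriptor simply wraps it.

import java.util.HashMap;
import java.util.Map;

import org.apache.hadoop.conf.Configuration;
import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.api.HCatTable;
import org.apache.hive.hcatalog.common.HCatException;

class AddPartitionExample {
  static void addPartition(Configuration conf) throws HCatException {
    HCatClient client = HCatClient.create(conf);
    HCatTable table = client.getTable("mydb", "mytable");

    Map<String, String> partitionSpec = new HashMap<String, String>();
    partitionSpec.put("dt", "2014-07-01");

    // A null location lets toHivePartition() fall back to the default warehouse path.
    HCatPartition partition = new HCatPartition(table, partitionSpec, null);
    client.addPartition(HCatAddPartitionDesc.create(partition).build());
    client.close();
  }
}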
+ if (hcatPartition == null) { + hcatPartition = partInfo.getHCatPartition(hcatTable); + } + + hmsClient.add_partition(hcatPartition.toHivePartition()); } catch (InvalidObjectException e) { throw new HCatException( "InvalidObjectException while adding partition.", e); @@ -458,10 +492,11 @@ private void dropPartition(Partition partition, boolean ifExists) String tblName, String filter) throws HCatException { List hcatPtns = new ArrayList(); try { + HCatTable table = getTable(dbName, tblName); List hivePtns = hmsClient.listPartitionsByFilter( checkDB(dbName), tblName, filter, (short) -1); for (Partition ptn : hivePtns) { - hcatPtns.add(new HCatPartition(ptn)); + hcatPtns.add(new HCatPartition(table, ptn)); } } catch (MetaException e) { throw new HCatException("MetaException while fetching partitions.", @@ -682,9 +717,18 @@ public int addPartitions(List partInfoList) try { tbl = hmsClient.getTable(partInfoList.get(0).getDatabaseName(), partInfoList.get(0).getTableName()); + HCatTable hcatTable = new HCatTable(tbl); ArrayList ptnList = new ArrayList(); for (HCatAddPartitionDesc desc : partInfoList) { - ptnList.add(desc.toHivePartition(tbl)); + HCatPartition hCatPartition = desc.getHCatPartition(); + + // TODO: Remove in Hive 0.16. + // This is required only to support the deprecated HCatAddPartitionDesc.Builder interfaces. + if (hCatPartition == null) { + hCatPartition = desc.getHCatPartition(hcatTable); + } + + ptnList.add(hCatPartition.toHivePartition()); } numPartitions = hmsClient.add_partitions(ptnList); } catch (InvalidObjectException e) { @@ -720,4 +764,65 @@ public String getMessageBusTopicName(String dbName, String tableName) throws HCa "TException while retrieving JMS Topic name.", e); } } + + @Override + public String serializeTable(HCatTable hcatTable) throws HCatException { + return MetadataSerializer.get().serializeTable(hcatTable); + } + + @Override + public HCatTable deserializeTable(String hcatTableStringRep) throws HCatException { + return MetadataSerializer.get().deserializeTable(hcatTableStringRep); + } + + @Override + public String serializePartition(HCatPartition hcatPartition) throws HCatException { + return MetadataSerializer.get().serializePartition(hcatPartition); + } + + @Override + public List serializePartitions(List hcatPartitions) throws HCatException { + List partStrings = new ArrayList(hcatPartitions.size()); + MetadataSerializer serializer = MetadataSerializer.get(); + + for (HCatPartition partition : hcatPartitions) { + partStrings.add(serializer.serializePartition(partition)); + } + + return partStrings; + } + + @Override + public HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException { + HCatPartition hcatPartition = MetadataSerializer.get().deserializePartition(hcatPartitionStringRep); + hcatPartition.hcatTable(getTable(hcatPartition.getDatabaseName(), hcatPartition.getTableName())); + return hcatPartition; + } + + @Override + public List deserializePartitions(List hcatPartitionStringReps) throws HCatException { + List partitions = new ArrayList(hcatPartitionStringReps.size()); + MetadataSerializer deserializer = MetadataSerializer.get(); + HCatTable table = null; + for (String partString : hcatPartitionStringReps) { + HCatPartition partition; + if (table == null) { + partition = deserializePartition(partString); + table = partition.hcatTable(); + } + else { + partition = deserializer.deserializePartition(partString); + if (partition.getDatabaseName().equals(table.getDbName()) + && 
partition.getTableName().equals(table.getTableName())) { + partition.hcatTable(table); + } + else { + throw new HCatException("All partitions are not of the same table: " + + table.getDbName() + "." + table.getTableName()); + } + } + partitions.add(partition); + } + return partitions; + } } diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java index d6e9753..5293f7b 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatCreateTableDesc.java @@ -19,38 +19,13 @@ package org.apache.hive.hcatalog.api; import java.util.ArrayList; -import java.util.HashMap; import java.util.List; import java.util.Map; -import org.apache.commons.lang.StringUtils; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.metastore.MetaStoreUtils; -import org.apache.hadoop.hive.metastore.TableType; -import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.metastore.api.SerDeInfo; -import org.apache.hadoop.hive.metastore.api.StorageDescriptor; -import org.apache.hadoop.hive.metastore.api.Table; -import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; -import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; -import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; -import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat; -import org.apache.hadoop.hive.ql.io.orc.OrcSerde; -import org.apache.hadoop.hive.ql.io.RCFileInputFormat; -import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; -import org.apache.hadoop.hive.ql.metadata.HiveUtils; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; -import org.apache.hadoop.mapred.SequenceFileInputFormat; -import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hive.hcatalog.common.HCatException; import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; -import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; /** * The Class HCatCreateTableDesc for defining attributes for a new table. 
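The metadata-serialization methods implemented in HCatClientHMSImpl above could be exercised roughly as follows. This is an illustrative sketch; the client handle and the table name are assumptions. Tables and partitions are rendered to string form (JSON, via MetadataSerializer) so they can be transported and reconstructed elsewhere.

import java.util.List;

import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.api.HCatTable;
import org.apache.hive.hcatalog.common.HCatException;

class MetadataRoundTrip {
  static void roundTrip(HCatClient client) throws HCatException {
    // Serialize a table definition and rebuild it from the string form.
    String tableString = client.serializeTable(client.getTable("mydb", "mytable"));
    HCatTable rebuiltTable = client.deserializeTable(tableString);

    // Serialize all partitions of the table and rebuild them; deserializePartitions()
    // re-attaches the owning HCatTable to each partition, as shown in the patch above.
    List<String> partitionStrings = client.serializePartitions(client.getPartitions("mydb", "mytable"));
    List<HCatPartition> rebuiltPartitions = client.deserializePartitions(partitionStrings);
  }
}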
@@ -58,31 +33,12 @@ @SuppressWarnings("deprecation") public class HCatCreateTableDesc { - private static final Logger LOG = LoggerFactory.getLogger(HCatCreateTableDesc.class); - - private String tableName; - private String dbName; - private boolean isExternal; - private String comment; - private String location; - private List cols; - private List partCols; - private List bucketCols; - private int numBuckets; - private List sortCols; - private Map tblProps; private boolean ifNotExists; - private String fileFormat; - private String inputformat; - private String outputformat; - private String serde; - private String storageHandler; - private Map serdeParams; - - private HCatCreateTableDesc(String dbName, String tableName, List columns) { - this.dbName = dbName; - this.tableName = tableName; - this.cols = columns; + private HCatTable hcatTable; + + private HCatCreateTableDesc(HCatTable hcatTable, boolean ifNotExists) { + this.hcatTable = hcatTable; + this.ifNotExists = ifNotExists; } /** @@ -93,109 +49,36 @@ private HCatCreateTableDesc(String dbName, String tableName, List columns) { return new Builder(dbName, tableName, columns); } - Table toHiveTable(HiveConf conf) throws HCatException { - - /* - * get the same defaults as are set when a Table is created via the Hive Driver. - */ - Table newTable = org.apache.hadoop.hive.ql.metadata.Table.getEmptyTable(dbName, tableName); - newTable.setDbName(dbName); - newTable.setTableName(tableName); - if (tblProps != null) { - for ( Map.Entry e : tblProps.entrySet()){ - newTable.getParameters().put(e.getKey(), e.getValue()); - } - } - - if (isExternal) { - newTable.putToParameters("EXTERNAL", "TRUE"); - newTable.setTableType(TableType.EXTERNAL_TABLE.toString()); - } else { - newTable.setTableType(TableType.MANAGED_TABLE.toString()); - } - - // Initialize an sd if one does not exist - if (newTable.getSd() == null) { - newTable.setSd(new StorageDescriptor()); - } - StorageDescriptor sd = newTable.getSd(); - - if (sd.getSerdeInfo() == null){ - sd.setSerdeInfo(new SerDeInfo()); - } - if (location != null) { - sd.setLocation(location); - } - if (this.comment != null) { - newTable.putToParameters("comment", comment); - } - if (!StringUtils.isEmpty(fileFormat)) { - sd.setInputFormat(inputformat); - sd.setOutputFormat(outputformat); - if (serde != null) { - sd.getSerdeInfo().setSerializationLib(serde); - } else { - LOG.info("Using LazySimpleSerDe for table " + tableName); - sd.getSerdeInfo() - .setSerializationLib( - org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class - .getName()); - } - } else { - try { - LOG.info("Creating instance of storage handler to get input/output, serder info."); - HiveStorageHandler sh = HiveUtils.getStorageHandler(conf, - storageHandler); - sd.setInputFormat(sh.getInputFormatClass().getName()); - sd.setOutputFormat(sh.getOutputFormatClass().getName()); - sd.getSerdeInfo().setSerializationLib( - sh.getSerDeClass().getName()); - newTable.putToParameters( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, - storageHandler); - } catch (HiveException e) { - throw new HCatException( - "Exception while creating instance of storage handler", - e); - } - } - if(serdeParams != null) { - for(Map.Entry param : serdeParams.entrySet()) { - sd.getSerdeInfo().putToParameters(param.getKey(), param.getValue()); - } - } - if (this.partCols != null) { - ArrayList hivePtnCols = new ArrayList(); - for (HCatFieldSchema fs : this.partCols) { - hivePtnCols.add(HCatSchemaUtils.getFieldSchema(fs)); - } - 
newTable.setPartitionKeys(hivePtnCols); - } - - if (this.cols != null) { - ArrayList hiveTblCols = new ArrayList(); - for (HCatFieldSchema fs : this.cols) { - hiveTblCols.add(HCatSchemaUtils.getFieldSchema(fs)); - } - newTable.getSd().setCols(hiveTblCols); - } - - if (this.bucketCols != null) { - newTable.getSd().setBucketCols(bucketCols); - newTable.getSd().setNumBuckets(numBuckets); - } + /** + * Getter for HCatCreateTableDesc.Builder instance. + * @param table Spec for HCatTable to be created. + * @param ifNotExists Only create the table if it doesn't already exist. + * @return Builder instance. + */ + public static Builder create(HCatTable table, boolean ifNotExists) { + return new Builder(table, ifNotExists); + } - if (this.sortCols != null) { - newTable.getSd().setSortCols(sortCols); - } + /** + * Getter for HCatCreateTableDesc.Builder instance. By default, ifNotExists is false. + * So the attempt to create the table is made even if the table already exists. + * @param table Spec for HCatTable to be created. + * @return Builder instance. + */ + public static Builder create(HCatTable table) { + return new Builder(table, false); + } - newTable.setCreateTime((int) (System.currentTimeMillis() / 1000)); - newTable.setLastAccessTimeIsSet(false); - return newTable; + /** + * Getter for underlying HCatTable instance. + */ + public HCatTable getHCatTable() { + return this.hcatTable; } /** @@ -212,8 +95,9 @@ public boolean getIfNotExists() { * * @return the table name */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getTableName()}. To be removed in Hive 0.16. public String getTableName() { - return this.tableName; + return this.hcatTable.getTableName(); } /** @@ -221,8 +105,9 @@ public String getTableName() { * * @return the cols */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getCols()}. To be removed in Hive 0.16. public List getCols() { - return this.cols; + return this.hcatTable.getCols(); } /** @@ -230,8 +115,9 @@ public String getTableName() { * * @return the partition cols */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getPartCols()}. To be removed in Hive 0.16. public List getPartitionCols() { - return this.partCols; + return this.hcatTable.getPartCols(); } /** @@ -239,12 +125,14 @@ public String getTableName() { * * @return the bucket cols */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getBucketCols()}. To be removed in Hive 0.16. public List getBucketCols() { - return this.bucketCols; + return this.hcatTable.getBucketCols(); } + @Deprecated // @deprecated in favour of {@link HCatTable.#getNumBuckets()}. public int getNumBuckets() { - return this.numBuckets; + return this.hcatTable.getNumBuckets(); } /** @@ -252,8 +140,9 @@ public int getNumBuckets() { * * @return the comments */ + @Deprecated // @deprecated in favour of {@link HCatTable.#comment()}. To be removed in Hive 0.16. public String getComments() { - return this.comment; + return this.hcatTable.comment(); } /** @@ -261,8 +150,9 @@ public String getComments() { * * @return the storage handler */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getStorageHandler()}. To be removed in Hive 0.16. public String getStorageHandler() { - return this.storageHandler; + return this.hcatTable.getStorageHandler(); } /** @@ -270,8 +160,9 @@ public String getStorageHandler() { * * @return the location */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getLocation()}. To be removed in Hive 0.16. 
public String getLocation() { - return this.location; + return this.hcatTable.getLocation(); } /** @@ -279,8 +170,11 @@ public String getLocation() { * * @return the external */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getTableType()}. To be removed in Hive 0.16. public boolean getExternal() { - return this.isExternal; + + return this.hcatTable.getTabletype() + .equalsIgnoreCase(HCatTable.Type.EXTERNAL_TABLE.toString()); } /** @@ -288,8 +182,9 @@ public boolean getExternal() { * * @return the sort cols */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getSortCols()}. To be removed in Hive 0.16. public List getSortCols() { - return this.sortCols; + return this.hcatTable.getSortCols(); } /** @@ -297,8 +192,9 @@ public boolean getExternal() { * * @return the tbl props */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getTblProps()}. To be removed in Hive 0.16. public Map getTblProps() { - return this.tblProps; + return this.hcatTable.getTblProps(); } /** @@ -306,8 +202,9 @@ public boolean getExternal() { * * @return the file format */ + @Deprecated // @deprecated in favour of {@link HCatTable.#fileFormat()}. To be removed in Hive 0.16. public String getFileFormat() { - return this.fileFormat; + return this.hcatTable.fileFormat(); } /** @@ -315,74 +212,39 @@ public String getFileFormat() { * * @return the database name */ + @Deprecated // @deprecated in favour of {@link HCatTable.#getDbName()}. To be removed in Hive 0.16. public String getDatabaseName() { - return this.dbName; + return this.hcatTable.getDbName(); } + /** * Gets the SerDe parameters; for example see {@link org.apache.hive.hcatalog.api.HCatCreateTableDesc.Builder#fieldsTerminatedBy(char)} */ + @Deprecated public Map getSerdeParams() { - return serdeParams; + return this.hcatTable.getSerdeParams(); } @Override public String toString() { - return "HCatCreateTableDesc [" - + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null") - + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null") - + "isExternal=" - + isExternal - + ", " - + (comment != null ? "comment=" + comment + ", " : "comment=null") - + (location != null ? "location=" + location + ", " : "location=null") - + (cols != null ? "cols=" + cols + ", " : "cols=null") - + (partCols != null ? "partCols=" + partCols + ", " : "partCols=null") - + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null") - + "numBuckets=" - + numBuckets - + ", " - + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null") - + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null") - + "ifNotExists=" - + ifNotExists - + ", " - + (fileFormat != null ? "fileFormat=" + fileFormat + ", " : "fileFormat=null") - + (inputformat != null ? "inputformat=" + inputformat + ", " - : "inputformat=null") - + (outputformat != null ? "outputformat=" + outputformat + ", " - : "outputformat=null") - + (serde != null ? "serde=" + serde + ", " : "serde=null") - + (storageHandler != null ? "storageHandler=" + storageHandler - : "storageHandler=null") - + ",serdeParams=" + (serdeParams == null ? 
"null" : serdeParams) - + "]"; + return "HCatCreateTableDesc [ " + hcatTable.toString() + + ", ifNotExists = " + ifNotExists + "]"; } public static class Builder { - private String tableName; - private boolean isExternal; - private List cols; - private List partCols; - private List bucketCols; - private List sortCols; - private int numBuckets; - private String comment; - private String fileFormat; - private String location; - private String storageHandler; - private Map tblProps; private boolean ifNotExists; - private String dbName; - private Map serdeParams; - + private HCatTable hcatTable; + @Deprecated // @deprecated in favour of {@link #Builder(HCatTable, boolean)}. To be removed in Hive 0.16. private Builder(String dbName, String tableName, List columns) { - this.dbName = dbName; - this.tableName = tableName; - this.cols = columns; + hcatTable = new HCatTable(dbName, tableName).cols(columns); } + private Builder(HCatTable hcatTable, boolean ifNotExists) { + this.hcatTable = hcatTable; + this.ifNotExists = ifNotExists; + } /** * If not exists. @@ -403,8 +265,9 @@ public Builder ifNotExists(boolean ifNotExists) { * @param partCols the partition cols * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#partCols(List)}. To be removed in Hive 0.16. public Builder partCols(List partCols) { - this.partCols = partCols; + this.hcatTable.partCols(partCols); return this; } @@ -415,9 +278,10 @@ public Builder partCols(List partCols) { * @param bucketCols the bucket cols * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#bucketCols(List) and HCatTable.#numBuckets(int)}. + // To be removed in Hive 0.16. public Builder bucketCols(List bucketCols, int buckets) { - this.bucketCols = bucketCols; - this.numBuckets = buckets; + this.hcatTable.bucketCols(bucketCols).numBuckets(buckets); return this; } @@ -427,8 +291,9 @@ public Builder bucketCols(List bucketCols, int buckets) { * @param storageHandler the storage handler * @return the builder */ - public Builder storageHandler(String storageHandler) { - this.storageHandler = storageHandler; + @Deprecated // @deprecated in favour of {@link HCatTable.#storageHandler(String)}. To be removed in Hive 0.16. + public Builder storageHandler(String storageHandler) throws HCatException { + this.hcatTable.storageHandler(storageHandler); return this; } @@ -438,8 +303,9 @@ public Builder storageHandler(String storageHandler) { * @param location the location * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#location(String)}. To be removed in Hive 0.16. public Builder location(String location) { - this.location = location; + this.hcatTable.location(location); return this; } @@ -449,8 +315,9 @@ public Builder location(String location) { * @param comment the comment * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#comment(String)}. To be removed in Hive 0.16. public Builder comments(String comment) { - this.comment = comment; + this.hcatTable.comment(comment); return this; } @@ -460,8 +327,9 @@ public Builder comments(String comment) { * @param isExternal the is external * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#tableType(HCatTable.Type)}. To be removed in Hive 0.16. public Builder isTableExternal(boolean isExternal) { - this.isExternal = isExternal; + this.hcatTable.tableType(isExternal? 
HCatTable.Type.EXTERNAL_TABLE : HCatTable.Type.MANAGED_TABLE); return this; } @@ -471,8 +339,9 @@ public Builder isTableExternal(boolean isExternal) { * @param sortCols the sort cols * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#sortCols(ArrayList)}. To be removed in Hive 0.16. public Builder sortCols(ArrayList sortCols) { - this.sortCols = sortCols; + this.hcatTable.sortCols(sortCols); return this; } @@ -482,8 +351,10 @@ public Builder sortCols(ArrayList sortCols) { * @param tblProps the tbl props * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#sortCols(Map)}. + // To be removed in Hive 0.16. public Builder tblProps(Map tblProps) { - this.tblProps = tblProps; + this.hcatTable.tblProps(tblProps); return this; } @@ -493,54 +364,60 @@ public Builder tblProps(Map tblProps) { * @param format the format * @return the builder */ + @Deprecated // @deprecated in favour of {@link HCatTable.#fileFormat(String)}. To be removed in Hive 0.16. public Builder fileFormat(String format) { - this.fileFormat = format; + this.hcatTable.fileFormat(format); return this; } /** * See row_format element of CREATE_TABLE DDL for Hive. */ + @Deprecated // @deprecated in favour of {@link HCatTable.#fieldsTerminatedBy()}. To be removed in Hive 0.16. public Builder fieldsTerminatedBy(char delimiter) { return serdeParam(serdeConstants.FIELD_DELIM, Character.toString(delimiter)); } /** * See row_format element of CREATE_TABLE DDL for Hive. */ + @Deprecated // @deprecated in favour of {@link HCatTable.#escapeChar()}. public Builder escapeChar(char escapeChar) { return serdeParam(serdeConstants.ESCAPE_CHAR, Character.toString(escapeChar)); } /** * See row_format element of CREATE_TABLE DDL for Hive. */ + @Deprecated // @deprecated in favour of {@link HCatTable.#collectionItemsTerminatedBy()}. To be removed in Hive 0.16. public Builder collectionItemsTerminatedBy(char delimiter) { return serdeParam(serdeConstants.COLLECTION_DELIM, Character.toString(delimiter)); } /** * See row_format element of CREATE_TABLE DDL for Hive. */ + @Deprecated // @deprecated in favour of {@link HCatTable.#mapKeysTerminatedBy()}. To be removed in Hive 0.16. public Builder mapKeysTerminatedBy(char delimiter) { return serdeParam(serdeConstants.MAPKEY_DELIM, Character.toString(delimiter)); } /** * See row_format element of CREATE_TABLE DDL for Hive. */ + @Deprecated // @deprecated in favour of {@link HCatTable.#linesTerminatedBy()}. To be removed in Hive 0.16. public Builder linesTerminatedBy(char delimiter) { return serdeParam(serdeConstants.LINE_DELIM, Character.toString(delimiter)); } /** * See row_format element of CREATE_TABLE DDL for Hive. */ + @Deprecated // @deprecated in favour of {@link HCatTable.#nullDefinedAs()}. To be removed in Hive 0.16. public Builder nullDefinedAs(char nullChar) { return serdeParam(serdeConstants.SERIALIZATION_NULL_FORMAT, Character.toString(nullChar)); } /** * used for setting arbitrary SerDe parameter */ + @Deprecated // @deprecated in favour of {@link HCatTable.#serdeParam(Map)}. + // To be removed in Hive 0.16. 
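The rewritten Builder above forwards everything to HCatTable's fluent setters, so a table would now typically be created as in the following hedged sketch. The column, partition-key, and table names are assumptions, not part of the patch.

import java.util.Arrays;

import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatCreateTableDesc;
import org.apache.hive.hcatalog.api.HCatTable;
import org.apache.hive.hcatalog.common.HCatException;
import org.apache.hive.hcatalog.data.schema.HCatFieldSchema;

class CreateTableExample {
  static void createTable(HCatClient client) throws HCatException {
    HCatTable table = new HCatTable("mydb", "mytable")
        .cols(Arrays.asList(
            new HCatFieldSchema("id", HCatFieldSchema.Type.INT, "row id"),
            new HCatFieldSchema("name", HCatFieldSchema.Type.STRING, "display name")))
        .partCol(new HCatFieldSchema("dt", HCatFieldSchema.Type.STRING, "ingest date"))
        .fileFormat("rcfile")   // per this patch: RCFile input/output formats + LazyBinaryColumnarSerDe
        .comment("Example table");

    // 'true' requests IF NOT EXISTS semantics.
    client.createTable(HCatCreateTableDesc.create(table, true).build());
  }
}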
public Builder serdeParam(String paramName, String value) { - if(serdeParams == null) { - serdeParams = new HashMap(); - } - serdeParams.put(paramName, value); + hcatTable.serdeParam(paramName, value); return this; } /** @@ -550,52 +427,9 @@ public Builder serdeParam(String paramName, String value) { * @throws HCatException */ public HCatCreateTableDesc build() throws HCatException { - if (this.dbName == null) { - LOG.info("Database name found null. Setting db to :" - + MetaStoreUtils.DEFAULT_DATABASE_NAME); - this.dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; - } - HCatCreateTableDesc desc = new HCatCreateTableDesc(this.dbName, - this.tableName, this.cols); - desc.ifNotExists = this.ifNotExists; - desc.isExternal = this.isExternal; - desc.comment = this.comment; - desc.partCols = this.partCols; - desc.bucketCols = this.bucketCols; - desc.numBuckets = this.numBuckets; - desc.location = this.location; - desc.tblProps = this.tblProps; - desc.sortCols = this.sortCols; - desc.serde = null; - if (!StringUtils.isEmpty(fileFormat)) { - desc.fileFormat = fileFormat; - if ("SequenceFile".equalsIgnoreCase(fileFormat)) { - desc.inputformat = SequenceFileInputFormat.class.getName(); - desc.outputformat = HiveSequenceFileOutputFormat.class - .getName(); - } else if ("RCFile".equalsIgnoreCase(fileFormat)) { - desc.inputformat = RCFileInputFormat.class.getName(); - desc.outputformat = RCFileOutputFormat.class.getName(); - desc.serde = ColumnarSerDe.class.getName(); - } else if ("orcfile".equalsIgnoreCase(fileFormat)) { - desc.inputformat = OrcInputFormat.class.getName(); - desc.outputformat = OrcOutputFormat.class.getName(); - desc.serde = OrcSerde.class.getName(); - } - desc.storageHandler = StringUtils.EMPTY; - } else if (!StringUtils.isEmpty(storageHandler)) { - desc.storageHandler = storageHandler; - } else { - desc.fileFormat = "TextFile"; - LOG.info("Using text file format for the table."); - desc.inputformat = TextInputFormat.class.getName(); - LOG.info("Table input format:" + desc.inputformat); - desc.outputformat = HiveIgnoreKeyTextOutputFormat.class - .getName(); - LOG.info("Table output format:" + desc.outputformat); - } - desc.serdeParams = this.serdeParams; - return desc; + return new HCatCreateTableDesc(this.hcatTable, this.ifNotExists); } - } -} + + } // class Builder; + +} // class HCatAddPartitionDesc; diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java index ee1b6bf..ea7c54c 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatPartition.java @@ -19,45 +19,136 @@ package org.apache.hive.hcatalog.api; import java.util.ArrayList; +import java.util.LinkedHashMap; import java.util.List; import java.util.Map; +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.Warehouse; import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; import org.apache.hadoop.hive.metastore.api.Partition; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hive.hcatalog.common.HCatException; import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils; +import 
org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The HCatPartition is a wrapper around org.apache.hadoop.hive.metastore.api.Partition. */ public class HCatPartition { + private static final Logger LOG = LoggerFactory.getLogger(HCatPartition.class); + + private HCatTable hcatTable; private String tableName; - private String dbName; + private String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; private List values; - private List tableCols; private int createTime; private int lastAccessTime; private StorageDescriptor sd; private Map parameters; - HCatPartition(Partition partition) throws HCatException { + // For use from within HCatClient.getPartitions(). + HCatPartition(HCatTable hcatTable, Partition partition) throws HCatException { + this.hcatTable = hcatTable; this.tableName = partition.getTableName(); this.dbName = partition.getDbName(); this.createTime = partition.getCreateTime(); this.lastAccessTime = partition.getLastAccessTime(); this.parameters = partition.getParameters(); this.values = partition.getValues(); + if (hcatTable != null && partition.getValuesSize() != hcatTable.getPartCols().size()) { + throw new HCatException("Mismatched number of partition columns between table:" + hcatTable.getDbName() + "." + hcatTable.getTableName() + + " and partition " + partition.getValues()); + } + this.sd = partition.getSd(); - this.tableCols = new ArrayList(); - for (FieldSchema fs : this.sd.getCols()) { - this.tableCols.add(HCatSchemaUtils.getHCatFieldSchema(fs)); + } + + // For constructing HCatPartitions afresh, as an argument to HCatClient.addPartitions(). + public HCatPartition(HCatTable hcatTable, Map partitionKeyValues, String location) throws HCatException { + this.hcatTable = hcatTable; + this.tableName = hcatTable.getTableName(); + this.dbName = hcatTable.getDbName(); + this.sd = new StorageDescriptor(hcatTable.getSd()); + this.sd.setLocation(location); + this.createTime = (int)(System.currentTimeMillis()/1000); + this.lastAccessTime = -1; + this.values = new ArrayList(hcatTable.getPartCols().size()); + for (HCatFieldSchema partField : hcatTable.getPartCols()) { + if (!partitionKeyValues.containsKey(partField.getName())) { + throw new HCatException("Missing value for partition-key \'" + partField.getName() + + "\' in table: " + hcatTable.getDbName() + "." + hcatTable.getTableName()); + } + else { + values.add(partitionKeyValues.get(partField.getName())); + } } } + // For replicating an HCatPartition definition. + public HCatPartition(HCatPartition rhs, Map partitionKeyValues, String location) throws HCatException { + this.hcatTable = rhs.hcatTable; + this.tableName = rhs.tableName; + this.dbName = rhs.dbName; + this.sd = new StorageDescriptor(rhs.sd); + this.sd.setLocation(location); + + this.createTime = (int) (System.currentTimeMillis() / 1000); + this.lastAccessTime = -1; + this.values = new ArrayList(hcatTable.getPartCols().size()); + for (HCatFieldSchema partField : hcatTable.getPartCols()) { + if (!partitionKeyValues.containsKey(partField.getName())) { + throw new HCatException("Missing value for partition-key \'" + partField.getName() + + "\' in table: " + hcatTable.getDbName() + "." + hcatTable.getTableName()); + } else { + values.add(partitionKeyValues.get(partField.getName())); + } + } + } + + // For use from HCatClient.addPartitions(), to construct from user-input. 
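The "replication" constructor added above copies an existing partition's definition (storage descriptor and table binding) while substituting new key values and a new location. A hedged sketch of how a caller might use it follows; every name, key, and path here is an assumption for illustration.

import java.util.HashMap;
import java.util.Map;

import org.apache.hive.hcatalog.api.HCatAddPartitionDesc;
import org.apache.hive.hcatalog.api.HCatClient;
import org.apache.hive.hcatalog.api.HCatPartition;
import org.apache.hive.hcatalog.common.HCatException;

class ReplicatePartitionExample {
  static void replicate(HCatClient client) throws HCatException {
    Map<String, String> sourceSpec = new HashMap<String, String>();
    sourceSpec.put("dt", "2014-07-01");

    Map<String, String> targetSpec = new HashMap<String, String>();
    targetSpec.put("dt", "2014-07-02");

    // Fetch an existing partition, then clone its definition with new key values and location.
    HCatPartition source = client.getPartition("mydb", "mytable", sourceSpec);
    HCatPartition replica =
        new HCatPartition(source, targetSpec, "hdfs://namenode:8020/warehouse/mytable/dt=2014-07-02");
    client.addPartition(HCatAddPartitionDesc.create(replica).build());
  }
}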
+ Partition toHivePartition() throws HCatException { + Partition hivePtn = new Partition(); + hivePtn.setDbName(dbName); + hivePtn.setTableName(tableName); + hivePtn.setValues(values); + + hivePtn.setParameters(parameters); + if (sd.getLocation() == null) { + LOG.warn("Partition location is not set! Attempting to construct default partition location."); + try { + String partName = Warehouse.makePartName(HCatSchemaUtils.getFieldSchemas(hcatTable.getPartCols()), values); + sd.setLocation(new Path(hcatTable.getSd().getLocation(), partName).toString()); + } + catch(MetaException exception) { + throw new HCatException("Could not construct default partition-path for " + + hcatTable.getDbName() + "." + hcatTable.getTableName() + "[" + values + "]"); + } + } + hivePtn.setSd(sd); + + hivePtn.setCreateTime((int) (System.currentTimeMillis() / 1000)); + hivePtn.setLastAccessTimeIsSet(false); + return hivePtn; + } + + public HCatTable hcatTable() { + return hcatTable; + } + + public HCatPartition hcatTable(HCatTable hcatTable) { + this.hcatTable = hcatTable; + this.tableName = hcatTable.getTableName(); + this.dbName = hcatTable.getDbName(); + return this; + } + /** * Gets the table name. * @@ -81,8 +172,12 @@ public String getDatabaseName() { * * @return the columns */ - public List getColumns() { - return this.tableCols; + public List getColumns() throws HCatException { + ArrayList columns = new ArrayList(sd.getColsSize()); + for (FieldSchema fieldSchema : sd.getCols()) { + columns.add(HCatSchemaUtils.getHCatFieldSchema(fieldSchema)); + } + return columns; } /** @@ -124,6 +219,14 @@ public String getLocation() { } /** + * Setter for partition directory location. + */ + public HCatPartition location(String location) { + this.sd.setLocation(location); + return this; + } + + /** * Gets the serde. * * @return the serde @@ -132,6 +235,14 @@ public String getSerDe() { return this.sd.getSerdeInfo().getSerializationLib(); } + /** + * Getter for SerDe parameters. + * @return The SerDe parameters. + */ + public Map getSerdeParams() { + return this.sd.getSerdeInfo().getParameters(); + } + public Map getParameters() { return this.parameters; } @@ -164,6 +275,34 @@ public int getCreateTime() { } /** + * Getter for partition-spec map. + */ + public LinkedHashMap getPartitionKeyValMap() { + LinkedHashMap map = new LinkedHashMap(hcatTable.getPartCols().size()); + for (int i=0; i partitionKeyValues) throws HCatException { + for (HCatFieldSchema partField : hcatTable.getPartCols()) { + if (!partitionKeyValues.containsKey(partField.getName())) { + throw new HCatException("Missing value for partition-key \'" + partField.getName() + + "\' in table: " + hcatTable.getDbName() + "." + hcatTable.getTableName()); + } + else { + values.add(partitionKeyValues.get(partField.getName())); + // Keep partKeyValMap in synch as well. + } + } + return this; + } + + /** * Gets the bucket columns. * * @return the bucket columns @@ -192,13 +331,14 @@ public int getNumBuckets() { @Override public String toString() { - return "HCatPartition [" - + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null") - + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null") - + (values != null ? "values=" + values + ", " : "values=null") - + "createTime=" + createTime + ", lastAccessTime=" - + lastAccessTime + ", " + (sd != null ? "sd=" + sd + ", " : "sd=null") - + (parameters != null ? 
"parameters=" + parameters : "parameters=null") + "]"; + return "HCatPartition [ " + + "tableName=" + tableName + "," + + "dbName=" + dbName + "," + + "values=" + values + "," + + "createTime=" + createTime + "," + + "lastAccessTime=" + lastAccessTime + "," + + "sd=" + sd + "," + + "parameters=" + parameters + "]"; } } diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java index be254c3..2e2987d 100644 --- a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/HCatTable.java @@ -19,62 +19,200 @@ package org.apache.hive.hcatalog.api; import java.util.ArrayList; +import java.util.EnumSet; +import java.util.HashMap; import java.util.List; import java.util.Map; +import com.google.common.collect.Maps; +import org.apache.commons.lang.StringUtils; +import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.MetaStoreUtils; +import org.apache.hadoop.hive.metastore.TableType; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; +import org.apache.hadoop.hive.ql.io.RCFileInputFormat; +import org.apache.hadoop.hive.ql.io.RCFileOutputFormat; +import org.apache.hadoop.hive.ql.io.orc.OrcInputFormat; +import org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat; +import org.apache.hadoop.hive.ql.io.orc.OrcSerde; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler; +import org.apache.hadoop.hive.ql.metadata.HiveUtils; +import org.apache.hadoop.hive.serde.serdeConstants; +import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe; +import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; +import org.apache.hadoop.mapred.SequenceFileInputFormat; import org.apache.hive.hcatalog.common.HCatException; import org.apache.hive.hcatalog.data.schema.HCatFieldSchema; import org.apache.hive.hcatalog.data.schema.HCatSchemaUtils; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; /** * The HCatTable is a wrapper around org.apache.hadoop.hive.metastore.api.Table. */ public class HCatTable { + private static final Logger LOG = LoggerFactory.getLogger(HCatTable.class); + public static enum Type { + MANAGED_TABLE, + EXTERNAL_TABLE, + VIRTUAL_VIEW, + INDEX_TABLE + } + + /** + * Attributes that can be compared between HCatTables. + */ + public static enum TableAttribute { + COLUMNS, + PARTITION_COLUMNS, + INPUT_FORMAT, + OUTPUT_FORMAT, + SERDE, + SERDE_PROPERTIES, + STORAGE_HANDLER, + LOCATION, + TABLE_PROPERTIES, + STATS // TODO: Handle replication of changes to Table-STATS. + } + + /** + * The default set of attributes that can be diffed between HCatTables. 
+ */ + public static final EnumSet DEFAULT_COMPARISON_ATTRIBUTES + = EnumSet.of(TableAttribute.COLUMNS, + TableAttribute.INPUT_FORMAT, + TableAttribute.OUTPUT_FORMAT, + TableAttribute.SERDE, + TableAttribute.SERDE_PROPERTIES, + TableAttribute.STORAGE_HANDLER, + TableAttribute.TABLE_PROPERTIES); + + /** + * 2 HCatTables are considered equivalent if {@code lhs.diff(rhs).equals(NO_DIFF) == true; } + */ + public static final EnumSet NO_DIFF = EnumSet.noneOf(TableAttribute.class); + + public static final String DEFAULT_SERDE_CLASS = org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe.class.getName(); + public static final String DEFAULT_INPUT_FORMAT_CLASS = org.apache.hadoop.mapred.TextInputFormat.class.getName(); + public static final String DEFAULT_OUTPUT_FORMAT_CLASS = org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat.class.getName(); + + private String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; private String tableName; - private String tabletype; - private List cols; - private List partCols; - private List bucketCols; - private List sortCols; - private int numBuckets; - private String inputFileFormat; - private String outputFileFormat; - private String storageHandler; - private Map tblProps; - private String dbName; - private String serde; - private String location; - private Map serdeParams; + private HiveConf conf; + private String tableType; + private boolean isExternal; + private List cols = new ArrayList(); + private List partCols = new ArrayList(); + private StorageDescriptor sd; + private String fileFormat; + private Map tblProps = new HashMap(); + private String comment = ""; + private String owner; + + public HCatTable(String dbName, String tableName) { + this.dbName = StringUtils.isBlank(dbName)? MetaStoreUtils.DEFAULT_DATABASE_NAME : dbName; + this.tableName = tableName; + this.sd = new StorageDescriptor(); + this.sd.setInputFormat(DEFAULT_INPUT_FORMAT_CLASS); + this.sd.setOutputFormat(DEFAULT_OUTPUT_FORMAT_CLASS); + this.sd.setSerdeInfo(new SerDeInfo()); + this.sd.getSerdeInfo().setSerializationLib(DEFAULT_SERDE_CLASS); + this.sd.getSerdeInfo().setParameters(new HashMap()); + this.sd.getSerdeInfo().getParameters().put(serdeConstants.SERIALIZATION_FORMAT, "1"); // Default serialization format. 
+ } HCatTable(Table hiveTable) throws HCatException { - this.tableName = hiveTable.getTableName(); - this.dbName = hiveTable.getDbName(); - this.tabletype = hiveTable.getTableType(); - cols = new ArrayList(); - for (FieldSchema colFS : hiveTable.getSd().getCols()) { + tableName = hiveTable.getTableName(); + dbName = hiveTable.getDbName(); + tableType = hiveTable.getTableType(); + isExternal = hiveTable.getTableType().equals(TableType.EXTERNAL_TABLE.toString()); + sd = hiveTable.getSd(); + for (FieldSchema colFS : sd.getCols()) { cols.add(HCatSchemaUtils.getHCatFieldSchema(colFS)); } partCols = new ArrayList(); for (FieldSchema colFS : hiveTable.getPartitionKeys()) { partCols.add(HCatSchemaUtils.getHCatFieldSchema(colFS)); } - bucketCols = hiveTable.getSd().getBucketCols(); - sortCols = hiveTable.getSd().getSortCols(); - numBuckets = hiveTable.getSd().getNumBuckets(); - inputFileFormat = hiveTable.getSd().getInputFormat(); - outputFileFormat = hiveTable.getSd().getOutputFormat(); - storageHandler = hiveTable - .getSd() - .getParameters() - .get(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE); - tblProps = hiveTable.getParameters(); - serde = hiveTable.getSd().getSerdeInfo().getSerializationLib(); - location = hiveTable.getSd().getLocation(); - serdeParams = hiveTable.getSd().getSerdeInfo().getParameters(); + if (hiveTable.getParameters() != null) { + tblProps.putAll(hiveTable.getParameters()); + } + + if (StringUtils.isNotBlank(tblProps.get("comment"))) { + comment = tblProps.get("comment"); + } + + owner = hiveTable.getOwner(); + } + + Table toHiveTable() throws HCatException { + Table newTable = new Table(); + newTable.setDbName(dbName); + newTable.setTableName(tableName); + if (tblProps != null) { + newTable.setParameters(tblProps); + } + + if (isExternal) { + newTable.putToParameters("EXTERNAL", "TRUE"); + newTable.setTableType(TableType.EXTERNAL_TABLE.toString()); + } else { + newTable.setTableType(TableType.MANAGED_TABLE.toString()); + } + + if (this.comment != null) { + newTable.putToParameters("comment", comment); + } + + newTable.setSd(sd); + if (partCols != null) { + ArrayList hivePtnCols = new ArrayList(); + for (HCatFieldSchema fs : partCols) { + hivePtnCols.add(HCatSchemaUtils.getFieldSchema(fs)); + } + newTable.setPartitionKeys(hivePtnCols); + } + + newTable.setCreateTime((int) (System.currentTimeMillis() / 1000)); + newTable.setLastAccessTimeIsSet(false); + try { + // TODO: Verify that this works for systems using UGI.doAs() (e.g. Oozie). + newTable.setOwner(owner == null? getConf().getUser() : owner); + } + catch (Exception exception) { + throw new HCatException("Unable to determine owner of table (" + dbName + "." + tableName + + ") from HiveConf."); + } + return newTable; + } + + void setConf(Configuration conf) { + if (conf instanceof HiveConf) { + this.conf = (HiveConf)conf; + } + else { + this.conf = new HiveConf(conf, getClass()); + } + } + + HiveConf getConf() { + if (conf == null) { + LOG.warn("Conf hasn't been set yet. Using defaults."); + conf = new HiveConf(); + } + return conf; + } + + StorageDescriptor getSd() { + return sd; } /** @@ -87,6 +225,14 @@ public String getTableName() { } /** + * Setter for TableName. + */ + public HCatTable tableName(String tableName) { + this.tableName = tableName; + return this; + } + + /** * Gets the db name. * * @return the db name @@ -96,6 +242,14 @@ public String getDbName() { } /** + * Setter for db-name. 
+ */ + public HCatTable dbName(String dbName) { + this.dbName = dbName; + return this; + } + + /** * Gets the columns. * * @return the columns @@ -105,6 +259,18 @@ public String getDbName() { } /** + * Setter for Column schemas. + */ + public HCatTable cols(List cols) { + if (!this.cols.equals(cols)) { + this.cols.clear(); + this.cols.addAll(cols); + this.sd.setCols(HCatSchemaUtils.getFieldSchemas(cols)); + } + return this; + } + + /** * Gets the part columns. * * @return the part columns @@ -114,12 +280,40 @@ public String getDbName() { } /** + * Setter for list of partition columns. + */ + public HCatTable partCols(List partCols) { + this.partCols = partCols; + return this; + } + + /** + * Setter for individual partition columns. + */ + public HCatTable partCol(HCatFieldSchema partCol) { + if (this.partCols == null) { + this.partCols = new ArrayList(); + } + + this.partCols.add(partCol); + return this; + } + + /** * Gets the bucket columns. * * @return the bucket columns */ public List getBucketCols() { - return bucketCols; + return this.sd.getBucketCols(); + } + + /** + * Setter for list of bucket columns. + */ + public HCatTable bucketCols(List bucketCols) { + this.sd.setBucketCols(bucketCols); + return this; } /** @@ -128,7 +322,15 @@ public String getDbName() { * @return the sort columns */ public List getSortCols() { - return sortCols; + return this.sd.getSortCols(); + } + + /** + * Setter for Sort-cols. + */ + public HCatTable sortCols(List sortCols) { + this.sd.setSortCols(sortCols); + return this; } /** @@ -137,7 +339,15 @@ public String getDbName() { * @return the number of buckets */ public int getNumBuckets() { - return numBuckets; + return this.sd.getNumBuckets(); + } + + /** + * Setter for number of buckets. + */ + public HCatTable numBuckets(int numBuckets) { + this.sd.setNumBuckets(numBuckets); + return this; } /** @@ -146,7 +356,29 @@ public int getNumBuckets() { * @return the storage handler */ public String getStorageHandler() { - return storageHandler; + return this.tblProps.get(org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE); + } + + /** + * Setter for StorageHandler class. + */ + public HCatTable storageHandler(String storageHandler) throws HCatException { + this.tblProps.put( + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, + storageHandler); + LOG.warn("HiveStorageHandlers can't be reliably instantiated on the client-side. " + + "Attempting to derive Input/OutputFormat settings from StorageHandler, on best effort: "); + try { + HiveStorageHandler sh = HiveUtils.getStorageHandler(getConf(), storageHandler); + this.sd.setInputFormat(sh.getInputFormatClass().getName()); + this.sd.setOutputFormat(sh.getOutputFormatClass().getName()); + this.sd.getSerdeInfo().setSerializationLib(sh.getSerDeClass().getName()); + } catch (HiveException e) { + LOG.warn("Could not derive Input/OutputFormat and SerDe settings from storageHandler. " + + "These values need to be set explicitly.", e); + } + + return this; } /** @@ -159,21 +391,83 @@ public String getStorageHandler() { } /** - * Gets the tabletype. + * Setter for TableProperty map. + */ + public HCatTable tblProps(Map tblProps) { + if (!this.tblProps.equals(tblProps)) { + this.tblProps.clear(); + this.tblProps.putAll(tblProps); + } + return this; + } + + /** + * Gets the tableType. * - * @return the tabletype + * @return the tableType */ public String getTabletype() { - return tabletype; + return tableType; + } + + /** + * Setter for table-type. 
+ */ + public HCatTable tableType(Type tableType) { + this.tableType = tableType.name(); + this.isExternal = tableType.equals(Type.EXTERNAL_TABLE); + return this; + } + + private SerDeInfo getSerDeInfo() { + if (!sd.isSetSerdeInfo()) { + sd.setSerdeInfo(new SerDeInfo()); + } + return sd.getSerdeInfo(); + } + + public HCatTable fileFormat(String fileFormat) { + this.fileFormat = fileFormat; + + if (fileFormat.equalsIgnoreCase("sequencefile")) { + inputFileFormat(SequenceFileInputFormat.class.getName()); + outputFileFormat(HiveSequenceFileOutputFormat.class.getName()); + serdeLib(LazySimpleSerDe.class.getName()); + } + else + if (fileFormat.equalsIgnoreCase("rcfile")) { + inputFileFormat(RCFileInputFormat.class.getName()); + outputFileFormat(RCFileOutputFormat.class.getName()); + serdeLib(LazyBinaryColumnarSerDe.class.getName()); + } + else + if (fileFormat.equalsIgnoreCase("orcfile")) { + inputFileFormat(OrcInputFormat.class.getName()); + outputFileFormat(OrcOutputFormat.class.getName()); + serdeLib(OrcSerde.class.getName()); + } + + return this; } + public String fileFormat() { + return fileFormat; + } /** * Gets the input file format. * * @return the input file format */ public String getInputFileFormat() { - return inputFileFormat; + return sd.getInputFormat(); + } + + /** + * Setter for InputFormat class. + */ + public HCatTable inputFileFormat(String inputFileFormat) { + sd.setInputFormat(inputFileFormat); + return this; } /** @@ -182,7 +476,15 @@ public String getInputFileFormat() { * @return the output file format */ public String getOutputFileFormat() { - return outputFileFormat; + return sd.getOutputFormat(); + } + + /** + * Setter for OutputFormat class. + */ + public HCatTable outputFileFormat(String outputFileFormat) { + this.sd.setOutputFormat(outputFileFormat); + return this; } /** @@ -191,7 +493,37 @@ public String getOutputFileFormat() { * @return the serde lib */ public String getSerdeLib() { - return serde; + return getSerDeInfo().getSerializationLib(); + } + + /** + * Setter for SerDe class name. + */ + public HCatTable serdeLib(String serde) { + getSerDeInfo().setSerializationLib(serde); + return this; + } + + public HCatTable serdeParams(Map serdeParams) { + getSerDeInfo().setParameters(serdeParams); + return this; + } + + public HCatTable serdeParam(String paramName, String value) { + SerDeInfo serdeInfo = getSerDeInfo(); + if (serdeInfo.getParameters() == null) { + serdeInfo.setParameters(new HashMap()); + } + serdeInfo.getParameters().put(paramName, value); + + return this; + } + + /** + * Returns parameters such as field delimiter,etc. + */ + public Map getSerdeParams() { + return getSerDeInfo().getParameters(); } /** @@ -200,38 +532,230 @@ public String getSerdeLib() { * @return the location */ public String getLocation() { - return location; + return sd.getLocation(); } + /** - * Returns parameters such as field delimiter,etc. + * Setter for location. */ - public Map getSerdeParams() { - return serdeParams; + public HCatTable location(String location) { + this.sd.setLocation(location); + return this; + } + + /** + * Getter for table-owner. + */ + public String owner() { + return owner; + } + + /** + * Setter for table-owner. + */ + public HCatTable owner(String owner) { + this.owner = owner; + return this; + } + + public String comment() { + return this.comment; + } + + /** + * Setter for table-level comment. 
+ */ + public HCatTable comment(String comment) { + this.comment = comment; + return this; + } + + /** + * See row_format element of CREATE_TABLE DDL for Hive. + */ + public HCatTable fieldsTerminatedBy(char delimiter) { + return serdeParam(serdeConstants.FIELD_DELIM, Character.toString(delimiter)); + } + /** + * See row_format element of CREATE_TABLE DDL for Hive. + */ + public HCatTable escapeChar(char escapeChar) { + return serdeParam(serdeConstants.ESCAPE_CHAR, Character.toString(escapeChar)); + } + /** + * See row_format element of CREATE_TABLE DDL for Hive. + */ + public HCatTable collectionItemsTerminatedBy(char delimiter) { + return serdeParam(serdeConstants.COLLECTION_DELIM, Character.toString(delimiter)); + } + /** + * See row_format element of CREATE_TABLE DDL for Hive. + */ + public HCatTable mapKeysTerminatedBy(char delimiter) { + return serdeParam(serdeConstants.MAPKEY_DELIM, Character.toString(delimiter)); + } + /** + * See row_format element of CREATE_TABLE DDL for Hive. + */ + public HCatTable linesTerminatedBy(char delimiter) { + return serdeParam(serdeConstants.LINE_DELIM, Character.toString(delimiter)); + } + /** + * See row_format element of CREATE_TABLE DDL for Hive. + */ + public HCatTable nullDefinedAs(char nullChar) { + return serdeParam(serdeConstants.SERIALIZATION_NULL_FORMAT, Character.toString(nullChar)); } @Override public String toString() { - return "HCatTable [" - + (tableName != null ? "tableName=" + tableName + ", " : "tableName=null") - + (dbName != null ? "dbName=" + dbName + ", " : "dbName=null") - + (tabletype != null ? "tabletype=" + tabletype + ", " : "tabletype=null") - + (cols != null ? "cols=" + cols + ", " : "cols=null") - + (partCols != null ? "partCols=" + partCols + ", " : "partCols==null") - + (bucketCols != null ? "bucketCols=" + bucketCols + ", " : "bucketCols=null") - + (sortCols != null ? "sortCols=" + sortCols + ", " : "sortCols=null") - + "numBuckets=" - + numBuckets - + ", " - + (inputFileFormat != null ? "inputFileFormat=" - + inputFileFormat + ", " : "inputFileFormat=null") - + (outputFileFormat != null ? "outputFileFormat=" - + outputFileFormat + ", " : "outputFileFormat=null") - + (storageHandler != null ? "storageHandler=" + storageHandler - + ", " : "storageHandler=null") - + (tblProps != null ? "tblProps=" + tblProps + ", " : "tblProps=null") - + (serde != null ? "serde=" + serde + ", " : "serde=") - + (location != null ? "location=" + location : "location=") - + ",serdeParams=" + (serdeParams == null ? "null" : serdeParams) - + "]"; + return "HCatTable [ " + + "tableName=" + tableName + ", " + + "dbName=" + dbName + ", " + + "tableType=" + tableType + ", " + + "cols=" + cols + ", " + + "partCols=" + partCols + ", " + + "bucketCols=" + getBucketCols() + ", " + + "numBuckets=" + getNumBuckets() + ", " + + "sortCols=" + getSortCols() + ", " + + "inputFormat=" + getInputFileFormat() + ", " + + "outputFormat=" + getOutputFileFormat() + ", " + + "storageHandler=" + getStorageHandler() + ", " + + "serde=" + getSerdeLib() + ", " + + "tblProps=" + getTblProps() + ", " + + "location=" + getLocation() + ", " + + "owner=" + owner() + " ]"; + + } + + /** + * Method to compare the attributes of 2 HCatTable instances. + * @param rhs The other table being compared against. Can't be null. + * @param attributesToCheck The list of TableAttributes being compared. + * @return {@code EnumSet} containing all the attribute that differ between {@code this} and rhs. + * Subset of {@code attributesToCheck}. 
+ */ + public EnumSet diff(HCatTable rhs, EnumSet attributesToCheck) { + EnumSet theDiff = EnumSet.noneOf(TableAttribute.class); + + for (TableAttribute attribute : attributesToCheck) { + + if (attribute.equals(TableAttribute.COLUMNS)) { + if (!rhs.getCols().containsAll(getCols()) || + !getCols().containsAll(rhs.getCols())) { + theDiff.add(TableAttribute.COLUMNS); + } + } + + if (attribute.equals(TableAttribute.INPUT_FORMAT)) { + if ((getInputFileFormat() == null && rhs.getInputFileFormat() != null) + || (getInputFileFormat() != null && (rhs.getInputFileFormat() == null || !rhs.getInputFileFormat().equals(getInputFileFormat())))) { + theDiff.add(TableAttribute.INPUT_FORMAT); + } + } + + if (attribute.equals(TableAttribute.OUTPUT_FORMAT)) { + if ((getOutputFileFormat() == null && rhs.getOutputFileFormat() != null) + || (getOutputFileFormat() != null && (rhs.getOutputFileFormat() == null || !rhs.getOutputFileFormat().equals(getOutputFileFormat())))) { + theDiff.add(TableAttribute.OUTPUT_FORMAT); + } + } + + if (attribute.equals(TableAttribute.STORAGE_HANDLER)) { + if ((getStorageHandler() == null && rhs.getStorageHandler() != null) + || (getStorageHandler() != null && (rhs.getStorageHandler() == null || !rhs.getStorageHandler().equals(getStorageHandler())))) { + theDiff.add(TableAttribute.STORAGE_HANDLER); + } + } + + if (attribute.equals(TableAttribute.SERDE)) { + if ((getSerdeLib() == null && rhs.getSerdeLib() != null) + || (getSerdeLib() != null && (rhs.getSerdeLib() == null || !rhs.getSerdeLib().equals(getSerdeLib())))) { + theDiff.add(TableAttribute.SERDE); + } + } + + if (attribute.equals(TableAttribute.SERDE_PROPERTIES)) { + if (!equivalent(sd.getSerdeInfo().getParameters(), rhs.sd.getSerdeInfo().getParameters())) { + theDiff.add(TableAttribute.SERDE_PROPERTIES); + } + } + + if (attribute.equals(TableAttribute.TABLE_PROPERTIES)) { + if (!equivalent(tblProps, rhs.tblProps)) { + theDiff.add(TableAttribute.TABLE_PROPERTIES); + } + } + + } + + return theDiff; + } + + /** + * Helper method to compare 2 Map instances, for equivalence. + * @param lhs First map to be compared. + * @param rhs Second map to be compared. + * @return true, if the 2 Maps contain the same entries. + */ + private static boolean equivalent(Map lhs, Map rhs) { + return lhs.size() == rhs.size() && Maps.difference(lhs, rhs).areEqual(); + } + + /** + * Method to compare the attributes of 2 HCatTable instances. + * Only the {@code DEFAULT_COMPARISON_ATTRIBUTES} are compared. + * @param rhs The other table being compared against. Can't be null. + * @return {@code EnumSet} containing all the attribute that differ between {@code this} and rhs. + * Subset of {@code DEFAULT_COMPARISON_ATTRIBUTES}. + */ + public EnumSet diff (HCatTable rhs) { + return diff(rhs, DEFAULT_COMPARISON_ATTRIBUTES); + } + + /** + * Method to "adopt" the specified attributes from rhs into this HCatTable object. + * @param rhs The "source" table from which attributes are to be copied from. + * @param attributes The set of attributes to be copied from rhs. Usually the result of {@code this.diff(rhs)}. 
+ * @return This HCatTable + * @throws HCatException + */ + public HCatTable resolve(HCatTable rhs, EnumSet attributes) throws HCatException { + + if (rhs == this) + return this; + + for (TableAttribute attribute : attributes) { + + if (attribute.equals(TableAttribute.COLUMNS)) { + cols(rhs.cols); + } + + if (attribute.equals(TableAttribute.INPUT_FORMAT)) { + inputFileFormat(rhs.getInputFileFormat()); + } + + if (attribute.equals(TableAttribute.OUTPUT_FORMAT)) { + outputFileFormat(rhs.getOutputFileFormat()); + } + + if (attribute.equals(TableAttribute.SERDE)) { + serdeLib(rhs.getSerdeLib()); + } + + if (attribute.equals(TableAttribute.SERDE_PROPERTIES)) { + serdeParams(rhs.getSerdeParams()); + } + + if (attribute.equals(TableAttribute.STORAGE_HANDLER)) { + storageHandler(rhs.getStorageHandler()); + } + + if (attribute.equals(TableAttribute.TABLE_PROPERTIES)) { + tblProps(rhs.tblProps); + } + } + + return this; } } diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataJSONSerializer.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataJSONSerializer.java new file mode 100644 index 0000000..30ac00f --- /dev/null +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataJSONSerializer.java @@ -0,0 +1,71 @@ +package org.apache.hive.hcatalog.api; + +import org.apache.hadoop.hive.metastore.api.Partition; +import org.apache.hadoop.hive.metastore.api.Table; +import org.apache.hive.hcatalog.common.HCatException; +import org.apache.thrift.TDeserializer; +import org.apache.thrift.TException; +import org.apache.thrift.TSerializer; +import org.apache.thrift.protocol.TJSONProtocol; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +/** + * MetadataSerializer implementation, that serializes HCat API elements into JSON. 
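Taken together, diff() and resolve() give a one-way reconciliation path between two catalogs: compute the attributes that differ, adopt them from the source table, and push the result back. A minimal sketch with exception handling omitted, assuming 'sourceClient' and 'targetClient' are HCatClient handles for two metastores that both already hold the table; a narrower comparison can also be requested by passing an explicit EnumSet to diff(rhs, attributesToCheck).

    HCatTable sourceTable = sourceClient.getTable("myDb", "myTable");
    HCatTable targetTable = targetClient.getTable("myDb", "myTable");

    // Compare the default attribute set (columns, I/O formats, SerDe settings, properties).
    EnumSet<HCatTable.TableAttribute> diff = targetTable.diff(sourceTable);

    if (!diff.isEmpty()) {
      // Adopt the differing attributes from the source, then persist the change.
      targetClient.updateTableSchema("myDb", "myTable",
          targetTable.resolve(sourceTable, diff));
    }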
+ */ +class MetadataJSONSerializer extends MetadataSerializer { + + private static final Logger LOG = LoggerFactory.getLogger(MetadataJSONSerializer.class); + + MetadataJSONSerializer() throws HCatException {} + + @Override + public String serializeTable(HCatTable hcatTable) throws HCatException { + try { + return new TSerializer(new TJSONProtocol.Factory()) + .toString(hcatTable.toHiveTable(), "UTF-8"); + } + catch (TException exception) { + throw new HCatException("Could not serialize HCatTable: " + hcatTable, exception); + } + } + + @Override + public HCatTable deserializeTable(String hcatTableStringRep) throws HCatException { + try { + Table table = new Table(); + new TDeserializer(new TJSONProtocol.Factory()).deserialize(table, hcatTableStringRep, "UTF-8"); + return new HCatTable(table); + } + catch(TException exception) { + if (LOG.isDebugEnabled()) + LOG.debug("Could not de-serialize from: " + hcatTableStringRep); + throw new HCatException("Could not de-serialize HCatTable.", exception); + } + } + + @Override + public String serializePartition(HCatPartition hcatPartition) throws HCatException { + try { + return new TSerializer(new TJSONProtocol.Factory()) + .toString(hcatPartition.toHivePartition(), "UTF-8"); + } + catch (TException exception) { + throw new HCatException("Could not serialize HCatPartition: " + hcatPartition, exception); + } + } + + @Override + public HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException { + try { + Partition partition = new Partition(); + new TDeserializer(new TJSONProtocol.Factory()).deserialize(partition, hcatPartitionStringRep, "UTF-8"); + return new HCatPartition(null, partition); + } + catch(TException exception) { + if (LOG.isDebugEnabled()) + LOG.debug("Could not de-serialize partition from: " + hcatPartitionStringRep); + throw new HCatException("Could not de-serialize HCatPartition.", exception); + } + } +} diff --git a/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataSerializer.java b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataSerializer.java new file mode 100644 index 0000000..dd5da99 --- /dev/null +++ b/hcatalog/webhcat/java-client/src/main/java/org/apache/hive/hcatalog/api/MetadataSerializer.java @@ -0,0 +1,54 @@ +package org.apache.hive.hcatalog.api; + +import org.apache.hive.hcatalog.common.HCatException; + +/** + * Interface to serialize HCat API elements. + */ +abstract class MetadataSerializer { + + // Prevent construction outside the get() method. + protected MetadataSerializer() {} + + /** + * Static getter method for the appropriate MetadataSerializer implementation. + * @return MetadataSerializer sub-class. + * @throws HCatException On failure to construct a concrete MetadataSerializer. + */ + public static MetadataSerializer get() throws HCatException { + return new MetadataJSONSerializer(); + } + + /** + * Serializer for HCatTable instances. + * @param hcatTable The HCatTable operand, to be serialized. + * @return Serialized (i.e. String-ified) HCatTable. + * @throws HCatException On failure to serialize. + */ + public abstract String serializeTable(HCatTable hcatTable) throws HCatException ; + + /** + * Deserializer for HCatTable string-representations. + * @param hcatTableStringRep Serialized HCatTable String (gotten from serializeTable()). + * @return Deserialized HCatTable instance. + * @throws HCatException On failure to deserialize (e.g. incompatible serialization format, etc.) 
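The JSON serializer above delegates entirely to Thrift's TJSONProtocol, so the round trip can be sketched directly against a Thrift-generated metastore object. Field values are placeholders and exception handling is omitted (TSerializer/TDeserializer throw TException), so treat this as a fragment rather than a complete method.

    // org.apache.hadoop.hive.metastore.api.Table, the type produced by toHiveTable()
    // and consumed by the HCatTable(Table) constructor.
    Table thriftTable = new Table();
    thriftTable.setDbName("myDb");
    thriftTable.setTableName("myTable");

    String json = new TSerializer(new TJSONProtocol.Factory()).toString(thriftTable, "UTF-8");

    Table roundTripped = new Table();
    new TDeserializer(new TJSONProtocol.Factory()).deserialize(roundTripped, json, "UTF-8");

    // Thrift-generated equals() compares field by field.
    assert thriftTable.equals(roundTripped);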
+ */ + public abstract HCatTable deserializeTable(String hcatTableStringRep) throws HCatException; + + /** + * Serializer for HCatPartition instances. + * @param hcatPartition The HCatPartition operand, to be serialized. + * @return Serialized (i.e. String-ified) HCatPartition. + * @throws HCatException On failure to serialize. + */ + public abstract String serializePartition(HCatPartition hcatPartition) throws HCatException; + + /** + * Deserializer for HCatPartition string-representations. + * @param hcatPartitionStringRep Serialized HCatPartition String (gotten from serializePartition()). + * @return Deserialized HCatPartition instance. + * @throws HCatException On failure to deserialize (e.g. incompatible serialization format, etc.) + */ + public abstract HCatPartition deserializePartition(String hcatPartitionStringRep) throws HCatException; + +} diff --git a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java index aea3f31..f9f7b04 100644 --- a/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java +++ b/hcatalog/webhcat/java-client/src/test/java/org/apache/hive/hcatalog/api/TestHCatClient.java @@ -21,6 +21,7 @@ import java.math.BigInteger; import java.util.ArrayList; import java.util.Arrays; +import java.util.EnumSet; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -38,7 +39,7 @@ import org.apache.hadoop.hive.ql.io.orc.OrcSerde; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.serde.serdeConstants; -import org.apache.hadoop.hive.serde2.columnar.ColumnarSerDe; +import org.apache.hadoop.hive.serde2.columnar.LazyBinaryColumnarSerDe; import org.apache.hadoop.mapred.TextInputFormat; import org.apache.hive.hcatalog.cli.SemanticAnalysis.HCatSemanticAnalyzer; import org.apache.hive.hcatalog.common.HCatConstants; @@ -65,19 +66,37 @@ private static final Logger LOG = LoggerFactory.getLogger(TestHCatClient.class); private static final String msPort = "20101"; private static HiveConf hcatConf; + private static boolean isReplicationTargetHCatRunning = false; + private static final String replicationTargetHCatPort = "20102"; + private static HiveConf replicationTargetHCatConf; private static SecurityManager securityManager; private static class RunMS implements Runnable { + private final String msPort; + private List args = new ArrayList(); + + public RunMS(String msPort) { + this.msPort = msPort; + this.args.add("-v"); + this.args.add("-p"); + this.args.add(this.msPort); + } + + public RunMS arg(String arg) { + this.args.add(arg); + return this; + } + @Override public void run() { try { - HiveMetaStore.main(new String[]{"-v", "-p", msPort}); + HiveMetaStore.main(args.toArray(new String[args.size()])); } catch (Throwable t) { LOG.error("Exiting. 
Got exception from metastore: ", t); } } - } + } // class RunMS; @AfterClass public static void tearDown() throws Exception { @@ -88,9 +107,9 @@ public static void tearDown() throws Exception { @BeforeClass public static void startMetaStoreServer() throws Exception { - Thread t = new Thread(new RunMS()); + Thread t = new Thread(new RunMS(msPort)); t.start(); - Thread.sleep(40000); + Thread.sleep(10000); securityManager = System.getSecurityManager(); System.setSecurityManager(new NoExitSecurityManager()); @@ -152,7 +171,7 @@ public void testBasicDDLCommands() throws Exception { assertTrue(table1.getOutputFileFormat().equalsIgnoreCase( RCFileOutputFormat.class.getName())); assertTrue(table1.getSerdeLib().equalsIgnoreCase( - ColumnarSerDe.class.getName())); + LazyBinaryColumnarSerDe.class.getName())); assertTrue(table1.getCols().equals(cols)); // Since "ifexists" was not set to true, trying to create the same table // again @@ -171,8 +190,8 @@ public void testBasicDDLCommands() throws Exception { mapKeysTerminatedBy('\004').collectionItemsTerminatedBy('\005').nullDefinedAs('\006').build(); client.createTable(tableDesc2); HCatTable table2 = client.getTable(db, tableTwo); - assertTrue(table2.getInputFileFormat().equalsIgnoreCase( - TextInputFormat.class.getName())); + assertTrue("Expected TextInputFormat, but got: " + table2.getInputFileFormat(), + table2.getInputFileFormat().equalsIgnoreCase(TextInputFormat.class.getName())); assertTrue(table2.getOutputFileFormat().equalsIgnoreCase( HiveIgnoreKeyTextOutputFormat.class.getName())); assertTrue("SerdeParams not found", table2.getSerdeParams() != null); @@ -222,9 +241,10 @@ public void testEmptyTableInstantiation() throws Exception { cols.add(new HCatFieldSchema("id", Type.INT, "id comment")); cols.add(new HCatFieldSchema("value", Type.STRING, "value comment")); + client.dropTable(dbName, tblName, true); // Create a minimalistic table client.createTable(HCatCreateTableDesc - .create(dbName, tblName, cols) + .create(new HCatTable(dbName, tblName).cols(cols), false) .build()); HCatTable tCreated = client.getTable(dbName, tblName); @@ -281,21 +301,26 @@ public void testPartitionsHCatClientImpl() throws Exception { ptnCols.add(new HCatFieldSchema("dt", Type.STRING, "date column")); ptnCols.add(new HCatFieldSchema("country", Type.STRING, "country column")); - HCatCreateTableDesc tableDesc = HCatCreateTableDesc - .create(dbName, tableName, cols).fileFormat("sequencefile") - .partCols(ptnCols).build(); + HCatTable table = new HCatTable(dbName, tableName).cols(cols) + .partCols(ptnCols) + .fileFormat("sequenceFile"); + HCatCreateTableDesc tableDesc = HCatCreateTableDesc.create(table, false).build(); client.createTable(tableDesc); + // Verify that the table is created successfully. + table = client.getTable(dbName, tableName); + Map firstPtn = new HashMap(); firstPtn.put("dt", "04/30/2012"); firstPtn.put("country", "usa"); - HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(dbName, - tableName, null, firstPtn).build(); + // Test new HCatAddPartitionsDesc API. + HCatAddPartitionDesc addPtn = HCatAddPartitionDesc.create(new HCatPartition(table, firstPtn, null)).build(); client.addPartition(addPtn); Map secondPtn = new HashMap(); secondPtn.put("dt", "04/12/2012"); secondPtn.put("country", "brazil"); + // Test deprecated HCatAddPartitionsDesc API. 
HCatAddPartitionDesc addPtn2 = HCatAddPartitionDesc.create(dbName, tableName, null, secondPtn).build(); client.addPartition(addPtn2); @@ -303,6 +328,7 @@ public void testPartitionsHCatClientImpl() throws Exception { Map thirdPtn = new HashMap(); thirdPtn.put("dt", "04/13/2012"); thirdPtn.put("country", "argentina"); + // Test deprecated HCatAddPartitionsDesc API. HCatAddPartitionDesc addPtn3 = HCatAddPartitionDesc.create(dbName, tableName, null, thirdPtn).build(); client.addPartition(addPtn3); @@ -540,9 +566,8 @@ public void testObjectNotFoundException() throws Exception { List columns = Arrays.asList(new HCatFieldSchema("col", Type.STRING, "")); ArrayList partitionColumns = new ArrayList( Arrays.asList(new HCatFieldSchema(partitionColumn, Type.STRING, ""))); - client.createTable(HCatCreateTableDesc.create(dbName, tableName, columns) - .partCols(partitionColumns) - .build()); + HCatTable table = new HCatTable(dbName, tableName).cols(columns).partCols(partitionColumns); + client.createTable(HCatCreateTableDesc.create(table, false).build()); Map partitionSpec = new HashMap(); partitionSpec.put(partitionColumn, "foobar"); @@ -555,7 +580,7 @@ public void testObjectNotFoundException() throws Exception { exception instanceof ObjectNotFoundException); } - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); // Test that listPartitionsByFilter() returns an empty-set, if the filter selects no partitions. assertEquals("Expected empty set of partitions.", @@ -649,21 +674,26 @@ public void testGetPartitionsWithPartialSpec() throws Exception { List partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""), new HCatFieldSchema("grid", Type.STRING, "")); - client.createTable(HCatCreateTableDesc.create(dbName, tableName, columnSchema).partCols(new ArrayList(partitionSchema)).build()); + HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema); + client.createTable(HCatCreateTableDesc.create(table, false).build()); + + // Verify that the table was created successfully. 
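Across these tests, the newer HCatAddPartitionDesc.create(HCatPartition) form replaces the deprecated create(dbName, tableName, location, spec) form; building the descriptor from an HCatPartition is what lets a new partition inherit the table's column schema, I/O formats and SerDe. A minimal standalone sketch with exception handling omitted, assuming 'client' is a connected HCatClient and the table already exists; the names and partition spec are illustrative.

    HCatTable table = client.getTable("myDb", "myTable");

    Map<String, String> spec = new HashMap<String, String>();
    spec.put("dt", "2012_01_01");
    spec.put("grid", "AB");

    // Empty location, as in the tests here; the partition picks up columns,
    // InputFormat/OutputFormat and SerDe from 'table'.
    HCatPartition partition = new HCatPartition(table, spec, "");
    client.addPartition(HCatAddPartitionDesc.create(partition).build());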
+ table = client.getTable(dbName, tableName); + assertNotNull("The created just now can't be null.", table); Map partitionSpec = new HashMap(); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2011_12_31"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2012_01_01"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "OB"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "XB"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); Map partialPartitionSpec = new HashMap(); partialPartitionSpec.put("dt", "2012_01_01"); @@ -698,21 +728,26 @@ public void testDropPartitionsWithPartialSpec() throws Exception { List partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""), new HCatFieldSchema("grid", Type.STRING, "")); - client.createTable(HCatCreateTableDesc.create(dbName, tableName, columnSchema).partCols(new ArrayList(partitionSchema)).build()); + HCatTable table = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema); + client.createTable(HCatCreateTableDesc.create(table, false).build()); + + // Verify that the table was created successfully. + table = client.getTable(dbName, tableName); + assertNotNull("Table couldn't be queried for. 
", table); Map partitionSpec = new HashMap(); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2011_12_31"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); partitionSpec.put("grid", "AB"); partitionSpec.put("dt", "2012_01_01"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "OB"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); partitionSpec.put("dt", "2012_01_01"); partitionSpec.put("grid", "XB"); - client.addPartition(HCatAddPartitionDesc.create(dbName, tableName, "", partitionSpec).build()); + client.addPartition(HCatAddPartitionDesc.create(new HCatPartition(table, partitionSpec, "")).build()); Map partialPartitionSpec = new HashMap(); partialPartitionSpec.put("dt", "2012_01_01"); @@ -731,4 +766,235 @@ public void testDropPartitionsWithPartialSpec() throws Exception { } } + private void startReplicationTargetMetaStoreIfRequired() throws Exception { + if (!isReplicationTargetHCatRunning) { + Thread t = new Thread(new RunMS(replicationTargetHCatPort) + .arg("--hiveconf") + .arg("javax.jdo.option.ConnectionURL") // Reset, to use a different Derby instance. + .arg(hcatConf.get("javax.jdo.option.ConnectionURL") + .replace("metastore", "target_metastore"))); + t.start(); + Thread.sleep(10000); + replicationTargetHCatConf = new HiveConf(hcatConf); + replicationTargetHCatConf.setVar(HiveConf.ConfVars.METASTOREURIS, + "thrift://localhost:" + replicationTargetHCatPort); + isReplicationTargetHCatRunning = true; + } + } + + /** + * Test for detecting schema-changes for an HCatalog table, across 2 different HCat instances. + * A table is created with the same schema on 2 HCat instances. The table-schema is modified on the source HCat + * instance (columns, I/O formats, SerDe definitions, etc.). The table metadata is compared between source + * and target, the changes are detected and propagated to target. + * @throws Exception + */ + @Test + public void testTableSchemaPropagation() throws Exception { + try { + startReplicationTargetMetaStoreIfRequired(); + HCatClient sourceMetaStore = HCatClient.create(new Configuration(hcatConf)); + final String dbName = "myDb"; + final String tableName = "myTable"; + + sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + + sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + List columnSchema = Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""), + new HCatFieldSchema("bar", Type.STRING, "")); + + List partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""), + new HCatFieldSchema("grid", Type.STRING, "")); + + HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema).partCols(partitionSchema); + sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build()); + + // Verify that the sourceTable was created successfully. + sourceTable = sourceMetaStore.getTable(dbName, tableName); + assertNotNull("Table couldn't be queried for. ", sourceTable); + + // Serialize Table definition. Deserialize using the target HCatClient instance. 
+ String tableStringRep = sourceMetaStore.serializeTable(sourceTable); + HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf)); + targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + + HCatTable targetTable = targetMetaStore.deserializeTable(tableStringRep); + + assertEquals("Table after deserialization should have been identical to sourceTable.", + sourceTable.diff(targetTable), HCatTable.NO_DIFF); + + // Create table on Target. + targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build()); + // Verify that the created table is identical to sourceTable. + targetTable = targetMetaStore.getTable(dbName, tableName); + assertEquals("Table after deserialization should have been identical to sourceTable.", + sourceTable.diff(targetTable), HCatTable.NO_DIFF); + + // Modify sourceTable. + List newColumnSchema = new ArrayList(columnSchema); + newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, "")); + Map tableParams = new HashMap(1); + tableParams.put("orc.compress", "ZLIB"); + sourceTable.cols(newColumnSchema) // Add a column. + .fileFormat("orcfile") // Change SerDe, File I/O formats. + .tblProps(tableParams) + .serdeParam(serdeConstants.FIELD_DELIM, Character.toString('\001')); + sourceMetaStore.updateTableSchema(dbName, tableName, sourceTable); + sourceTable = sourceMetaStore.getTable(dbName, tableName); + + // Diff against table on target. + + EnumSet diff = targetTable.diff(sourceTable); + assertTrue("Couldn't find change in column-schema.", + diff.contains(HCatTable.TableAttribute.COLUMNS)); + assertTrue("Couldn't find change in InputFormat.", + diff.contains(HCatTable.TableAttribute.INPUT_FORMAT)); + assertTrue("Couldn't find change in OutputFormat.", + diff.contains(HCatTable.TableAttribute.OUTPUT_FORMAT)); + assertTrue("Couldn't find change in SerDe.", + diff.contains(HCatTable.TableAttribute.SERDE)); + assertTrue("Couldn't find change in SerDe parameters.", + diff.contains(HCatTable.TableAttribute.SERDE_PROPERTIES)); + assertTrue("Couldn't find change in Table parameters.", + diff.contains(HCatTable.TableAttribute.TABLE_PROPERTIES)); + + // Replicate the changes to the replicated-table. + targetMetaStore.updateTableSchema(dbName, tableName, targetTable.resolve(sourceTable, diff)); + targetTable = targetMetaStore.getTable(dbName, tableName); + + assertEquals("After propagating schema changes, source and target tables should have been equivalent.", + targetTable.diff(sourceTable), HCatTable.NO_DIFF); + + } + catch (Exception unexpected) { + LOG.error("Unexpected exception!", unexpected); + assertTrue("Unexpected exception! " + unexpected.getMessage(), false); + } + } + + /** + * Test that partition-definitions can be replicated between HCat-instances, + * independently of table-metadata replication. + * 2 identical tables are created on 2 different HCat instances ("source" and "target"). + * On the source instance, + * 1. One partition is added with the old format ("TEXTFILE"). + * 2. The table is updated with an additional column and the data-format changed to ORC. + * 3. Another partition is added with the new format. + * 4. The partitions' metadata is copied to the target HCat instance, without updating the target table definition. + * 5. The partitions' metadata is tested to be an exact replica of that on the source. 
+ * @throws Exception + */ + @Test + public void testPartitionRegistrationWithCustomSchema() throws Exception { + try { + startReplicationTargetMetaStoreIfRequired(); + + HCatClient sourceMetaStore = HCatClient.create(new Configuration(hcatConf)); + final String dbName = "myDb"; + final String tableName = "myTable"; + + sourceMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + + sourceMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + List columnSchema = new ArrayList( + Arrays.asList(new HCatFieldSchema("foo", Type.INT, ""), + new HCatFieldSchema("bar", Type.STRING, ""))); + + List partitionSchema = Arrays.asList(new HCatFieldSchema("dt", Type.STRING, ""), + new HCatFieldSchema("grid", Type.STRING, "")); + + HCatTable sourceTable = new HCatTable(dbName, tableName).cols(columnSchema) + .partCols(partitionSchema) + .comment("Source table."); + + sourceMetaStore.createTable(HCatCreateTableDesc.create(sourceTable).build()); + + // Verify that the sourceTable was created successfully. + sourceTable = sourceMetaStore.getTable(dbName, tableName); + assertNotNull("Table couldn't be queried for. ", sourceTable); + + // Partitions added now should inherit table-schema, properties, etc. + Map partitionSpec_1 = new HashMap(); + partitionSpec_1.put("grid", "AB"); + partitionSpec_1.put("dt", "2011_12_31"); + HCatPartition sourcePartition_1 = new HCatPartition(sourceTable, partitionSpec_1, ""); + + sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_1).build()); + assertEquals("Unexpected number of partitions. ", + sourceMetaStore.getPartitions(dbName, tableName).size(), 1); + // Verify that partition_1 was added correctly, and properties were inherited from the HCatTable. + HCatPartition addedPartition_1 = sourceMetaStore.getPartition(dbName, tableName, partitionSpec_1); + assertEquals("Column schema doesn't match.", addedPartition_1.getColumns(), sourceTable.getCols()); + assertEquals("InputFormat doesn't match.", addedPartition_1.getInputFormat(), sourceTable.getInputFileFormat()); + assertEquals("OutputFormat doesn't match.", addedPartition_1.getOutputFormat(), sourceTable.getOutputFileFormat()); + assertEquals("SerDe doesn't match.", addedPartition_1.getSerDe(), sourceTable.getSerdeLib()); + assertEquals("SerDe params don't match.", addedPartition_1.getSerdeParams(), sourceTable.getSerdeParams()); + + // Replicate table definition. + + HCatClient targetMetaStore = HCatClient.create(new Configuration(replicationTargetHCatConf)); + targetMetaStore.dropDatabase(dbName, true, HCatClient.DropDBMode.CASCADE); + + targetMetaStore.createDatabase(HCatCreateDBDesc.create(dbName).build()); + // Make a copy of the source-table, as would be done across class-loaders. + HCatTable targetTable = targetMetaStore.deserializeTable(sourceMetaStore.serializeTable(sourceTable)); + targetMetaStore.createTable(HCatCreateTableDesc.create(targetTable).build()); + targetTable = targetMetaStore.getTable(dbName, tableName); + + assertEquals("Created table doesn't match the source.", + targetTable.diff(sourceTable), HCatTable.NO_DIFF); + + // Modify Table schema at the source. + List newColumnSchema = new ArrayList(columnSchema); + newColumnSchema.add(new HCatFieldSchema("goo_new", Type.DOUBLE, "")); + Map tableParams = new HashMap(1); + tableParams.put("orc.compress", "ZLIB"); + sourceTable.cols(newColumnSchema) // Add a column. + .fileFormat("orcfile") // Change SerDe, File I/O formats. 
+ .tblProps(tableParams) + .serdeParam(serdeConstants.FIELD_DELIM, Character.toString('\001')); + sourceMetaStore.updateTableSchema(dbName, tableName, sourceTable); + sourceTable = sourceMetaStore.getTable(dbName, tableName); + + // Add another partition to the source. + Map partitionSpec_2 = new HashMap(); + partitionSpec_2.put("grid", "AB"); + partitionSpec_2.put("dt", "2012_01_01"); + HCatPartition sourcePartition_2 = new HCatPartition(sourceTable, partitionSpec_2, ""); + sourceMetaStore.addPartition(HCatAddPartitionDesc.create(sourcePartition_2).build()); + + // The source table now has 2 partitions, one in TEXTFILE, the other in ORC. + // Test that adding these partitions to the target-table *without* replicating the table-change. + + List sourcePartitions = sourceMetaStore.getPartitions(dbName, tableName); + assertEquals("Unexpected number of source partitions.", 2, sourcePartitions.size()); + + List addPartitionDescs = new ArrayList(sourcePartitions.size()); + for (HCatPartition partition : sourcePartitions) { + addPartitionDescs.add(HCatAddPartitionDesc.create(partition).build()); + } + + targetMetaStore.addPartitions(addPartitionDescs); + + List targetPartitions = targetMetaStore.getPartitions(dbName, tableName); + + assertEquals("Expected the same number of partitions. ", targetPartitions.size(), sourcePartitions.size()); + + for (int i=0; i inputs = getHivePrivilegeObjectInputs(); + List inputs = getHivePrivilegeObjectInputs().getLeft(); checkSingleTableInput(inputs); HivePrivilegeObject tableObj = inputs.get(0); assertEquals("no of columns used", 3, tableObj.getColumns().size()); @@ -123,7 +132,7 @@ public void testInputAllColumnsUsed() throws HiveAuthzPluginException, HiveAcces int status = driver.compile("select * from " + tableName + " order by i"); assertEquals(0, status); - List inputs = getHivePrivilegeObjectInputs(); + List inputs = getHivePrivilegeObjectInputs().getLeft(); checkSingleTableInput(inputs); HivePrivilegeObject tableObj = inputs.get(0); assertEquals("no of columns used", 5, tableObj.getColumns().size()); @@ -139,12 +148,60 @@ public void testInputNoColumnsUsed() throws HiveAuthzPluginException, HiveAccess int status = driver.compile("describe " + tableName); assertEquals(0, status); - List inputs = getHivePrivilegeObjectInputs(); + List inputs = getHivePrivilegeObjectInputs().getLeft(); checkSingleTableInput(inputs); HivePrivilegeObject tableObj = inputs.get(0); assertNull("columns used", tableObj.getColumns()); } + @Test + public void testPermFunction() throws HiveAuthzPluginException, HiveAccessControlException, + CommandNeedRetryException { + + reset(mockedAuthorizer); + final String funcName = "testauthfunc1"; + int status = driver.compile("create function " + dbName + "." 
+ funcName + + " as 'org.apache.hadoop.hive.ql.udf.UDFPI'"); + assertEquals(0, status); + + List outputs = getHivePrivilegeObjectInputs().getRight(); + + HivePrivilegeObject funcObj; + HivePrivilegeObject dbObj; + assertEquals("number of output object", 2, outputs.size()); + if(outputs.get(0).getType() == HivePrivilegeObjectType.FUNCTION) { + funcObj = outputs.get(0); + dbObj = outputs.get(1); + } else { + funcObj = outputs.get(1); + dbObj = outputs.get(0); + } + + assertEquals("input type", HivePrivilegeObjectType.FUNCTION, funcObj.getType()); + assertTrue("function name", funcName.equalsIgnoreCase(funcObj.getObjectName())); + assertTrue("db name", dbName.equalsIgnoreCase(funcObj.getDbname())); + + assertEquals("input type", HivePrivilegeObjectType.DATABASE, dbObj.getType()); + assertTrue("db name", dbName.equalsIgnoreCase(dbObj.getDbname())); + } + + @Test + public void testTempFunction() throws HiveAuthzPluginException, HiveAccessControlException, + CommandNeedRetryException { + + reset(mockedAuthorizer); + final String funcName = "testAuthFunc2"; + int status = driver.compile("create temporary function " + funcName + + " as 'org.apache.hadoop.hive.ql.udf.UDFPI'"); + assertEquals(0, status); + + List outputs = getHivePrivilegeObjectInputs().getRight(); + HivePrivilegeObject funcObj = outputs.get(0); + assertEquals("input type", HivePrivilegeObjectType.FUNCTION, funcObj.getType()); + assertTrue("function name", funcName.equalsIgnoreCase(funcObj.getObjectName())); + assertEquals("db name", null, funcObj.getDbname()); + } + private void checkSingleTableInput(List inputs) { assertEquals("number of inputs", 1, inputs.size()); @@ -154,23 +211,26 @@ private void checkSingleTableInput(List inputs) { } /** - * @return the inputs passed in current call to authorizer.checkPrivileges + * @return pair with left value as inputs and right value as outputs, + * passed in current call to authorizer.checkPrivileges * @throws HiveAuthzPluginException * @throws HiveAccessControlException */ - private List getHivePrivilegeObjectInputs() throws HiveAuthzPluginException, + private Pair, List> getHivePrivilegeObjectInputs() throws HiveAuthzPluginException, HiveAccessControlException { // Create argument capturer // a class variable cast to this generic of generic class Class> class_listPrivObjects = (Class) List.class; ArgumentCaptor> inputsCapturer = ArgumentCaptor .forClass(class_listPrivObjects); + ArgumentCaptor> outputsCapturer = ArgumentCaptor + .forClass(class_listPrivObjects); verify(mockedAuthorizer).checkPrivileges(any(HiveOperationType.class), - inputsCapturer.capture(), Matchers.anyListOf(HivePrivilegeObject.class), + inputsCapturer.capture(), outputsCapturer.capture(), any(HiveAuthzContext.class)); - return inputsCapturer.getValue(); + return new ImmutablePair(inputsCapturer.getValue(), outputsCapturer.getValue()); } } diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index 13aa39b..ae128a9 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -868,7 +868,7 @@ private void doTestSelectAll(String tableName, int maxRows, int fetchSize) throw assertNotNull("ResultSet is null", res); assertTrue("getResultSet() not returning expected ResultSet", res == stmt .getResultSet()); - assertEquals("get update count not as expected", 0, stmt.getUpdateCount()); + assertEquals("get update 
count not as expected", -1, stmt.getUpdateCount()); int i = 0; ResultSetMetaData meta = res.getMetaData(); diff --git a/itests/qtest/pom.xml b/itests/qtest/pom.xml index 291a8f2..7a932ce 100644 --- a/itests/qtest/pom.xml +++ b/itests/qtest/pom.xml @@ -42,6 +42,11 @@ + org.apache.accumulo + accumulo-minicluster + test + + org.apache.hive hive-ant ${project.version} @@ -402,6 +407,7 @@ + @@ -590,6 +596,20 @@ initScript="q_test_init.sql" cleanupScript="q_test_cleanup.sql"/> + + + diff --git a/itests/src/test/resources/testconfiguration.properties b/itests/src/test/resources/testconfiguration.properties index 7c68c58..5a62528 100644 --- a/itests/src/test/resources/testconfiguration.properties +++ b/itests/src/test/resources/testconfiguration.properties @@ -146,7 +146,8 @@ minitez.query.files=bucket_map_join_tez1.q,\ tez_join_tests.q,\ tez_joins_explain.q,\ tez_schema_evolution.q,\ - tez_union.q + tez_union.q,\ + tez_union_decimal.q beeline.positive.exclude=add_part_exist.q,\ alter1.q,\ diff --git a/itests/util/pom.xml b/itests/util/pom.xml index aca01cb..8a7f3a7 100644 --- a/itests/util/pom.xml +++ b/itests/util/pom.xml @@ -35,6 +35,21 @@ + org.apache.accumulo + accumulo-minicluster + + + org.apache.hive + hive-accumulo-handler + ${project.version} + + + org.apache.hive + hive-accumulo-handler + ${project.version} + tests + + org.apache.hive hive-common ${project.version} diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java new file mode 100644 index 0000000..b83543a --- /dev/null +++ b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloQTestUtil.java @@ -0,0 +1,38 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import org.apache.hadoop.hive.ql.QTestUtil; + +/** + * AccumuloQTestUtil initializes Accumulo-specific test fixtures. 
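An aside on the expectation change above in TestJdbcDriver2: together with the HiveStatement change later in this patch, getUpdateCount() now reports -1 for statements that return a result set, in line with the JDBC convention that -1 means "no update count". A hypothetical client-side check, with a placeholder query and a 'connection' assumed to come from the Hive JDBC driver:

    Statement stmt = connection.createStatement();
    if (stmt.execute("SELECT * FROM some_table")) {   // true: the first result is a ResultSet
      ResultSet rs = stmt.getResultSet();
      // Queries are not updates, so the driver now answers -1 here instead of 0.
      assert stmt.getUpdateCount() == -1;
      while (rs.next()) {
        // consume rows ...
      }
    }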
+ */ +public class AccumuloQTestUtil extends QTestUtil { + public AccumuloQTestUtil(String outDir, String logDir, MiniClusterType miniMr, + AccumuloTestSetup setup, String initScript, String cleanupScript) throws Exception { + + super(outDir, logDir, miniMr, null, initScript, cleanupScript); + setup.setupWithHiveConf(conf); + super.init(); + } + + @Override + public void init() throws Exception { + // defer + } +} diff --git a/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java new file mode 100644 index 0000000..132e8c8 --- /dev/null +++ b/itests/util/src/main/java/org/apache/hadoop/hive/accumulo/AccumuloTestSetup.java @@ -0,0 +1,127 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.hadoop.hive.accumulo; + +import java.io.File; +import java.sql.Date; +import java.sql.Timestamp; + +import junit.extensions.TestSetup; +import junit.framework.Test; + +import org.apache.accumulo.core.client.AccumuloException; +import org.apache.accumulo.core.client.AccumuloSecurityException; +import org.apache.accumulo.core.client.BatchWriter; +import org.apache.accumulo.core.client.BatchWriterConfig; +import org.apache.accumulo.core.client.Connector; +import org.apache.accumulo.core.client.TableExistsException; +import org.apache.accumulo.core.client.TableNotFoundException; +import org.apache.accumulo.core.client.admin.TableOperations; +import org.apache.accumulo.core.data.Mutation; +import org.apache.accumulo.minicluster.MiniAccumuloCluster; +import org.apache.accumulo.minicluster.MiniAccumuloConfig; +import org.apache.hadoop.hive.common.type.HiveDecimal; +import org.apache.hadoop.hive.conf.HiveConf; + +/** + * Start and stop an AccumuloMiniCluster for testing purposes + */ +public class AccumuloTestSetup extends TestSetup { + public static final String PASSWORD = "password"; + public static final String TABLE_NAME = "accumuloHiveTable"; + + protected MiniAccumuloCluster miniCluster; + + public AccumuloTestSetup(Test test) { + super(test); + } + + protected void setupWithHiveConf(HiveConf conf) throws Exception { + if (null == miniCluster) { + String testTmpDir = System.getProperty("test.tmp.dir"); + File tmpDir = new File(testTmpDir, "accumulo"); + + MiniAccumuloConfig cfg = new MiniAccumuloConfig(tmpDir, PASSWORD); + cfg.setNumTservers(1); + + miniCluster = new MiniAccumuloCluster(cfg); + + miniCluster.start(); + + createAccumuloTable(miniCluster.getConnector("root", PASSWORD)); + } + + // Setup connection information + conf.set(AccumuloConnectionParameters.USER_NAME, "root"); + conf.set(AccumuloConnectionParameters.USER_PASS, PASSWORD); + conf.set(AccumuloConnectionParameters.ZOOKEEPERS, miniCluster.getZooKeepers()); + 
conf.set(AccumuloConnectionParameters.INSTANCE_NAME, miniCluster.getInstanceName()); + } + + protected void createAccumuloTable(Connector conn) throws TableExistsException, + TableNotFoundException, AccumuloException, AccumuloSecurityException { + TableOperations tops = conn.tableOperations(); + if (tops.exists(TABLE_NAME)) { + tops.delete(TABLE_NAME); + } + + tops.create(TABLE_NAME); + + boolean[] booleans = new boolean[] {true, false, true}; + byte [] bytes = new byte [] { Byte.MIN_VALUE, -1, Byte.MAX_VALUE }; + short [] shorts = new short [] { Short.MIN_VALUE, -1, Short.MAX_VALUE }; + int [] ints = new int [] { Integer.MIN_VALUE, -1, Integer.MAX_VALUE }; + long [] longs = new long [] { Long.MIN_VALUE, -1, Long.MAX_VALUE }; + String [] strings = new String [] { "Hadoop, Accumulo", "Hive", "Test Strings" }; + float [] floats = new float [] { Float.MIN_VALUE, -1.0F, Float.MAX_VALUE }; + double [] doubles = new double [] { Double.MIN_VALUE, -1.0, Double.MAX_VALUE }; + HiveDecimal[] decimals = new HiveDecimal[] {HiveDecimal.create("3.14159"), HiveDecimal.create("2.71828"), HiveDecimal.create("0.57721")}; + Date[] dates = new Date[] {Date.valueOf("2014-01-01"), Date.valueOf("2014-03-01"), Date.valueOf("2014-05-01")}; + Timestamp[] timestamps = new Timestamp[] {new Timestamp(50), new Timestamp(100), new Timestamp(150)}; + + BatchWriter bw = conn.createBatchWriter(TABLE_NAME, new BatchWriterConfig()); + final String cf = "cf"; + try { + for (int i = 0; i < 3; i++) { + Mutation m = new Mutation("key-" + i); + m.put(cf, "cq-boolean", Boolean.toString(booleans[i])); + m.put(cf.getBytes(), "cq-byte".getBytes(), new byte[] {bytes[i]}); + m.put(cf, "cq-short", Short.toString(shorts[i])); + m.put(cf, "cq-int", Integer.toString(ints[i])); + m.put(cf, "cq-long", Long.toString(longs[i])); + m.put(cf, "cq-string", strings[i]); + m.put(cf, "cq-float", Float.toString(floats[i])); + m.put(cf, "cq-double", Double.toString(doubles[i])); + m.put(cf, "cq-decimal", decimals[i].toString()); + m.put(cf, "cq-date", dates[i].toString()); + m.put(cf, "cq-timestamp", timestamps[i].toString()); + + bw.addMutation(m); + } + } finally { + bw.close(); + } + } + + @Override + protected void tearDown() throws Exception { + if (null != miniCluster) { + miniCluster.stop(); + miniCluster = null; + } + } +} diff --git a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java index 0e04ced..2cbf58c 100644 --- a/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java +++ b/jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java @@ -21,6 +21,7 @@ import java.sql.Connection; import java.sql.ResultSet; import java.sql.SQLException; +import java.sql.SQLFeatureNotSupportedException; import java.sql.SQLWarning; import java.util.HashMap; import java.util.Map; @@ -435,7 +436,7 @@ public int getFetchSize() throws SQLException { @Override public ResultSet getGeneratedKeys() throws SQLException { - throw new SQLException("Method not supported"); + throw new SQLFeatureNotSupportedException("Method not supported"); } /* @@ -469,7 +470,7 @@ public int getMaxRows() throws SQLException { @Override public boolean getMoreResults() throws SQLException { - throw new SQLException("Method not supported"); + return false; } /* @@ -480,7 +481,7 @@ public boolean getMoreResults() throws SQLException { @Override public boolean getMoreResults(int current) throws SQLException { - throw new SQLException("Method not supported"); + throw new SQLFeatureNotSupportedException("Method not supported"); } /* @@ -550,7 
+551,7 @@ public int getResultSetType() throws SQLException { @Override public int getUpdateCount() throws SQLException { checkConnection("getUpdateCount"); - return 0; + return -1; } /* @@ -600,7 +601,7 @@ public boolean isPoolable() throws SQLException { @Override public void setCursorName(String name) throws SQLException { - throw new SQLException("Method not supported"); + throw new SQLFeatureNotSupportedException("Method not supported"); } /* @@ -611,7 +612,9 @@ public void setCursorName(String name) throws SQLException { @Override public void setEscapeProcessing(boolean enable) throws SQLException { - throw new SQLException("Method not supported"); + if (enable) { + throw new SQLException("Method not supported"); + } } /* diff --git a/metastore/if/hive_metastore.thrift b/metastore/if/hive_metastore.thrift index cb326f4..5567bef 100755 --- a/metastore/if/hive_metastore.thrift +++ b/metastore/if/hive_metastore.thrift @@ -366,6 +366,10 @@ struct AggrStats { 2: required i64 partsFound // number of partitions for which stats were found } +struct SetPartitionsStatsRequest { +1: required list colStats +} + // schema of the table/query results etc. struct Schema { // column names, types, comments @@ -960,6 +964,8 @@ service ThriftHiveMetastore extends fb303.FacebookService (1:NoSuchObjectException o1, 2:MetaException o2) AggrStats get_aggr_stats_for(1:PartitionsStatsRequest request) throws (1:NoSuchObjectException o1, 2:MetaException o2) + bool set_aggr_stats_for(1:SetPartitionsStatsRequest request) throws + (1:NoSuchObjectException o1, 2:InvalidObjectException o2, 3:MetaException o3, 4:InvalidInputException o4) // delete APIs attempt to delete column statistics, if found, associated with a given db_name, tbl_name, [part_name] diff --git a/metastore/scripts/upgrade/derby/019-HIVE-7784.derby.sql b/metastore/scripts/upgrade/derby/019-HIVE-7784.derby.sql new file mode 100644 index 0000000..66e6c12 --- /dev/null +++ b/metastore/scripts/upgrade/derby/019-HIVE-7784.derby.sql @@ -0,0 +1 @@ +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); diff --git a/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql b/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql index 11ce4d6..90d5e9b 100644 --- a/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql +++ b/metastore/scripts/upgrade/derby/hive-schema-0.14.0.derby.sql @@ -114,6 +114,8 @@ CREATE INDEX "APP"."TABLECOLUMNPRIVILEGEINDEX" ON "APP"."TBL_COL_PRIVS" ("TBL_ID CREATE UNIQUE INDEX "APP"."DBPRIVILEGEINDEX" ON "APP"."DB_PRIVS" ("DB_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "DB_PRIV", "GRANTOR", "GRANTOR_TYPE"); +CREATE INDEX "APP"."PCS_STATS_IDX" ON "APP"."PART_COL_STATS" ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + CREATE INDEX "APP"."PARTPRIVILEGEINDEX" ON "APP"."PART_PRIVS" ("PART_ID", "PRINCIPAL_NAME", "PRINCIPAL_TYPE", "PART_PRIV", "GRANTOR", "GRANTOR_TYPE"); CREATE UNIQUE INDEX "APP"."ROLEENTITYINDEX" ON "APP"."ROLES" ("ROLE_NAME"); diff --git a/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql b/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql index 82e7733..75193ce 100644 --- a/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql +++ b/metastore/scripts/upgrade/derby/upgrade-0.13.0-to-0.14.0.derby.sql @@ -1,3 +1,5 @@ -- Upgrade MetaStore schema from 0.13.0 to 0.14.0 +RUN '019-HIVE-7784.derby.sql'; + UPDATE "APP".VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 
0.14.0' where VER_ID=1; diff --git a/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql b/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql new file mode 100644 index 0000000..cc6ba09 --- /dev/null +++ b/metastore/scripts/upgrade/mssql/002-HIVE-7784.mssql.sql @@ -0,0 +1 @@ +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); diff --git a/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql b/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql new file mode 100644 index 0000000..2b5f3b8 --- /dev/null +++ b/metastore/scripts/upgrade/mssql/upgrade-0.13.0-to-0.14.0.mssql.sql @@ -0,0 +1,6 @@ +SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE; + +:r 002-HIVE-7784.mssql.sql; + +UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1; +SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS MESSAGE; diff --git a/metastore/scripts/upgrade/mssql/upgrade.order.mssql b/metastore/scripts/upgrade/mssql/upgrade.order.mssql index 6b84ce2..638345a 100644 --- a/metastore/scripts/upgrade/mssql/upgrade.order.mssql +++ b/metastore/scripts/upgrade/mssql/upgrade.order.mssql @@ -1,2 +1,3 @@ 0.11.0-to-0.12.0 0.12.0-to-0.13.0 +0.13.0-to-0.14.0 diff --git a/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql b/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql new file mode 100644 index 0000000..5d847bc --- /dev/null +++ b/metastore/scripts/upgrade/mysql/019-HIVE-7784.mysql.sql @@ -0,0 +1 @@ +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; diff --git a/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql b/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql index a509175..b479aa2 100644 --- a/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/hive-schema-0.14.0.mysql.sql @@ -704,6 +704,8 @@ CREATE TABLE IF NOT EXISTS `PART_COL_STATS` ( CONSTRAINT `PART_COL_STATS_FK` FOREIGN KEY (`PART_ID`) REFERENCES `PARTITIONS` (`PART_ID`) ) ENGINE=InnoDB DEFAULT CHARSET=latin1; +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME) USING BTREE; + -- -- Table structure for table `TYPES` -- diff --git a/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql b/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql index 773ec34..ddcc3f5 100644 --- a/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql +++ b/metastore/scripts/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql @@ -1,4 +1,7 @@ SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS ' '; +SOURCE 019-HIVE-7784.mysql.sql; + UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS ' '; + diff --git a/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql b/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql new file mode 100644 index 0000000..cc6ba09 --- /dev/null +++ b/metastore/scripts/upgrade/oracle/020-HIVE-7784.oracle.sql @@ -0,0 +1 @@ +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); diff --git a/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql b/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql index d43b59b..b810b3d 100644 --- a/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql +++ 
b/metastore/scripts/upgrade/oracle/hive-schema-0.14.0.oracle.sql @@ -527,6 +527,8 @@ ALTER TABLE PART_COL_STATS ADD CONSTRAINT PART_COL_STATS_FK FOREIGN KEY (PART_ID CREATE INDEX PART_COL_STATS_N49 ON PART_COL_STATS (PART_ID); +CREATE INDEX PCS_STATS_IDX ON PART_COL_STATS (DB_NAME,TABLE_NAME,COLUMN_NAME,PARTITION_NAME); + CREATE TABLE FUNCS ( FUNC_ID NUMBER NOT NULL, CLASS_NAME VARCHAR2(4000), diff --git a/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql b/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql index b212fdb..9b09555 100644 --- a/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql +++ b/metastore/scripts/upgrade/oracle/upgrade-0.13.0-to-0.14.0.oracle.sql @@ -1,4 +1,6 @@ SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual; +@020-HIVE-7784.oracle.sql; + UPDATE VERSION SET SCHEMA_VERSION='0.14.0', VERSION_COMMENT='Hive release version 0.14.0' where VER_ID=1; SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0' AS Status from dual; diff --git a/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql b/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql new file mode 100644 index 0000000..ac6b749 --- /dev/null +++ b/metastore/scripts/upgrade/postgres/019-HIVE-7784.postgres.sql @@ -0,0 +1 @@ +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); diff --git a/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql b/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql index e292850..5358f50 100644 --- a/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/hive-schema-0.14.0.postgres.sql @@ -1059,6 +1059,13 @@ CREATE INDEX "PART_PRIVS_N49" ON "PART_PRIVS" USING btree ("PART_ID"); -- +-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + +CREATE INDEX "PCS_STATS_IDX" ON "PART_COL_STATS" USING btree ("DB_NAME","TABLE_NAME","COLUMN_NAME","PARTITION_NAME"); + + +-- -- Name: ROLE_MAP_N49; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: -- diff --git a/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql b/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql index 23a6025..91aea44 100644 --- a/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql +++ b/metastore/scripts/upgrade/postgres/upgrade-0.13.0-to-0.14.0.postgres.sql @@ -1,6 +1,13 @@ SELECT 'Upgrading MetaStore schema from 0.13.0 to 0.14.0'; +\i 019-HIVE-7784.postgres.sql; + UPDATE "VERSION" SET "SCHEMA_VERSION"='0.14.0', "VERSION_COMMENT"='Hive release version 0.14.0' where "VER_ID"=1; SELECT 'Finished upgrading MetaStore schema from 0.13.0 to 0.14.0'; +-- +-- Name: PCS_STATS_IDX; Type: INDEX; Schema: public; Owner: hiveuser; Tablespace: +-- + + diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp index 23b5edf..5517b1a 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.cpp @@ -1096,14 +1096,14 @@ uint32_t ThriftHiveMetastore_get_databases_result::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size369; - ::apache::thrift::protocol::TType _etype372; - xfer += iprot->readListBegin(_etype372, _size369); - this->success.resize(_size369); - uint32_t _i373; - for 
(_i373 = 0; _i373 < _size369; ++_i373) + uint32_t _size375; + ::apache::thrift::protocol::TType _etype378; + xfer += iprot->readListBegin(_etype378, _size375); + this->success.resize(_size375); + uint32_t _i379; + for (_i379 = 0; _i379 < _size375; ++_i379) { - xfer += iprot->readString(this->success[_i373]); + xfer += iprot->readString(this->success[_i379]); } xfer += iprot->readListEnd(); } @@ -1142,10 +1142,10 @@ uint32_t ThriftHiveMetastore_get_databases_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter374; - for (_iter374 = this->success.begin(); _iter374 != this->success.end(); ++_iter374) + std::vector ::const_iterator _iter380; + for (_iter380 = this->success.begin(); _iter380 != this->success.end(); ++_iter380) { - xfer += oprot->writeString((*_iter374)); + xfer += oprot->writeString((*_iter380)); } xfer += oprot->writeListEnd(); } @@ -1184,14 +1184,14 @@ uint32_t ThriftHiveMetastore_get_databases_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size375; - ::apache::thrift::protocol::TType _etype378; - xfer += iprot->readListBegin(_etype378, _size375); - (*(this->success)).resize(_size375); - uint32_t _i379; - for (_i379 = 0; _i379 < _size375; ++_i379) + uint32_t _size381; + ::apache::thrift::protocol::TType _etype384; + xfer += iprot->readListBegin(_etype384, _size381); + (*(this->success)).resize(_size381); + uint32_t _i385; + for (_i385 = 0; _i385 < _size381; ++_i385) { - xfer += iprot->readString((*(this->success))[_i379]); + xfer += iprot->readString((*(this->success))[_i385]); } xfer += iprot->readListEnd(); } @@ -1289,14 +1289,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size380; - ::apache::thrift::protocol::TType _etype383; - xfer += iprot->readListBegin(_etype383, _size380); - this->success.resize(_size380); - uint32_t _i384; - for (_i384 = 0; _i384 < _size380; ++_i384) + uint32_t _size386; + ::apache::thrift::protocol::TType _etype389; + xfer += iprot->readListBegin(_etype389, _size386); + this->success.resize(_size386); + uint32_t _i390; + for (_i390 = 0; _i390 < _size386; ++_i390) { - xfer += iprot->readString(this->success[_i384]); + xfer += iprot->readString(this->success[_i390]); } xfer += iprot->readListEnd(); } @@ -1335,10 +1335,10 @@ uint32_t ThriftHiveMetastore_get_all_databases_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter385; - for (_iter385 = this->success.begin(); _iter385 != this->success.end(); ++_iter385) + std::vector ::const_iterator _iter391; + for (_iter391 = this->success.begin(); _iter391 != this->success.end(); ++_iter391) { - xfer += oprot->writeString((*_iter385)); + xfer += oprot->writeString((*_iter391)); } xfer += oprot->writeListEnd(); } @@ -1377,14 +1377,14 @@ uint32_t ThriftHiveMetastore_get_all_databases_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size386; - ::apache::thrift::protocol::TType _etype389; - xfer += 
iprot->readListBegin(_etype389, _size386); - (*(this->success)).resize(_size386); - uint32_t _i390; - for (_i390 = 0; _i390 < _size386; ++_i390) + uint32_t _size392; + ::apache::thrift::protocol::TType _etype395; + xfer += iprot->readListBegin(_etype395, _size392); + (*(this->success)).resize(_size392); + uint32_t _i396; + for (_i396 = 0; _i396 < _size392; ++_i396) { - xfer += iprot->readString((*(this->success))[_i390]); + xfer += iprot->readString((*(this->success))[_i396]); } xfer += iprot->readListEnd(); } @@ -2327,17 +2327,17 @@ uint32_t ThriftHiveMetastore_get_type_all_result::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size391; - ::apache::thrift::protocol::TType _ktype392; - ::apache::thrift::protocol::TType _vtype393; - xfer += iprot->readMapBegin(_ktype392, _vtype393, _size391); - uint32_t _i395; - for (_i395 = 0; _i395 < _size391; ++_i395) + uint32_t _size397; + ::apache::thrift::protocol::TType _ktype398; + ::apache::thrift::protocol::TType _vtype399; + xfer += iprot->readMapBegin(_ktype398, _vtype399, _size397); + uint32_t _i401; + for (_i401 = 0; _i401 < _size397; ++_i401) { - std::string _key396; - xfer += iprot->readString(_key396); - Type& _val397 = this->success[_key396]; - xfer += _val397.read(iprot); + std::string _key402; + xfer += iprot->readString(_key402); + Type& _val403 = this->success[_key402]; + xfer += _val403.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2376,11 +2376,11 @@ uint32_t ThriftHiveMetastore_get_type_all_result::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::map ::const_iterator _iter398; - for (_iter398 = this->success.begin(); _iter398 != this->success.end(); ++_iter398) + std::map ::const_iterator _iter404; + for (_iter404 = this->success.begin(); _iter404 != this->success.end(); ++_iter404) { - xfer += oprot->writeString(_iter398->first); - xfer += _iter398->second.write(oprot); + xfer += oprot->writeString(_iter404->first); + xfer += _iter404->second.write(oprot); } xfer += oprot->writeMapEnd(); } @@ -2419,17 +2419,17 @@ uint32_t ThriftHiveMetastore_get_type_all_presult::read(::apache::thrift::protoc if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size399; - ::apache::thrift::protocol::TType _ktype400; - ::apache::thrift::protocol::TType _vtype401; - xfer += iprot->readMapBegin(_ktype400, _vtype401, _size399); - uint32_t _i403; - for (_i403 = 0; _i403 < _size399; ++_i403) + uint32_t _size405; + ::apache::thrift::protocol::TType _ktype406; + ::apache::thrift::protocol::TType _vtype407; + xfer += iprot->readMapBegin(_ktype406, _vtype407, _size405); + uint32_t _i409; + for (_i409 = 0; _i409 < _size405; ++_i409) { - std::string _key404; - xfer += iprot->readString(_key404); - Type& _val405 = (*(this->success))[_key404]; - xfer += _val405.read(iprot); + std::string _key410; + xfer += iprot->readString(_key410); + Type& _val411 = (*(this->success))[_key410]; + xfer += _val411.read(iprot); } xfer += iprot->readMapEnd(); } @@ -2564,14 +2564,14 @@ uint32_t ThriftHiveMetastore_get_fields_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size406; - ::apache::thrift::protocol::TType _etype409; - xfer += iprot->readListBegin(_etype409, _size406); - 
this->success.resize(_size406); - uint32_t _i410; - for (_i410 = 0; _i410 < _size406; ++_i410) + uint32_t _size412; + ::apache::thrift::protocol::TType _etype415; + xfer += iprot->readListBegin(_etype415, _size412); + this->success.resize(_size412); + uint32_t _i416; + for (_i416 = 0; _i416 < _size412; ++_i416) { - xfer += this->success[_i410].read(iprot); + xfer += this->success[_i416].read(iprot); } xfer += iprot->readListEnd(); } @@ -2626,10 +2626,10 @@ uint32_t ThriftHiveMetastore_get_fields_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter411; - for (_iter411 = this->success.begin(); _iter411 != this->success.end(); ++_iter411) + std::vector ::const_iterator _iter417; + for (_iter417 = this->success.begin(); _iter417 != this->success.end(); ++_iter417) { - xfer += (*_iter411).write(oprot); + xfer += (*_iter417).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2676,14 +2676,14 @@ uint32_t ThriftHiveMetastore_get_fields_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size412; - ::apache::thrift::protocol::TType _etype415; - xfer += iprot->readListBegin(_etype415, _size412); - (*(this->success)).resize(_size412); - uint32_t _i416; - for (_i416 = 0; _i416 < _size412; ++_i416) + uint32_t _size418; + ::apache::thrift::protocol::TType _etype421; + xfer += iprot->readListBegin(_etype421, _size418); + (*(this->success)).resize(_size418); + uint32_t _i422; + for (_i422 = 0; _i422 < _size418; ++_i422) { - xfer += (*(this->success))[_i416].read(iprot); + xfer += (*(this->success))[_i422].read(iprot); } xfer += iprot->readListEnd(); } @@ -2834,14 +2834,14 @@ uint32_t ThriftHiveMetastore_get_schema_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size417; - ::apache::thrift::protocol::TType _etype420; - xfer += iprot->readListBegin(_etype420, _size417); - this->success.resize(_size417); - uint32_t _i421; - for (_i421 = 0; _i421 < _size417; ++_i421) + uint32_t _size423; + ::apache::thrift::protocol::TType _etype426; + xfer += iprot->readListBegin(_etype426, _size423); + this->success.resize(_size423); + uint32_t _i427; + for (_i427 = 0; _i427 < _size423; ++_i427) { - xfer += this->success[_i421].read(iprot); + xfer += this->success[_i427].read(iprot); } xfer += iprot->readListEnd(); } @@ -2896,10 +2896,10 @@ uint32_t ThriftHiveMetastore_get_schema_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter422; - for (_iter422 = this->success.begin(); _iter422 != this->success.end(); ++_iter422) + std::vector ::const_iterator _iter428; + for (_iter428 = this->success.begin(); _iter428 != this->success.end(); ++_iter428) { - xfer += (*_iter422).write(oprot); + xfer += (*_iter428).write(oprot); } xfer += oprot->writeListEnd(); } @@ -2946,14 +2946,14 @@ uint32_t ThriftHiveMetastore_get_schema_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size423; - ::apache::thrift::protocol::TType _etype426; - xfer += iprot->readListBegin(_etype426, 
_size423); - (*(this->success)).resize(_size423); - uint32_t _i427; - for (_i427 = 0; _i427 < _size423; ++_i427) + uint32_t _size429; + ::apache::thrift::protocol::TType _etype432; + xfer += iprot->readListBegin(_etype432, _size429); + (*(this->success)).resize(_size429); + uint32_t _i433; + for (_i433 = 0; _i433 < _size429; ++_i433) { - xfer += (*(this->success))[_i427].read(iprot); + xfer += (*(this->success))[_i433].read(iprot); } xfer += iprot->readListEnd(); } @@ -4008,14 +4008,14 @@ uint32_t ThriftHiveMetastore_get_tables_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size428; - ::apache::thrift::protocol::TType _etype431; - xfer += iprot->readListBegin(_etype431, _size428); - this->success.resize(_size428); - uint32_t _i432; - for (_i432 = 0; _i432 < _size428; ++_i432) + uint32_t _size434; + ::apache::thrift::protocol::TType _etype437; + xfer += iprot->readListBegin(_etype437, _size434); + this->success.resize(_size434); + uint32_t _i438; + for (_i438 = 0; _i438 < _size434; ++_i438) { - xfer += iprot->readString(this->success[_i432]); + xfer += iprot->readString(this->success[_i438]); } xfer += iprot->readListEnd(); } @@ -4054,10 +4054,10 @@ uint32_t ThriftHiveMetastore_get_tables_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter433; - for (_iter433 = this->success.begin(); _iter433 != this->success.end(); ++_iter433) + std::vector ::const_iterator _iter439; + for (_iter439 = this->success.begin(); _iter439 != this->success.end(); ++_iter439) { - xfer += oprot->writeString((*_iter433)); + xfer += oprot->writeString((*_iter439)); } xfer += oprot->writeListEnd(); } @@ -4096,14 +4096,14 @@ uint32_t ThriftHiveMetastore_get_tables_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size434; - ::apache::thrift::protocol::TType _etype437; - xfer += iprot->readListBegin(_etype437, _size434); - (*(this->success)).resize(_size434); - uint32_t _i438; - for (_i438 = 0; _i438 < _size434; ++_i438) + uint32_t _size440; + ::apache::thrift::protocol::TType _etype443; + xfer += iprot->readListBegin(_etype443, _size440); + (*(this->success)).resize(_size440); + uint32_t _i444; + for (_i444 = 0; _i444 < _size440; ++_i444) { - xfer += iprot->readString((*(this->success))[_i438]); + xfer += iprot->readString((*(this->success))[_i444]); } xfer += iprot->readListEnd(); } @@ -4222,14 +4222,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size439; - ::apache::thrift::protocol::TType _etype442; - xfer += iprot->readListBegin(_etype442, _size439); - this->success.resize(_size439); - uint32_t _i443; - for (_i443 = 0; _i443 < _size439; ++_i443) + uint32_t _size445; + ::apache::thrift::protocol::TType _etype448; + xfer += iprot->readListBegin(_etype448, _size445); + this->success.resize(_size445); + uint32_t _i449; + for (_i449 = 0; _i449 < _size445; ++_i449) { - xfer += iprot->readString(this->success[_i443]); + xfer += iprot->readString(this->success[_i449]); } xfer += iprot->readListEnd(); } @@ -4268,10 +4268,10 @@ uint32_t ThriftHiveMetastore_get_all_tables_result::write(::apache::thrift::prot xfer += 
oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter444; - for (_iter444 = this->success.begin(); _iter444 != this->success.end(); ++_iter444) + std::vector ::const_iterator _iter450; + for (_iter450 = this->success.begin(); _iter450 != this->success.end(); ++_iter450) { - xfer += oprot->writeString((*_iter444)); + xfer += oprot->writeString((*_iter450)); } xfer += oprot->writeListEnd(); } @@ -4310,14 +4310,14 @@ uint32_t ThriftHiveMetastore_get_all_tables_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size445; - ::apache::thrift::protocol::TType _etype448; - xfer += iprot->readListBegin(_etype448, _size445); - (*(this->success)).resize(_size445); - uint32_t _i449; - for (_i449 = 0; _i449 < _size445; ++_i449) + uint32_t _size451; + ::apache::thrift::protocol::TType _etype454; + xfer += iprot->readListBegin(_etype454, _size451); + (*(this->success)).resize(_size451); + uint32_t _i455; + for (_i455 = 0; _i455 < _size451; ++_i455) { - xfer += iprot->readString((*(this->success))[_i449]); + xfer += iprot->readString((*(this->success))[_i455]); } xfer += iprot->readListEnd(); } @@ -4596,14 +4596,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tbl_names.clear(); - uint32_t _size450; - ::apache::thrift::protocol::TType _etype453; - xfer += iprot->readListBegin(_etype453, _size450); - this->tbl_names.resize(_size450); - uint32_t _i454; - for (_i454 = 0; _i454 < _size450; ++_i454) + uint32_t _size456; + ::apache::thrift::protocol::TType _etype459; + xfer += iprot->readListBegin(_etype459, _size456); + this->tbl_names.resize(_size456); + uint32_t _i460; + for (_i460 = 0; _i460 < _size456; ++_i460) { - xfer += iprot->readString(this->tbl_names[_i454]); + xfer += iprot->readString(this->tbl_names[_i460]); } xfer += iprot->readListEnd(); } @@ -4635,10 +4635,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_args::write(::apache::thr xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->tbl_names.size())); - std::vector ::const_iterator _iter455; - for (_iter455 = this->tbl_names.begin(); _iter455 != this->tbl_names.end(); ++_iter455) + std::vector ::const_iterator _iter461; + for (_iter461 = this->tbl_names.begin(); _iter461 != this->tbl_names.end(); ++_iter461) { - xfer += oprot->writeString((*_iter455)); + xfer += oprot->writeString((*_iter461)); } xfer += oprot->writeListEnd(); } @@ -4660,10 +4660,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_pargs::write(::apache::th xfer += oprot->writeFieldBegin("tbl_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->tbl_names)).size())); - std::vector ::const_iterator _iter456; - for (_iter456 = (*(this->tbl_names)).begin(); _iter456 != (*(this->tbl_names)).end(); ++_iter456) + std::vector ::const_iterator _iter462; + for (_iter462 = (*(this->tbl_names)).begin(); _iter462 != (*(this->tbl_names)).end(); ++_iter462) { - xfer += oprot->writeString((*_iter456)); + xfer += oprot->writeString((*_iter462)); } xfer += oprot->writeListEnd(); } @@ -4698,14 +4698,14 @@ uint32_t 
ThriftHiveMetastore_get_table_objects_by_name_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size457; - ::apache::thrift::protocol::TType _etype460; - xfer += iprot->readListBegin(_etype460, _size457); - this->success.resize(_size457); - uint32_t _i461; - for (_i461 = 0; _i461 < _size457; ++_i461) + uint32_t _size463; + ::apache::thrift::protocol::TType _etype466; + xfer += iprot->readListBegin(_etype466, _size463); + this->success.resize(_size463); + uint32_t _i467; + for (_i467 = 0; _i467 < _size463; ++_i467) { - xfer += this->success[_i461].read(iprot); + xfer += this->success[_i467].read(iprot); } xfer += iprot->readListEnd(); } @@ -4760,10 +4760,10 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter462; - for (_iter462 = this->success.begin(); _iter462 != this->success.end(); ++_iter462) + std::vector
::const_iterator _iter468; + for (_iter468 = this->success.begin(); _iter468 != this->success.end(); ++_iter468) { - xfer += (*_iter462).write(oprot); + xfer += (*_iter468).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4810,14 +4810,14 @@ uint32_t ThriftHiveMetastore_get_table_objects_by_name_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size463; - ::apache::thrift::protocol::TType _etype466; - xfer += iprot->readListBegin(_etype466, _size463); - (*(this->success)).resize(_size463); - uint32_t _i467; - for (_i467 = 0; _i467 < _size463; ++_i467) + uint32_t _size469; + ::apache::thrift::protocol::TType _etype472; + xfer += iprot->readListBegin(_etype472, _size469); + (*(this->success)).resize(_size469); + uint32_t _i473; + for (_i473 = 0; _i473 < _size469; ++_i473) { - xfer += (*(this->success))[_i467].read(iprot); + xfer += (*(this->success))[_i473].read(iprot); } xfer += iprot->readListEnd(); } @@ -4984,14 +4984,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size468; - ::apache::thrift::protocol::TType _etype471; - xfer += iprot->readListBegin(_etype471, _size468); - this->success.resize(_size468); - uint32_t _i472; - for (_i472 = 0; _i472 < _size468; ++_i472) + uint32_t _size474; + ::apache::thrift::protocol::TType _etype477; + xfer += iprot->readListBegin(_etype477, _size474); + this->success.resize(_size474); + uint32_t _i478; + for (_i478 = 0; _i478 < _size474; ++_i478) { - xfer += iprot->readString(this->success[_i472]); + xfer += iprot->readString(this->success[_i478]); } xfer += iprot->readListEnd(); } @@ -5046,10 +5046,10 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_result::write(::apache::t xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter473; - for (_iter473 = this->success.begin(); _iter473 != this->success.end(); ++_iter473) + std::vector ::const_iterator _iter479; + for (_iter479 = this->success.begin(); _iter479 != this->success.end(); ++_iter479) { - xfer += oprot->writeString((*_iter473)); + xfer += oprot->writeString((*_iter479)); } xfer += oprot->writeListEnd(); } @@ -5096,14 +5096,14 @@ uint32_t ThriftHiveMetastore_get_table_names_by_filter_presult::read(::apache::t if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size474; - ::apache::thrift::protocol::TType _etype477; - xfer += iprot->readListBegin(_etype477, _size474); - (*(this->success)).resize(_size474); - uint32_t _i478; - for (_i478 = 0; _i478 < _size474; ++_i478) + uint32_t _size480; + ::apache::thrift::protocol::TType _etype483; + xfer += iprot->readListBegin(_etype483, _size480); + (*(this->success)).resize(_size480); + uint32_t _i484; + for (_i484 = 0; _i484 < _size480; ++_i484) { - xfer += iprot->readString((*(this->success))[_i478]); + xfer += iprot->readString((*(this->success))[_i484]); } xfer += iprot->readListEnd(); } @@ -6076,14 +6076,14 @@ uint32_t ThriftHiveMetastore_add_partitions_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size479; - ::apache::thrift::protocol::TType _etype482; - xfer += iprot->readListBegin(_etype482, _size479); - this->new_parts.resize(_size479); - uint32_t _i483; - 
for (_i483 = 0; _i483 < _size479; ++_i483) + uint32_t _size485; + ::apache::thrift::protocol::TType _etype488; + xfer += iprot->readListBegin(_etype488, _size485); + this->new_parts.resize(_size485); + uint32_t _i489; + for (_i489 = 0; _i489 < _size485; ++_i489) { - xfer += this->new_parts[_i483].read(iprot); + xfer += this->new_parts[_i489].read(iprot); } xfer += iprot->readListEnd(); } @@ -6111,10 +6111,10 @@ uint32_t ThriftHiveMetastore_add_partitions_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter484; - for (_iter484 = this->new_parts.begin(); _iter484 != this->new_parts.end(); ++_iter484) + std::vector ::const_iterator _iter490; + for (_iter490 = this->new_parts.begin(); _iter490 != this->new_parts.end(); ++_iter490) { - xfer += (*_iter484).write(oprot); + xfer += (*_iter490).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6132,10 +6132,10 @@ uint32_t ThriftHiveMetastore_add_partitions_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter485; - for (_iter485 = (*(this->new_parts)).begin(); _iter485 != (*(this->new_parts)).end(); ++_iter485) + std::vector ::const_iterator _iter491; + for (_iter491 = (*(this->new_parts)).begin(); _iter491 != (*(this->new_parts)).end(); ++_iter491) { - xfer += (*_iter485).write(oprot); + xfer += (*_iter491).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6342,14 +6342,14 @@ uint32_t ThriftHiveMetastore_append_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size486; - ::apache::thrift::protocol::TType _etype489; - xfer += iprot->readListBegin(_etype489, _size486); - this->part_vals.resize(_size486); - uint32_t _i490; - for (_i490 = 0; _i490 < _size486; ++_i490) + uint32_t _size492; + ::apache::thrift::protocol::TType _etype495; + xfer += iprot->readListBegin(_etype495, _size492); + this->part_vals.resize(_size492); + uint32_t _i496; + for (_i496 = 0; _i496 < _size492; ++_i496) { - xfer += iprot->readString(this->part_vals[_i490]); + xfer += iprot->readString(this->part_vals[_i496]); } xfer += iprot->readListEnd(); } @@ -6385,10 +6385,10 @@ uint32_t ThriftHiveMetastore_append_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter491; - for (_iter491 = this->part_vals.begin(); _iter491 != this->part_vals.end(); ++_iter491) + std::vector ::const_iterator _iter497; + for (_iter497 = this->part_vals.begin(); _iter497 != this->part_vals.end(); ++_iter497) { - xfer += oprot->writeString((*_iter491)); + xfer += oprot->writeString((*_iter497)); } xfer += oprot->writeListEnd(); } @@ -6414,10 +6414,10 @@ uint32_t ThriftHiveMetastore_append_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter492; - for 
(_iter492 = (*(this->part_vals)).begin(); _iter492 != (*(this->part_vals)).end(); ++_iter492) + std::vector ::const_iterator _iter498; + for (_iter498 = (*(this->part_vals)).begin(); _iter498 != (*(this->part_vals)).end(); ++_iter498) { - xfer += oprot->writeString((*_iter492)); + xfer += oprot->writeString((*_iter498)); } xfer += oprot->writeListEnd(); } @@ -6846,14 +6846,14 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::rea if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size493; - ::apache::thrift::protocol::TType _etype496; - xfer += iprot->readListBegin(_etype496, _size493); - this->part_vals.resize(_size493); - uint32_t _i497; - for (_i497 = 0; _i497 < _size493; ++_i497) + uint32_t _size499; + ::apache::thrift::protocol::TType _etype502; + xfer += iprot->readListBegin(_etype502, _size499); + this->part_vals.resize(_size499); + uint32_t _i503; + for (_i503 = 0; _i503 < _size499; ++_i503) { - xfer += iprot->readString(this->part_vals[_i497]); + xfer += iprot->readString(this->part_vals[_i503]); } xfer += iprot->readListEnd(); } @@ -6897,10 +6897,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_args::wri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter498; - for (_iter498 = this->part_vals.begin(); _iter498 != this->part_vals.end(); ++_iter498) + std::vector ::const_iterator _iter504; + for (_iter504 = this->part_vals.begin(); _iter504 != this->part_vals.end(); ++_iter504) { - xfer += oprot->writeString((*_iter498)); + xfer += oprot->writeString((*_iter504)); } xfer += oprot->writeListEnd(); } @@ -6930,10 +6930,10 @@ uint32_t ThriftHiveMetastore_append_partition_with_environment_context_pargs::wr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter499; - for (_iter499 = (*(this->part_vals)).begin(); _iter499 != (*(this->part_vals)).end(); ++_iter499) + std::vector ::const_iterator _iter505; + for (_iter505 = (*(this->part_vals)).begin(); _iter505 != (*(this->part_vals)).end(); ++_iter505) { - xfer += oprot->writeString((*_iter499)); + xfer += oprot->writeString((*_iter505)); } xfer += oprot->writeListEnd(); } @@ -7668,14 +7668,14 @@ uint32_t ThriftHiveMetastore_drop_partition_args::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size500; - ::apache::thrift::protocol::TType _etype503; - xfer += iprot->readListBegin(_etype503, _size500); - this->part_vals.resize(_size500); - uint32_t _i504; - for (_i504 = 0; _i504 < _size500; ++_i504) + uint32_t _size506; + ::apache::thrift::protocol::TType _etype509; + xfer += iprot->readListBegin(_etype509, _size506); + this->part_vals.resize(_size506); + uint32_t _i510; + for (_i510 = 0; _i510 < _size506; ++_i510) { - xfer += iprot->readString(this->part_vals[_i504]); + xfer += iprot->readString(this->part_vals[_i510]); } xfer += iprot->readListEnd(); } @@ -7719,10 +7719,10 @@ uint32_t ThriftHiveMetastore_drop_partition_args::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter505; - for (_iter505 = this->part_vals.begin(); _iter505 != this->part_vals.end(); ++_iter505) + std::vector ::const_iterator _iter511; + for (_iter511 = this->part_vals.begin(); _iter511 != this->part_vals.end(); ++_iter511) { - xfer += oprot->writeString((*_iter505)); + xfer += oprot->writeString((*_iter511)); } xfer += oprot->writeListEnd(); } @@ -7752,10 +7752,10 @@ uint32_t ThriftHiveMetastore_drop_partition_pargs::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter506; - for (_iter506 = (*(this->part_vals)).begin(); _iter506 != (*(this->part_vals)).end(); ++_iter506) + std::vector ::const_iterator _iter512; + for (_iter512 = (*(this->part_vals)).begin(); _iter512 != (*(this->part_vals)).end(); ++_iter512) { - xfer += oprot->writeString((*_iter506)); + xfer += oprot->writeString((*_iter512)); } xfer += oprot->writeListEnd(); } @@ -7946,14 +7946,14 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::read( if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size507; - ::apache::thrift::protocol::TType _etype510; - xfer += iprot->readListBegin(_etype510, _size507); - this->part_vals.resize(_size507); - uint32_t _i511; - for (_i511 = 0; _i511 < _size507; ++_i511) + uint32_t _size513; + ::apache::thrift::protocol::TType _etype516; + xfer += iprot->readListBegin(_etype516, _size513); + this->part_vals.resize(_size513); + uint32_t _i517; + for (_i517 = 0; _i517 < _size513; ++_i517) { - xfer += iprot->readString(this->part_vals[_i511]); + xfer += iprot->readString(this->part_vals[_i517]); } xfer += iprot->readListEnd(); } @@ -8005,10 +8005,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_args::write xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter512; - for (_iter512 = this->part_vals.begin(); _iter512 != this->part_vals.end(); ++_iter512) + std::vector ::const_iterator _iter518; + for (_iter518 = this->part_vals.begin(); _iter518 != this->part_vals.end(); ++_iter518) { - xfer += oprot->writeString((*_iter512)); + xfer += oprot->writeString((*_iter518)); } xfer += oprot->writeListEnd(); } @@ -8042,10 +8042,10 @@ uint32_t ThriftHiveMetastore_drop_partition_with_environment_context_pargs::writ xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter513; - for (_iter513 = (*(this->part_vals)).begin(); _iter513 != (*(this->part_vals)).end(); ++_iter513) + std::vector ::const_iterator _iter519; + for (_iter519 = (*(this->part_vals)).begin(); _iter519 != (*(this->part_vals)).end(); ++_iter519) { - xfer += oprot->writeString((*_iter513)); + xfer += oprot->writeString((*_iter519)); } xfer += oprot->writeListEnd(); } @@ -8958,14 +8958,14 @@ uint32_t ThriftHiveMetastore_get_partition_args::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size514; - ::apache::thrift::protocol::TType _etype517; - xfer += 
iprot->readListBegin(_etype517, _size514); - this->part_vals.resize(_size514); - uint32_t _i518; - for (_i518 = 0; _i518 < _size514; ++_i518) + uint32_t _size520; + ::apache::thrift::protocol::TType _etype523; + xfer += iprot->readListBegin(_etype523, _size520); + this->part_vals.resize(_size520); + uint32_t _i524; + for (_i524 = 0; _i524 < _size520; ++_i524) { - xfer += iprot->readString(this->part_vals[_i518]); + xfer += iprot->readString(this->part_vals[_i524]); } xfer += iprot->readListEnd(); } @@ -9001,10 +9001,10 @@ uint32_t ThriftHiveMetastore_get_partition_args::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter519; - for (_iter519 = this->part_vals.begin(); _iter519 != this->part_vals.end(); ++_iter519) + std::vector ::const_iterator _iter525; + for (_iter525 = this->part_vals.begin(); _iter525 != this->part_vals.end(); ++_iter525) { - xfer += oprot->writeString((*_iter519)); + xfer += oprot->writeString((*_iter525)); } xfer += oprot->writeListEnd(); } @@ -9030,10 +9030,10 @@ uint32_t ThriftHiveMetastore_get_partition_pargs::write(::apache::thrift::protoc xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter520; - for (_iter520 = (*(this->part_vals)).begin(); _iter520 != (*(this->part_vals)).end(); ++_iter520) + std::vector ::const_iterator _iter526; + for (_iter526 = (*(this->part_vals)).begin(); _iter526 != (*(this->part_vals)).end(); ++_iter526) { - xfer += oprot->writeString((*_iter520)); + xfer += oprot->writeString((*_iter526)); } xfer += oprot->writeListEnd(); } @@ -9204,17 +9204,17 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partitionSpecs.clear(); - uint32_t _size521; - ::apache::thrift::protocol::TType _ktype522; - ::apache::thrift::protocol::TType _vtype523; - xfer += iprot->readMapBegin(_ktype522, _vtype523, _size521); - uint32_t _i525; - for (_i525 = 0; _i525 < _size521; ++_i525) + uint32_t _size527; + ::apache::thrift::protocol::TType _ktype528; + ::apache::thrift::protocol::TType _vtype529; + xfer += iprot->readMapBegin(_ktype528, _vtype529, _size527); + uint32_t _i531; + for (_i531 = 0; _i531 < _size527; ++_i531) { - std::string _key526; - xfer += iprot->readString(_key526); - std::string& _val527 = this->partitionSpecs[_key526]; - xfer += iprot->readString(_val527); + std::string _key532; + xfer += iprot->readString(_key532); + std::string& _val533 = this->partitionSpecs[_key532]; + xfer += iprot->readString(_val533); } xfer += iprot->readMapEnd(); } @@ -9274,11 +9274,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_args::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->partitionSpecs.size())); - std::map ::const_iterator _iter528; - for (_iter528 = this->partitionSpecs.begin(); _iter528 != this->partitionSpecs.end(); ++_iter528) + std::map ::const_iterator _iter534; + for (_iter534 = this->partitionSpecs.begin(); _iter534 != this->partitionSpecs.end(); ++_iter534) { - xfer += 
oprot->writeString(_iter528->first); - xfer += oprot->writeString(_iter528->second); + xfer += oprot->writeString(_iter534->first); + xfer += oprot->writeString(_iter534->second); } xfer += oprot->writeMapEnd(); } @@ -9312,11 +9312,11 @@ uint32_t ThriftHiveMetastore_exchange_partition_pargs::write(::apache::thrift::p xfer += oprot->writeFieldBegin("partitionSpecs", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->partitionSpecs)).size())); - std::map ::const_iterator _iter529; - for (_iter529 = (*(this->partitionSpecs)).begin(); _iter529 != (*(this->partitionSpecs)).end(); ++_iter529) + std::map ::const_iterator _iter535; + for (_iter535 = (*(this->partitionSpecs)).begin(); _iter535 != (*(this->partitionSpecs)).end(); ++_iter535) { - xfer += oprot->writeString(_iter529->first); - xfer += oprot->writeString(_iter529->second); + xfer += oprot->writeString(_iter535->first); + xfer += oprot->writeString(_iter535->second); } xfer += oprot->writeMapEnd(); } @@ -9559,14 +9559,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size530; - ::apache::thrift::protocol::TType _etype533; - xfer += iprot->readListBegin(_etype533, _size530); - this->part_vals.resize(_size530); - uint32_t _i534; - for (_i534 = 0; _i534 < _size530; ++_i534) + uint32_t _size536; + ::apache::thrift::protocol::TType _etype539; + xfer += iprot->readListBegin(_etype539, _size536); + this->part_vals.resize(_size536); + uint32_t _i540; + for (_i540 = 0; _i540 < _size536; ++_i540) { - xfer += iprot->readString(this->part_vals[_i534]); + xfer += iprot->readString(this->part_vals[_i540]); } xfer += iprot->readListEnd(); } @@ -9587,14 +9587,14 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size535; - ::apache::thrift::protocol::TType _etype538; - xfer += iprot->readListBegin(_etype538, _size535); - this->group_names.resize(_size535); - uint32_t _i539; - for (_i539 = 0; _i539 < _size535; ++_i539) + uint32_t _size541; + ::apache::thrift::protocol::TType _etype544; + xfer += iprot->readListBegin(_etype544, _size541); + this->group_names.resize(_size541); + uint32_t _i545; + for (_i545 = 0; _i545 < _size541; ++_i545) { - xfer += iprot->readString(this->group_names[_i539]); + xfer += iprot->readString(this->group_names[_i545]); } xfer += iprot->readListEnd(); } @@ -9630,10 +9630,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter540; - for (_iter540 = this->part_vals.begin(); _iter540 != this->part_vals.end(); ++_iter540) + std::vector ::const_iterator _iter546; + for (_iter546 = this->part_vals.begin(); _iter546 != this->part_vals.end(); ++_iter546) { - xfer += oprot->writeString((*_iter540)); + xfer += oprot->writeString((*_iter546)); } xfer += oprot->writeListEnd(); } @@ -9646,10 +9646,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_args::write(::apache::thrif xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter541; - for (_iter541 = this->group_names.begin(); _iter541 != this->group_names.end(); ++_iter541) + std::vector ::const_iterator _iter547; + for (_iter547 = this->group_names.begin(); _iter547 != this->group_names.end(); ++_iter547) { - xfer += oprot->writeString((*_iter541)); + xfer += oprot->writeString((*_iter547)); } xfer += oprot->writeListEnd(); } @@ -9675,10 +9675,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter542; - for (_iter542 = (*(this->part_vals)).begin(); _iter542 != (*(this->part_vals)).end(); ++_iter542) + std::vector ::const_iterator _iter548; + for (_iter548 = (*(this->part_vals)).begin(); _iter548 != (*(this->part_vals)).end(); ++_iter548) { - xfer += oprot->writeString((*_iter542)); + xfer += oprot->writeString((*_iter548)); } xfer += oprot->writeListEnd(); } @@ -9691,10 +9691,10 @@ uint32_t ThriftHiveMetastore_get_partition_with_auth_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter543; - for (_iter543 = (*(this->group_names)).begin(); _iter543 != (*(this->group_names)).end(); ++_iter543) + std::vector ::const_iterator _iter549; + for (_iter549 = (*(this->group_names)).begin(); _iter549 != (*(this->group_names)).end(); ++_iter549) { - xfer += oprot->writeString((*_iter543)); + xfer += oprot->writeString((*_iter549)); } xfer += oprot->writeListEnd(); } @@ -10197,14 +10197,14 @@ uint32_t ThriftHiveMetastore_get_partitions_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size544; - ::apache::thrift::protocol::TType _etype547; - xfer += iprot->readListBegin(_etype547, _size544); - this->success.resize(_size544); - uint32_t _i548; - for (_i548 = 0; _i548 < _size544; ++_i548) + uint32_t _size550; + ::apache::thrift::protocol::TType _etype553; + xfer += iprot->readListBegin(_etype553, _size550); + this->success.resize(_size550); + uint32_t _i554; + for (_i554 = 0; _i554 < _size550; ++_i554) { - xfer += this->success[_i548].read(iprot); + xfer += this->success[_i554].read(iprot); } xfer += iprot->readListEnd(); } @@ -10251,10 +10251,10 @@ uint32_t ThriftHiveMetastore_get_partitions_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter549; - for (_iter549 = this->success.begin(); _iter549 != this->success.end(); ++_iter549) + std::vector ::const_iterator _iter555; + for (_iter555 = this->success.begin(); _iter555 != this->success.end(); ++_iter555) { - xfer += (*_iter549).write(oprot); + xfer += (*_iter555).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10297,14 +10297,14 @@ uint32_t ThriftHiveMetastore_get_partitions_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size550; - 
::apache::thrift::protocol::TType _etype553; - xfer += iprot->readListBegin(_etype553, _size550); - (*(this->success)).resize(_size550); - uint32_t _i554; - for (_i554 = 0; _i554 < _size550; ++_i554) + uint32_t _size556; + ::apache::thrift::protocol::TType _etype559; + xfer += iprot->readListBegin(_etype559, _size556); + (*(this->success)).resize(_size556); + uint32_t _i560; + for (_i560 = 0; _i560 < _size556; ++_i560) { - xfer += (*(this->success))[_i554].read(iprot); + xfer += (*(this->success))[_i560].read(iprot); } xfer += iprot->readListEnd(); } @@ -10397,14 +10397,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size555; - ::apache::thrift::protocol::TType _etype558; - xfer += iprot->readListBegin(_etype558, _size555); - this->group_names.resize(_size555); - uint32_t _i559; - for (_i559 = 0; _i559 < _size555; ++_i559) + uint32_t _size561; + ::apache::thrift::protocol::TType _etype564; + xfer += iprot->readListBegin(_etype564, _size561); + this->group_names.resize(_size561); + uint32_t _i565; + for (_i565 = 0; _i565 < _size561; ++_i565) { - xfer += iprot->readString(this->group_names[_i559]); + xfer += iprot->readString(this->group_names[_i565]); } xfer += iprot->readListEnd(); } @@ -10448,10 +10448,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_args::write(::apache::thri xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter560; - for (_iter560 = this->group_names.begin(); _iter560 != this->group_names.end(); ++_iter560) + std::vector ::const_iterator _iter566; + for (_iter566 = this->group_names.begin(); _iter566 != this->group_names.end(); ++_iter566) { - xfer += oprot->writeString((*_iter560)); + xfer += oprot->writeString((*_iter566)); } xfer += oprot->writeListEnd(); } @@ -10485,10 +10485,10 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_pargs::write(::apache::thr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 5); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter561; - for (_iter561 = (*(this->group_names)).begin(); _iter561 != (*(this->group_names)).end(); ++_iter561) + std::vector ::const_iterator _iter567; + for (_iter567 = (*(this->group_names)).begin(); _iter567 != (*(this->group_names)).end(); ++_iter567) { - xfer += oprot->writeString((*_iter561)); + xfer += oprot->writeString((*_iter567)); } xfer += oprot->writeListEnd(); } @@ -10523,14 +10523,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size562; - ::apache::thrift::protocol::TType _etype565; - xfer += iprot->readListBegin(_etype565, _size562); - this->success.resize(_size562); - uint32_t _i566; - for (_i566 = 0; _i566 < _size562; ++_i566) + uint32_t _size568; + ::apache::thrift::protocol::TType _etype571; + xfer += iprot->readListBegin(_etype571, _size568); + this->success.resize(_size568); + uint32_t _i572; + for (_i572 = 0; _i572 < _size568; ++_i572) { - xfer += this->success[_i566].read(iprot); + xfer += this->success[_i572].read(iprot); } xfer += iprot->readListEnd(); } @@ -10577,10 +10577,10 @@ uint32_t 
ThriftHiveMetastore_get_partitions_with_auth_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter567; - for (_iter567 = this->success.begin(); _iter567 != this->success.end(); ++_iter567) + std::vector ::const_iterator _iter573; + for (_iter573 = this->success.begin(); _iter573 != this->success.end(); ++_iter573) { - xfer += (*_iter567).write(oprot); + xfer += (*_iter573).write(oprot); } xfer += oprot->writeListEnd(); } @@ -10623,14 +10623,14 @@ uint32_t ThriftHiveMetastore_get_partitions_with_auth_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size568; - ::apache::thrift::protocol::TType _etype571; - xfer += iprot->readListBegin(_etype571, _size568); - (*(this->success)).resize(_size568); - uint32_t _i572; - for (_i572 = 0; _i572 < _size568; ++_i572) + uint32_t _size574; + ::apache::thrift::protocol::TType _etype577; + xfer += iprot->readListBegin(_etype577, _size574); + (*(this->success)).resize(_size574); + uint32_t _i578; + for (_i578 = 0; _i578 < _size574; ++_i578) { - xfer += (*(this->success))[_i572].read(iprot); + xfer += (*(this->success))[_i578].read(iprot); } xfer += iprot->readListEnd(); } @@ -10789,14 +10789,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size573; - ::apache::thrift::protocol::TType _etype576; - xfer += iprot->readListBegin(_etype576, _size573); - this->success.resize(_size573); - uint32_t _i577; - for (_i577 = 0; _i577 < _size573; ++_i577) + uint32_t _size579; + ::apache::thrift::protocol::TType _etype582; + xfer += iprot->readListBegin(_etype582, _size579); + this->success.resize(_size579); + uint32_t _i583; + for (_i583 = 0; _i583 < _size579; ++_i583) { - xfer += iprot->readString(this->success[_i577]); + xfer += iprot->readString(this->success[_i583]); } xfer += iprot->readListEnd(); } @@ -10835,10 +10835,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_result::write(::apache::thrift: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter578; - for (_iter578 = this->success.begin(); _iter578 != this->success.end(); ++_iter578) + std::vector ::const_iterator _iter584; + for (_iter584 = this->success.begin(); _iter584 != this->success.end(); ++_iter584) { - xfer += oprot->writeString((*_iter578)); + xfer += oprot->writeString((*_iter584)); } xfer += oprot->writeListEnd(); } @@ -10877,14 +10877,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_presult::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size579; - ::apache::thrift::protocol::TType _etype582; - xfer += iprot->readListBegin(_etype582, _size579); - (*(this->success)).resize(_size579); - uint32_t _i583; - for (_i583 = 0; _i583 < _size579; ++_i583) + uint32_t _size585; + ::apache::thrift::protocol::TType _etype588; + xfer += iprot->readListBegin(_etype588, _size585); + (*(this->success)).resize(_size585); + uint32_t _i589; + for (_i589 = 0; _i589 < _size585; ++_i589) { - xfer += iprot->readString((*(this->success))[_i583]); + xfer += 
iprot->readString((*(this->success))[_i589]); } xfer += iprot->readListEnd(); } @@ -10953,14 +10953,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size584; - ::apache::thrift::protocol::TType _etype587; - xfer += iprot->readListBegin(_etype587, _size584); - this->part_vals.resize(_size584); - uint32_t _i588; - for (_i588 = 0; _i588 < _size584; ++_i588) + uint32_t _size590; + ::apache::thrift::protocol::TType _etype593; + xfer += iprot->readListBegin(_etype593, _size590); + this->part_vals.resize(_size590); + uint32_t _i594; + for (_i594 = 0; _i594 < _size590; ++_i594) { - xfer += iprot->readString(this->part_vals[_i588]); + xfer += iprot->readString(this->part_vals[_i594]); } xfer += iprot->readListEnd(); } @@ -11004,10 +11004,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter589; - for (_iter589 = this->part_vals.begin(); _iter589 != this->part_vals.end(); ++_iter589) + std::vector ::const_iterator _iter595; + for (_iter595 = this->part_vals.begin(); _iter595 != this->part_vals.end(); ++_iter595) { - xfer += oprot->writeString((*_iter589)); + xfer += oprot->writeString((*_iter595)); } xfer += oprot->writeListEnd(); } @@ -11037,10 +11037,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter590; - for (_iter590 = (*(this->part_vals)).begin(); _iter590 != (*(this->part_vals)).end(); ++_iter590) + std::vector ::const_iterator _iter596; + for (_iter596 = (*(this->part_vals)).begin(); _iter596 != (*(this->part_vals)).end(); ++_iter596) { - xfer += oprot->writeString((*_iter590)); + xfer += oprot->writeString((*_iter596)); } xfer += oprot->writeListEnd(); } @@ -11079,14 +11079,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::read(::apache::thrift::pr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size591; - ::apache::thrift::protocol::TType _etype594; - xfer += iprot->readListBegin(_etype594, _size591); - this->success.resize(_size591); - uint32_t _i595; - for (_i595 = 0; _i595 < _size591; ++_i595) + uint32_t _size597; + ::apache::thrift::protocol::TType _etype600; + xfer += iprot->readListBegin(_etype600, _size597); + this->success.resize(_size597); + uint32_t _i601; + for (_i601 = 0; _i601 < _size597; ++_i601) { - xfer += this->success[_i595].read(iprot); + xfer += this->success[_i601].read(iprot); } xfer += iprot->readListEnd(); } @@ -11133,10 +11133,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_result::write(::apache::thrift::p xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter596; - for (_iter596 = this->success.begin(); _iter596 != this->success.end(); ++_iter596) + std::vector ::const_iterator _iter602; + for (_iter602 = this->success.begin(); _iter602 != this->success.end(); ++_iter602) { - xfer += 
(*_iter596).write(oprot); + xfer += (*_iter602).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11179,14 +11179,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_presult::read(::apache::thrift::p if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size597; - ::apache::thrift::protocol::TType _etype600; - xfer += iprot->readListBegin(_etype600, _size597); - (*(this->success)).resize(_size597); - uint32_t _i601; - for (_i601 = 0; _i601 < _size597; ++_i601) + uint32_t _size603; + ::apache::thrift::protocol::TType _etype606; + xfer += iprot->readListBegin(_etype606, _size603); + (*(this->success)).resize(_size603); + uint32_t _i607; + for (_i607 = 0; _i607 < _size603; ++_i607) { - xfer += (*(this->success))[_i601].read(iprot); + xfer += (*(this->success))[_i607].read(iprot); } xfer += iprot->readListEnd(); } @@ -11263,14 +11263,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size602; - ::apache::thrift::protocol::TType _etype605; - xfer += iprot->readListBegin(_etype605, _size602); - this->part_vals.resize(_size602); - uint32_t _i606; - for (_i606 = 0; _i606 < _size602; ++_i606) + uint32_t _size608; + ::apache::thrift::protocol::TType _etype611; + xfer += iprot->readListBegin(_etype611, _size608); + this->part_vals.resize(_size608); + uint32_t _i612; + for (_i612 = 0; _i612 < _size608; ++_i612) { - xfer += iprot->readString(this->part_vals[_i606]); + xfer += iprot->readString(this->part_vals[_i612]); } xfer += iprot->readListEnd(); } @@ -11299,14 +11299,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size607; - ::apache::thrift::protocol::TType _etype610; - xfer += iprot->readListBegin(_etype610, _size607); - this->group_names.resize(_size607); - uint32_t _i611; - for (_i611 = 0; _i611 < _size607; ++_i611) + uint32_t _size613; + ::apache::thrift::protocol::TType _etype616; + xfer += iprot->readListBegin(_etype616, _size613); + this->group_names.resize(_size613); + uint32_t _i617; + for (_i617 = 0; _i617 < _size613; ++_i617) { - xfer += iprot->readString(this->group_names[_i611]); + xfer += iprot->readString(this->group_names[_i617]); } xfer += iprot->readListEnd(); } @@ -11342,10 +11342,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter612; - for (_iter612 = this->part_vals.begin(); _iter612 != this->part_vals.end(); ++_iter612) + std::vector ::const_iterator _iter618; + for (_iter618 = this->part_vals.begin(); _iter618 != this->part_vals.end(); ++_iter618) { - xfer += oprot->writeString((*_iter612)); + xfer += oprot->writeString((*_iter618)); } xfer += oprot->writeListEnd(); } @@ -11362,10 +11362,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_args::write(::apache::t xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter613; - for (_iter613 = this->group_names.begin(); _iter613 != this->group_names.end(); ++_iter613) + std::vector 
::const_iterator _iter619; + for (_iter619 = this->group_names.begin(); _iter619 != this->group_names.end(); ++_iter619) { - xfer += oprot->writeString((*_iter613)); + xfer += oprot->writeString((*_iter619)); } xfer += oprot->writeListEnd(); } @@ -11391,10 +11391,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter614; - for (_iter614 = (*(this->part_vals)).begin(); _iter614 != (*(this->part_vals)).end(); ++_iter614) + std::vector ::const_iterator _iter620; + for (_iter620 = (*(this->part_vals)).begin(); _iter620 != (*(this->part_vals)).end(); ++_iter620) { - xfer += oprot->writeString((*_iter614)); + xfer += oprot->writeString((*_iter620)); } xfer += oprot->writeListEnd(); } @@ -11411,10 +11411,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_pargs::write(::apache:: xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 6); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter615; - for (_iter615 = (*(this->group_names)).begin(); _iter615 != (*(this->group_names)).end(); ++_iter615) + std::vector ::const_iterator _iter621; + for (_iter621 = (*(this->group_names)).begin(); _iter621 != (*(this->group_names)).end(); ++_iter621) { - xfer += oprot->writeString((*_iter615)); + xfer += oprot->writeString((*_iter621)); } xfer += oprot->writeListEnd(); } @@ -11449,14 +11449,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::read(::apache:: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size616; - ::apache::thrift::protocol::TType _etype619; - xfer += iprot->readListBegin(_etype619, _size616); - this->success.resize(_size616); - uint32_t _i620; - for (_i620 = 0; _i620 < _size616; ++_i620) + uint32_t _size622; + ::apache::thrift::protocol::TType _etype625; + xfer += iprot->readListBegin(_etype625, _size622); + this->success.resize(_size622); + uint32_t _i626; + for (_i626 = 0; _i626 < _size622; ++_i626) { - xfer += this->success[_i620].read(iprot); + xfer += this->success[_i626].read(iprot); } xfer += iprot->readListEnd(); } @@ -11503,10 +11503,10 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_result::write(::apache: xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter621; - for (_iter621 = this->success.begin(); _iter621 != this->success.end(); ++_iter621) + std::vector ::const_iterator _iter627; + for (_iter627 = this->success.begin(); _iter627 != this->success.end(); ++_iter627) { - xfer += (*_iter621).write(oprot); + xfer += (*_iter627).write(oprot); } xfer += oprot->writeListEnd(); } @@ -11549,14 +11549,14 @@ uint32_t ThriftHiveMetastore_get_partitions_ps_with_auth_presult::read(::apache: if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size622; - ::apache::thrift::protocol::TType _etype625; - xfer += iprot->readListBegin(_etype625, _size622); - (*(this->success)).resize(_size622); - uint32_t _i626; - for (_i626 = 0; _i626 < _size622; ++_i626) + uint32_t _size628; + ::apache::thrift::protocol::TType _etype631; 
+ xfer += iprot->readListBegin(_etype631, _size628); + (*(this->success)).resize(_size628); + uint32_t _i632; + for (_i632 = 0; _i632 < _size628; ++_i632) { - xfer += (*(this->success))[_i626].read(iprot); + xfer += (*(this->success))[_i632].read(iprot); } xfer += iprot->readListEnd(); } @@ -11633,14 +11633,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::read(::apache::thrift: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size627; - ::apache::thrift::protocol::TType _etype630; - xfer += iprot->readListBegin(_etype630, _size627); - this->part_vals.resize(_size627); - uint32_t _i631; - for (_i631 = 0; _i631 < _size627; ++_i631) + uint32_t _size633; + ::apache::thrift::protocol::TType _etype636; + xfer += iprot->readListBegin(_etype636, _size633); + this->part_vals.resize(_size633); + uint32_t _i637; + for (_i637 = 0; _i637 < _size633; ++_i637) { - xfer += iprot->readString(this->part_vals[_i631]); + xfer += iprot->readString(this->part_vals[_i637]); } xfer += iprot->readListEnd(); } @@ -11684,10 +11684,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_args::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter632; - for (_iter632 = this->part_vals.begin(); _iter632 != this->part_vals.end(); ++_iter632) + std::vector ::const_iterator _iter638; + for (_iter638 = this->part_vals.begin(); _iter638 != this->part_vals.end(); ++_iter638) { - xfer += oprot->writeString((*_iter632)); + xfer += oprot->writeString((*_iter638)); } xfer += oprot->writeListEnd(); } @@ -11717,10 +11717,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_pargs::write(::apache::thrif xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter633; - for (_iter633 = (*(this->part_vals)).begin(); _iter633 != (*(this->part_vals)).end(); ++_iter633) + std::vector ::const_iterator _iter639; + for (_iter639 = (*(this->part_vals)).begin(); _iter639 != (*(this->part_vals)).end(); ++_iter639) { - xfer += oprot->writeString((*_iter633)); + xfer += oprot->writeString((*_iter639)); } xfer += oprot->writeListEnd(); } @@ -11759,14 +11759,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size634; - ::apache::thrift::protocol::TType _etype637; - xfer += iprot->readListBegin(_etype637, _size634); - this->success.resize(_size634); - uint32_t _i638; - for (_i638 = 0; _i638 < _size634; ++_i638) + uint32_t _size640; + ::apache::thrift::protocol::TType _etype643; + xfer += iprot->readListBegin(_etype643, _size640); + this->success.resize(_size640); + uint32_t _i644; + for (_i644 = 0; _i644 < _size640; ++_i644) { - xfer += iprot->readString(this->success[_i638]); + xfer += iprot->readString(this->success[_i644]); } xfer += iprot->readListEnd(); } @@ -11813,10 +11813,10 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter639; - for 
(_iter639 = this->success.begin(); _iter639 != this->success.end(); ++_iter639) + std::vector ::const_iterator _iter645; + for (_iter645 = this->success.begin(); _iter645 != this->success.end(); ++_iter645) { - xfer += oprot->writeString((*_iter639)); + xfer += oprot->writeString((*_iter645)); } xfer += oprot->writeListEnd(); } @@ -11859,14 +11859,14 @@ uint32_t ThriftHiveMetastore_get_partition_names_ps_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size640; - ::apache::thrift::protocol::TType _etype643; - xfer += iprot->readListBegin(_etype643, _size640); - (*(this->success)).resize(_size640); - uint32_t _i644; - for (_i644 = 0; _i644 < _size640; ++_i644) + uint32_t _size646; + ::apache::thrift::protocol::TType _etype649; + xfer += iprot->readListBegin(_etype649, _size646); + (*(this->success)).resize(_size646); + uint32_t _i650; + for (_i650 = 0; _i650 < _size646; ++_i650) { - xfer += iprot->readString((*(this->success))[_i644]); + xfer += iprot->readString((*(this->success))[_i650]); } xfer += iprot->readListEnd(); } @@ -12041,14 +12041,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size645; - ::apache::thrift::protocol::TType _etype648; - xfer += iprot->readListBegin(_etype648, _size645); - this->success.resize(_size645); - uint32_t _i649; - for (_i649 = 0; _i649 < _size645; ++_i649) + uint32_t _size651; + ::apache::thrift::protocol::TType _etype654; + xfer += iprot->readListBegin(_etype654, _size651); + this->success.resize(_size651); + uint32_t _i655; + for (_i655 = 0; _i655 < _size651; ++_i655) { - xfer += this->success[_i649].read(iprot); + xfer += this->success[_i655].read(iprot); } xfer += iprot->readListEnd(); } @@ -12095,10 +12095,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_result::write(::apache::th xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter650; - for (_iter650 = this->success.begin(); _iter650 != this->success.end(); ++_iter650) + std::vector ::const_iterator _iter656; + for (_iter656 = this->success.begin(); _iter656 != this->success.end(); ++_iter656) { - xfer += (*_iter650).write(oprot); + xfer += (*_iter656).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12141,14 +12141,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_filter_presult::read(::apache::th if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size651; - ::apache::thrift::protocol::TType _etype654; - xfer += iprot->readListBegin(_etype654, _size651); - (*(this->success)).resize(_size651); - uint32_t _i655; - for (_i655 = 0; _i655 < _size651; ++_i655) + uint32_t _size657; + ::apache::thrift::protocol::TType _etype660; + xfer += iprot->readListBegin(_etype660, _size657); + (*(this->success)).resize(_size657); + uint32_t _i661; + for (_i661 = 0; _i661 < _size657; ++_i661) { - xfer += (*(this->success))[_i655].read(iprot); + xfer += (*(this->success))[_i661].read(iprot); } xfer += iprot->readListEnd(); } @@ -12427,14 +12427,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::read(::apache::thrift if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size656; - ::apache::thrift::protocol::TType _etype659; - xfer += 
iprot->readListBegin(_etype659, _size656); - this->names.resize(_size656); - uint32_t _i660; - for (_i660 = 0; _i660 < _size656; ++_i660) + uint32_t _size662; + ::apache::thrift::protocol::TType _etype665; + xfer += iprot->readListBegin(_etype665, _size662); + this->names.resize(_size662); + uint32_t _i666; + for (_i666 = 0; _i666 < _size662; ++_i666) { - xfer += iprot->readString(this->names[_i660]); + xfer += iprot->readString(this->names[_i666]); } xfer += iprot->readListEnd(); } @@ -12470,10 +12470,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_args::write(::apache::thrif xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter661; - for (_iter661 = this->names.begin(); _iter661 != this->names.end(); ++_iter661) + std::vector ::const_iterator _iter667; + for (_iter667 = this->names.begin(); _iter667 != this->names.end(); ++_iter667) { - xfer += oprot->writeString((*_iter661)); + xfer += oprot->writeString((*_iter667)); } xfer += oprot->writeListEnd(); } @@ -12499,10 +12499,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_pargs::write(::apache::thri xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->names)).size())); - std::vector ::const_iterator _iter662; - for (_iter662 = (*(this->names)).begin(); _iter662 != (*(this->names)).end(); ++_iter662) + std::vector ::const_iterator _iter668; + for (_iter668 = (*(this->names)).begin(); _iter668 != (*(this->names)).end(); ++_iter668) { - xfer += oprot->writeString((*_iter662)); + xfer += oprot->writeString((*_iter668)); } xfer += oprot->writeListEnd(); } @@ -12537,14 +12537,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size663; - ::apache::thrift::protocol::TType _etype666; - xfer += iprot->readListBegin(_etype666, _size663); - this->success.resize(_size663); - uint32_t _i667; - for (_i667 = 0; _i667 < _size663; ++_i667) + uint32_t _size669; + ::apache::thrift::protocol::TType _etype672; + xfer += iprot->readListBegin(_etype672, _size669); + this->success.resize(_size669); + uint32_t _i673; + for (_i673 = 0; _i673 < _size669; ++_i673) { - xfer += this->success[_i667].read(iprot); + xfer += this->success[_i673].read(iprot); } xfer += iprot->readListEnd(); } @@ -12591,10 +12591,10 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_result::write(::apache::thr xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter668; - for (_iter668 = this->success.begin(); _iter668 != this->success.end(); ++_iter668) + std::vector ::const_iterator _iter674; + for (_iter674 = this->success.begin(); _iter674 != this->success.end(); ++_iter674) { - xfer += (*_iter668).write(oprot); + xfer += (*_iter674).write(oprot); } xfer += oprot->writeListEnd(); } @@ -12637,14 +12637,14 @@ uint32_t ThriftHiveMetastore_get_partitions_by_names_presult::read(::apache::thr if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size669; - ::apache::thrift::protocol::TType _etype672; - xfer += iprot->readListBegin(_etype672, _size669); - 
(*(this->success)).resize(_size669); - uint32_t _i673; - for (_i673 = 0; _i673 < _size669; ++_i673) + uint32_t _size675; + ::apache::thrift::protocol::TType _etype678; + xfer += iprot->readListBegin(_etype678, _size675); + (*(this->success)).resize(_size675); + uint32_t _i679; + for (_i679 = 0; _i679 < _size675; ++_i679) { - xfer += (*(this->success))[_i673].read(iprot); + xfer += (*(this->success))[_i679].read(iprot); } xfer += iprot->readListEnd(); } @@ -12935,14 +12935,14 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->new_parts.clear(); - uint32_t _size674; - ::apache::thrift::protocol::TType _etype677; - xfer += iprot->readListBegin(_etype677, _size674); - this->new_parts.resize(_size674); - uint32_t _i678; - for (_i678 = 0; _i678 < _size674; ++_i678) + uint32_t _size680; + ::apache::thrift::protocol::TType _etype683; + xfer += iprot->readListBegin(_etype683, _size680); + this->new_parts.resize(_size680); + uint32_t _i684; + for (_i684 = 0; _i684 < _size680; ++_i684) { - xfer += this->new_parts[_i678].read(iprot); + xfer += this->new_parts[_i684].read(iprot); } xfer += iprot->readListEnd(); } @@ -12978,10 +12978,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->new_parts.size())); - std::vector ::const_iterator _iter679; - for (_iter679 = this->new_parts.begin(); _iter679 != this->new_parts.end(); ++_iter679) + std::vector ::const_iterator _iter685; + for (_iter685 = this->new_parts.begin(); _iter685 != this->new_parts.end(); ++_iter685) { - xfer += (*_iter679).write(oprot); + xfer += (*_iter685).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13007,10 +13007,10 @@ uint32_t ThriftHiveMetastore_alter_partitions_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("new_parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast((*(this->new_parts)).size())); - std::vector ::const_iterator _iter680; - for (_iter680 = (*(this->new_parts)).begin(); _iter680 != (*(this->new_parts)).end(); ++_iter680) + std::vector ::const_iterator _iter686; + for (_iter686 = (*(this->new_parts)).begin(); _iter686 != (*(this->new_parts)).end(); ++_iter686) { - xfer += (*_iter680).write(oprot); + xfer += (*_iter686).write(oprot); } xfer += oprot->writeListEnd(); } @@ -13407,14 +13407,14 @@ uint32_t ThriftHiveMetastore_rename_partition_args::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size681; - ::apache::thrift::protocol::TType _etype684; - xfer += iprot->readListBegin(_etype684, _size681); - this->part_vals.resize(_size681); - uint32_t _i685; - for (_i685 = 0; _i685 < _size681; ++_i685) + uint32_t _size687; + ::apache::thrift::protocol::TType _etype690; + xfer += iprot->readListBegin(_etype690, _size687); + this->part_vals.resize(_size687); + uint32_t _i691; + for (_i691 = 0; _i691 < _size687; ++_i691) { - xfer += iprot->readString(this->part_vals[_i685]); + xfer += iprot->readString(this->part_vals[_i691]); } xfer += iprot->readListEnd(); } @@ -13458,10 +13458,10 @@ uint32_t ThriftHiveMetastore_rename_partition_args::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter686; - for (_iter686 = this->part_vals.begin(); _iter686 != this->part_vals.end(); ++_iter686) + std::vector ::const_iterator _iter692; + for (_iter692 = this->part_vals.begin(); _iter692 != this->part_vals.end(); ++_iter692) { - xfer += oprot->writeString((*_iter686)); + xfer += oprot->writeString((*_iter692)); } xfer += oprot->writeListEnd(); } @@ -13491,10 +13491,10 @@ uint32_t ThriftHiveMetastore_rename_partition_pargs::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter687; - for (_iter687 = (*(this->part_vals)).begin(); _iter687 != (*(this->part_vals)).end(); ++_iter687) + std::vector ::const_iterator _iter693; + for (_iter693 = (*(this->part_vals)).begin(); _iter693 != (*(this->part_vals)).end(); ++_iter693) { - xfer += oprot->writeString((*_iter687)); + xfer += oprot->writeString((*_iter693)); } xfer += oprot->writeListEnd(); } @@ -13649,14 +13649,14 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::read(::ap if (ftype == ::apache::thrift::protocol::T_LIST) { { this->part_vals.clear(); - uint32_t _size688; - ::apache::thrift::protocol::TType _etype691; - xfer += iprot->readListBegin(_etype691, _size688); - this->part_vals.resize(_size688); - uint32_t _i692; - for (_i692 = 0; _i692 < _size688; ++_i692) + uint32_t _size694; + ::apache::thrift::protocol::TType _etype697; + xfer += iprot->readListBegin(_etype697, _size694); + this->part_vals.resize(_size694); + uint32_t _i698; + for (_i698 = 0; _i698 < _size694; ++_i698) { - xfer += iprot->readString(this->part_vals[_i692]); + xfer += iprot->readString(this->part_vals[_i698]); } xfer += iprot->readListEnd(); } @@ -13692,10 +13692,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_args::write(::a xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::vector ::const_iterator _iter693; - for (_iter693 = this->part_vals.begin(); _iter693 != this->part_vals.end(); ++_iter693) + std::vector ::const_iterator _iter699; + for (_iter699 = this->part_vals.begin(); _iter699 != this->part_vals.end(); ++_iter699) { - xfer += oprot->writeString((*_iter693)); + xfer += oprot->writeString((*_iter699)); } xfer += oprot->writeListEnd(); } @@ -13717,10 +13717,10 @@ uint32_t ThriftHiveMetastore_partition_name_has_valid_characters_pargs::write(:: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::vector ::const_iterator _iter694; - for (_iter694 = (*(this->part_vals)).begin(); _iter694 != (*(this->part_vals)).end(); ++_iter694) + std::vector ::const_iterator _iter700; + for (_iter700 = (*(this->part_vals)).begin(); _iter700 != (*(this->part_vals)).end(); ++_iter700) { - xfer += oprot->writeString((*_iter694)); + xfer += oprot->writeString((*_iter700)); } xfer += oprot->writeListEnd(); } @@ -14139,102 +14139,102 @@ uint32_t ThriftHiveMetastore_partition_name_to_vals_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t 
_size695; - ::apache::thrift::protocol::TType _etype698; - xfer += iprot->readListBegin(_etype698, _size695); - this->success.resize(_size695); - uint32_t _i699; - for (_i699 = 0; _i699 < _size695; ++_i699) - { - xfer += iprot->readString(this->success[_i699]); - } - xfer += iprot->readListEnd(); - } - this->__isset.success = true; - } else { - xfer += iprot->skip(ftype); - } - break; - case 1: - if (ftype == ::apache::thrift::protocol::T_STRUCT) { - xfer += this->o1.read(iprot); - this->__isset.o1 = true; - } else { - xfer += iprot->skip(ftype); - } - break; - default: - xfer += iprot->skip(ftype); - break; - } - xfer += iprot->readFieldEnd(); - } - - xfer += iprot->readStructEnd(); - - return xfer; -} - -uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thrift::protocol::TProtocol* oprot) const { - - uint32_t xfer = 0; - - xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_to_vals_result"); - - if (this->__isset.success) { - xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); - { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter700; - for (_iter700 = this->success.begin(); _iter700 != this->success.end(); ++_iter700) - { - xfer += oprot->writeString((*_iter700)); - } - xfer += oprot->writeListEnd(); - } - xfer += oprot->writeFieldEnd(); - } else if (this->__isset.o1) { - xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); - xfer += this->o1.write(oprot); - xfer += oprot->writeFieldEnd(); - } - xfer += oprot->writeFieldStop(); - xfer += oprot->writeStructEnd(); - return xfer; -} - -uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thrift::protocol::TProtocol* iprot) { - - uint32_t xfer = 0; - std::string fname; - ::apache::thrift::protocol::TType ftype; - int16_t fid; - - xfer += iprot->readStructBegin(fname); - - using ::apache::thrift::protocol::TProtocolException; - - - while (true) - { - xfer += iprot->readFieldBegin(fname, ftype, fid); - if (ftype == ::apache::thrift::protocol::T_STOP) { - break; - } - switch (fid) - { - case 0: - if (ftype == ::apache::thrift::protocol::T_LIST) { - { - (*(this->success)).clear(); uint32_t _size701; ::apache::thrift::protocol::TType _etype704; xfer += iprot->readListBegin(_etype704, _size701); - (*(this->success)).resize(_size701); + this->success.resize(_size701); uint32_t _i705; for (_i705 = 0; _i705 < _size701; ++_i705) { - xfer += iprot->readString((*(this->success))[_i705]); + xfer += iprot->readString(this->success[_i705]); + } + xfer += iprot->readListEnd(); + } + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_partition_name_to_vals_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_partition_name_to_vals_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); + 
std::vector ::const_iterator _iter706; + for (_iter706 = this->success.begin(); _iter706 != this->success.end(); ++_iter706) + { + xfer += oprot->writeString((*_iter706)); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_partition_name_to_vals_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + (*(this->success)).clear(); + uint32_t _size707; + ::apache::thrift::protocol::TType _etype710; + xfer += iprot->readListBegin(_etype710, _size707); + (*(this->success)).resize(_size707); + uint32_t _i711; + for (_i711 = 0; _i711 < _size707; ++_i711) + { + xfer += iprot->readString((*(this->success))[_i711]); } xfer += iprot->readListEnd(); } @@ -14353,17 +14353,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::read(::apache::thrif if (ftype == ::apache::thrift::protocol::T_MAP) { { this->success.clear(); - uint32_t _size706; - ::apache::thrift::protocol::TType _ktype707; - ::apache::thrift::protocol::TType _vtype708; - xfer += iprot->readMapBegin(_ktype707, _vtype708, _size706); - uint32_t _i710; - for (_i710 = 0; _i710 < _size706; ++_i710) + uint32_t _size712; + ::apache::thrift::protocol::TType _ktype713; + ::apache::thrift::protocol::TType _vtype714; + xfer += iprot->readMapBegin(_ktype713, _vtype714, _size712); + uint32_t _i716; + for (_i716 = 0; _i716 < _size712; ++_i716) { - std::string _key711; - xfer += iprot->readString(_key711); - std::string& _val712 = this->success[_key711]; - xfer += iprot->readString(_val712); + std::string _key717; + xfer += iprot->readString(_key717); + std::string& _val718 = this->success[_key717]; + xfer += iprot->readString(_val718); } xfer += iprot->readMapEnd(); } @@ -14402,11 +14402,11 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_result::write(::apache::thri xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_MAP, 0); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::map ::const_iterator _iter713; - for (_iter713 = this->success.begin(); _iter713 != this->success.end(); ++_iter713) + std::map ::const_iterator _iter719; + for (_iter719 = this->success.begin(); _iter719 != this->success.end(); ++_iter719) { - xfer += oprot->writeString(_iter713->first); - xfer += oprot->writeString(_iter713->second); + xfer += oprot->writeString(_iter719->first); + xfer += oprot->writeString(_iter719->second); } xfer += oprot->writeMapEnd(); } @@ -14445,17 +14445,17 @@ uint32_t ThriftHiveMetastore_partition_name_to_spec_presult::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { (*(this->success)).clear(); - uint32_t _size714; - ::apache::thrift::protocol::TType _ktype715; - ::apache::thrift::protocol::TType _vtype716; - xfer += 
iprot->readMapBegin(_ktype715, _vtype716, _size714); - uint32_t _i718; - for (_i718 = 0; _i718 < _size714; ++_i718) + uint32_t _size720; + ::apache::thrift::protocol::TType _ktype721; + ::apache::thrift::protocol::TType _vtype722; + xfer += iprot->readMapBegin(_ktype721, _vtype722, _size720); + uint32_t _i724; + for (_i724 = 0; _i724 < _size720; ++_i724) { - std::string _key719; - xfer += iprot->readString(_key719); - std::string& _val720 = (*(this->success))[_key719]; - xfer += iprot->readString(_val720); + std::string _key725; + xfer += iprot->readString(_key725); + std::string& _val726 = (*(this->success))[_key725]; + xfer += iprot->readString(_val726); } xfer += iprot->readMapEnd(); } @@ -14524,17 +14524,17 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size721; - ::apache::thrift::protocol::TType _ktype722; - ::apache::thrift::protocol::TType _vtype723; - xfer += iprot->readMapBegin(_ktype722, _vtype723, _size721); - uint32_t _i725; - for (_i725 = 0; _i725 < _size721; ++_i725) + uint32_t _size727; + ::apache::thrift::protocol::TType _ktype728; + ::apache::thrift::protocol::TType _vtype729; + xfer += iprot->readMapBegin(_ktype728, _vtype729, _size727); + uint32_t _i731; + for (_i731 = 0; _i731 < _size727; ++_i731) { - std::string _key726; - xfer += iprot->readString(_key726); - std::string& _val727 = this->part_vals[_key726]; - xfer += iprot->readString(_val727); + std::string _key732; + xfer += iprot->readString(_key732); + std::string& _val733 = this->part_vals[_key732]; + xfer += iprot->readString(_val733); } xfer += iprot->readMapEnd(); } @@ -14545,9 +14545,9 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::read(::apache::thrift:: break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast728; - xfer += iprot->readI32(ecast728); - this->eventType = (PartitionEventType::type)ecast728; + int32_t ecast734; + xfer += iprot->readI32(ecast734); + this->eventType = (PartitionEventType::type)ecast734; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -14580,11 +14580,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_args::write(::apache::thrift: xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter729; - for (_iter729 = this->part_vals.begin(); _iter729 != this->part_vals.end(); ++_iter729) + std::map ::const_iterator _iter735; + for (_iter735 = this->part_vals.begin(); _iter735 != this->part_vals.end(); ++_iter735) { - xfer += oprot->writeString(_iter729->first); - xfer += oprot->writeString(_iter729->second); + xfer += oprot->writeString(_iter735->first); + xfer += oprot->writeString(_iter735->second); } xfer += oprot->writeMapEnd(); } @@ -14614,11 +14614,11 @@ uint32_t ThriftHiveMetastore_markPartitionForEvent_pargs::write(::apache::thrift xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter730; - for (_iter730 = (*(this->part_vals)).begin(); _iter730 != (*(this->part_vals)).end(); ++_iter730) + std::map ::const_iterator _iter736; + for (_iter736 = (*(this->part_vals)).begin(); _iter736 != 
(*(this->part_vals)).end(); ++_iter736) { - xfer += oprot->writeString(_iter730->first); - xfer += oprot->writeString(_iter730->second); + xfer += oprot->writeString(_iter736->first); + xfer += oprot->writeString(_iter736->second); } xfer += oprot->writeMapEnd(); } @@ -14869,17 +14869,17 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri if (ftype == ::apache::thrift::protocol::T_MAP) { { this->part_vals.clear(); - uint32_t _size731; - ::apache::thrift::protocol::TType _ktype732; - ::apache::thrift::protocol::TType _vtype733; - xfer += iprot->readMapBegin(_ktype732, _vtype733, _size731); - uint32_t _i735; - for (_i735 = 0; _i735 < _size731; ++_i735) + uint32_t _size737; + ::apache::thrift::protocol::TType _ktype738; + ::apache::thrift::protocol::TType _vtype739; + xfer += iprot->readMapBegin(_ktype738, _vtype739, _size737); + uint32_t _i741; + for (_i741 = 0; _i741 < _size737; ++_i741) { - std::string _key736; - xfer += iprot->readString(_key736); - std::string& _val737 = this->part_vals[_key736]; - xfer += iprot->readString(_val737); + std::string _key742; + xfer += iprot->readString(_key742); + std::string& _val743 = this->part_vals[_key742]; + xfer += iprot->readString(_val743); } xfer += iprot->readMapEnd(); } @@ -14890,9 +14890,9 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::read(::apache::thri break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast738; - xfer += iprot->readI32(ecast738); - this->eventType = (PartitionEventType::type)ecast738; + int32_t ecast744; + xfer += iprot->readI32(ecast744); + this->eventType = (PartitionEventType::type)ecast744; this->__isset.eventType = true; } else { xfer += iprot->skip(ftype); @@ -14925,11 +14925,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_args::write(::apache::thr xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->part_vals.size())); - std::map ::const_iterator _iter739; - for (_iter739 = this->part_vals.begin(); _iter739 != this->part_vals.end(); ++_iter739) + std::map ::const_iterator _iter745; + for (_iter745 = this->part_vals.begin(); _iter745 != this->part_vals.end(); ++_iter745) { - xfer += oprot->writeString(_iter739->first); - xfer += oprot->writeString(_iter739->second); + xfer += oprot->writeString(_iter745->first); + xfer += oprot->writeString(_iter745->second); } xfer += oprot->writeMapEnd(); } @@ -14959,11 +14959,11 @@ uint32_t ThriftHiveMetastore_isPartitionMarkedForEvent_pargs::write(::apache::th xfer += oprot->writeFieldBegin("part_vals", ::apache::thrift::protocol::T_MAP, 3); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast((*(this->part_vals)).size())); - std::map ::const_iterator _iter740; - for (_iter740 = (*(this->part_vals)).begin(); _iter740 != (*(this->part_vals)).end(); ++_iter740) + std::map ::const_iterator _iter746; + for (_iter746 = (*(this->part_vals)).begin(); _iter746 != (*(this->part_vals)).end(); ++_iter746) { - xfer += oprot->writeString(_iter740->first); - xfer += oprot->writeString(_iter740->second); + xfer += oprot->writeString(_iter746->first); + xfer += oprot->writeString(_iter746->second); } xfer += oprot->writeMapEnd(); } @@ -16268,14 +16268,14 @@ uint32_t ThriftHiveMetastore_get_indexes_result::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->success.clear(); - uint32_t _size741; - ::apache::thrift::protocol::TType _etype744; - xfer += iprot->readListBegin(_etype744, _size741); - this->success.resize(_size741); - uint32_t _i745; - for (_i745 = 0; _i745 < _size741; ++_i745) + uint32_t _size747; + ::apache::thrift::protocol::TType _etype750; + xfer += iprot->readListBegin(_etype750, _size747); + this->success.resize(_size747); + uint32_t _i751; + for (_i751 = 0; _i751 < _size747; ++_i751) { - xfer += this->success[_i745].read(iprot); + xfer += this->success[_i751].read(iprot); } xfer += iprot->readListEnd(); } @@ -16322,10 +16322,10 @@ uint32_t ThriftHiveMetastore_get_indexes_result::write(::apache::thrift::protoco xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter746; - for (_iter746 = this->success.begin(); _iter746 != this->success.end(); ++_iter746) + std::vector ::const_iterator _iter752; + for (_iter752 = this->success.begin(); _iter752 != this->success.end(); ++_iter752) { - xfer += (*_iter746).write(oprot); + xfer += (*_iter752).write(oprot); } xfer += oprot->writeListEnd(); } @@ -16368,14 +16368,14 @@ uint32_t ThriftHiveMetastore_get_indexes_presult::read(::apache::thrift::protoco if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size747; - ::apache::thrift::protocol::TType _etype750; - xfer += iprot->readListBegin(_etype750, _size747); - (*(this->success)).resize(_size747); - uint32_t _i751; - for (_i751 = 0; _i751 < _size747; ++_i751) + uint32_t _size753; + ::apache::thrift::protocol::TType _etype756; + xfer += iprot->readListBegin(_etype756, _size753); + (*(this->success)).resize(_size753); + uint32_t _i757; + for (_i757 = 0; _i757 < _size753; ++_i757) { - xfer += (*(this->success))[_i751].read(iprot); + xfer += (*(this->success))[_i757].read(iprot); } xfer += iprot->readListEnd(); } @@ -16534,14 +16534,14 @@ uint32_t ThriftHiveMetastore_get_index_names_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size752; - ::apache::thrift::protocol::TType _etype755; - xfer += iprot->readListBegin(_etype755, _size752); - this->success.resize(_size752); - uint32_t _i756; - for (_i756 = 0; _i756 < _size752; ++_i756) + uint32_t _size758; + ::apache::thrift::protocol::TType _etype761; + xfer += iprot->readListBegin(_etype761, _size758); + this->success.resize(_size758); + uint32_t _i762; + for (_i762 = 0; _i762 < _size758; ++_i762) { - xfer += iprot->readString(this->success[_i756]); + xfer += iprot->readString(this->success[_i762]); } xfer += iprot->readListEnd(); } @@ -16580,10 +16580,10 @@ uint32_t ThriftHiveMetastore_get_index_names_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter757; - for (_iter757 = this->success.begin(); _iter757 != this->success.end(); ++_iter757) + std::vector ::const_iterator _iter763; + for (_iter763 = this->success.begin(); _iter763 != this->success.end(); ++_iter763) { - xfer += oprot->writeString((*_iter757)); + xfer += oprot->writeString((*_iter763)); } xfer += oprot->writeListEnd(); } @@ -16622,14 +16622,14 @@ uint32_t ThriftHiveMetastore_get_index_names_presult::read(::apache::thrift::pro if (ftype 
== ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size758; - ::apache::thrift::protocol::TType _etype761; - xfer += iprot->readListBegin(_etype761, _size758); - (*(this->success)).resize(_size758); - uint32_t _i762; - for (_i762 = 0; _i762 < _size758; ++_i762) + uint32_t _size764; + ::apache::thrift::protocol::TType _etype767; + xfer += iprot->readListBegin(_etype767, _size764); + (*(this->success)).resize(_size764); + uint32_t _i768; + for (_i768 = 0; _i768 < _size764; ++_i768) { - xfer += iprot->readString((*(this->success))[_i762]); + xfer += iprot->readString((*(this->success))[_i768]); } xfer += iprot->readListEnd(); } @@ -18312,6 +18312,248 @@ uint32_t ThriftHiveMetastore_get_aggr_stats_for_presult::read(::apache::thrift:: return xfer; } +uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->request.read(iprot); + this->__isset.request = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_args::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_args"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->request.write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_pargs::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_pargs"); + + xfer += oprot->writeFieldBegin("request", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += (*(this->request)).write(oprot); + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool(this->success); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + 
break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_result::write(::apache::thrift::protocol::TProtocol* oprot) const { + + uint32_t xfer = 0; + + xfer += oprot->writeStructBegin("ThriftHiveMetastore_set_aggr_stats_for_result"); + + if (this->__isset.success) { + xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_BOOL, 0); + xfer += oprot->writeBool(this->success); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o1) { + xfer += oprot->writeFieldBegin("o1", ::apache::thrift::protocol::T_STRUCT, 1); + xfer += this->o1.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o2) { + xfer += oprot->writeFieldBegin("o2", ::apache::thrift::protocol::T_STRUCT, 2); + xfer += this->o2.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o3) { + xfer += oprot->writeFieldBegin("o3", ::apache::thrift::protocol::T_STRUCT, 3); + xfer += this->o3.write(oprot); + xfer += oprot->writeFieldEnd(); + } else if (this->__isset.o4) { + xfer += oprot->writeFieldBegin("o4", ::apache::thrift::protocol::T_STRUCT, 4); + xfer += this->o4.write(oprot); + xfer += oprot->writeFieldEnd(); + } + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +uint32_t ThriftHiveMetastore_set_aggr_stats_for_presult::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 0: + if (ftype == ::apache::thrift::protocol::T_BOOL) { + xfer += iprot->readBool((*(this->success))); + this->__isset.success = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 1: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o1.read(iprot); + this->__isset.o1 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 2: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o2.read(iprot); + this->__isset.o2 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 3: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o3.read(iprot); + this->__isset.o3 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + case 4: + if (ftype == ::apache::thrift::protocol::T_STRUCT) { + xfer += this->o4.read(iprot); + this->__isset.o4 = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + return xfer; +} + uint32_t ThriftHiveMetastore_delete_partition_column_statistics_args::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; @@ -19616,14 +19858,14 @@ uint32_t ThriftHiveMetastore_get_functions_result::read(::apache::thrift::protoc if (ftype == 
::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size763; - ::apache::thrift::protocol::TType _etype766; - xfer += iprot->readListBegin(_etype766, _size763); - this->success.resize(_size763); - uint32_t _i767; - for (_i767 = 0; _i767 < _size763; ++_i767) + uint32_t _size769; + ::apache::thrift::protocol::TType _etype772; + xfer += iprot->readListBegin(_etype772, _size769); + this->success.resize(_size769); + uint32_t _i773; + for (_i773 = 0; _i773 < _size769; ++_i773) { - xfer += iprot->readString(this->success[_i767]); + xfer += iprot->readString(this->success[_i773]); } xfer += iprot->readListEnd(); } @@ -19662,10 +19904,10 @@ uint32_t ThriftHiveMetastore_get_functions_result::write(::apache::thrift::proto xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter768; - for (_iter768 = this->success.begin(); _iter768 != this->success.end(); ++_iter768) + std::vector ::const_iterator _iter774; + for (_iter774 = this->success.begin(); _iter774 != this->success.end(); ++_iter774) { - xfer += oprot->writeString((*_iter768)); + xfer += oprot->writeString((*_iter774)); } xfer += oprot->writeListEnd(); } @@ -19704,14 +19946,14 @@ uint32_t ThriftHiveMetastore_get_functions_presult::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size769; - ::apache::thrift::protocol::TType _etype772; - xfer += iprot->readListBegin(_etype772, _size769); - (*(this->success)).resize(_size769); - uint32_t _i773; - for (_i773 = 0; _i773 < _size769; ++_i773) + uint32_t _size775; + ::apache::thrift::protocol::TType _etype778; + xfer += iprot->readListBegin(_etype778, _size775); + (*(this->success)).resize(_size775); + uint32_t _i779; + for (_i779 = 0; _i779 < _size775; ++_i779) { - xfer += iprot->readString((*(this->success))[_i773]); + xfer += iprot->readString((*(this->success))[_i779]); } xfer += iprot->readListEnd(); } @@ -20391,14 +20633,14 @@ uint32_t ThriftHiveMetastore_get_role_names_result::read(::apache::thrift::proto if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size774; - ::apache::thrift::protocol::TType _etype777; - xfer += iprot->readListBegin(_etype777, _size774); - this->success.resize(_size774); - uint32_t _i778; - for (_i778 = 0; _i778 < _size774; ++_i778) + uint32_t _size780; + ::apache::thrift::protocol::TType _etype783; + xfer += iprot->readListBegin(_etype783, _size780); + this->success.resize(_size780); + uint32_t _i784; + for (_i784 = 0; _i784 < _size780; ++_i784) { - xfer += iprot->readString(this->success[_i778]); + xfer += iprot->readString(this->success[_i784]); } xfer += iprot->readListEnd(); } @@ -20437,10 +20679,10 @@ uint32_t ThriftHiveMetastore_get_role_names_result::write(::apache::thrift::prot xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter779; - for (_iter779 = this->success.begin(); _iter779 != this->success.end(); ++_iter779) + std::vector ::const_iterator _iter785; + for (_iter785 = this->success.begin(); _iter785 != this->success.end(); ++_iter785) { - xfer += oprot->writeString((*_iter779)); + xfer += oprot->writeString((*_iter785)); } xfer += oprot->writeListEnd(); } @@ -20479,14 +20721,14 @@ uint32_t 
ThriftHiveMetastore_get_role_names_presult::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size780; - ::apache::thrift::protocol::TType _etype783; - xfer += iprot->readListBegin(_etype783, _size780); - (*(this->success)).resize(_size780); - uint32_t _i784; - for (_i784 = 0; _i784 < _size780; ++_i784) + uint32_t _size786; + ::apache::thrift::protocol::TType _etype789; + xfer += iprot->readListBegin(_etype789, _size786); + (*(this->success)).resize(_size786); + uint32_t _i790; + for (_i790 = 0; _i790 < _size786; ++_i790) { - xfer += iprot->readString((*(this->success))[_i784]); + xfer += iprot->readString((*(this->success))[_i790]); } xfer += iprot->readListEnd(); } @@ -20553,9 +20795,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast785; - xfer += iprot->readI32(ecast785); - this->principal_type = (PrincipalType::type)ecast785; + int32_t ecast791; + xfer += iprot->readI32(ecast791); + this->principal_type = (PrincipalType::type)ecast791; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -20571,9 +20813,9 @@ uint32_t ThriftHiveMetastore_grant_role_args::read(::apache::thrift::protocol::T break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast786; - xfer += iprot->readI32(ecast786); - this->grantorType = (PrincipalType::type)ecast786; + int32_t ecast792; + xfer += iprot->readI32(ecast792); + this->grantorType = (PrincipalType::type)ecast792; this->__isset.grantorType = true; } else { xfer += iprot->skip(ftype); @@ -20819,9 +21061,9 @@ uint32_t ThriftHiveMetastore_revoke_role_args::read(::apache::thrift::protocol:: break; case 3: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast787; - xfer += iprot->readI32(ecast787); - this->principal_type = (PrincipalType::type)ecast787; + int32_t ecast793; + xfer += iprot->readI32(ecast793); + this->principal_type = (PrincipalType::type)ecast793; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -21027,9 +21269,9 @@ uint32_t ThriftHiveMetastore_list_roles_args::read(::apache::thrift::protocol::T break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast788; - xfer += iprot->readI32(ecast788); - this->principal_type = (PrincipalType::type)ecast788; + int32_t ecast794; + xfer += iprot->readI32(ecast794); + this->principal_type = (PrincipalType::type)ecast794; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -21105,14 +21347,14 @@ uint32_t ThriftHiveMetastore_list_roles_result::read(::apache::thrift::protocol: if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size789; - ::apache::thrift::protocol::TType _etype792; - xfer += iprot->readListBegin(_etype792, _size789); - this->success.resize(_size789); - uint32_t _i793; - for (_i793 = 0; _i793 < _size789; ++_i793) + uint32_t _size795; + ::apache::thrift::protocol::TType _etype798; + xfer += iprot->readListBegin(_etype798, _size795); + this->success.resize(_size795); + uint32_t _i799; + for (_i799 = 0; _i799 < _size795; ++_i799) { - xfer += this->success[_i793].read(iprot); + xfer += this->success[_i799].read(iprot); } xfer += iprot->readListEnd(); } @@ -21151,10 +21393,10 @@ uint32_t ThriftHiveMetastore_list_roles_result::write(::apache::thrift::protocol xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += 
oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter794; - for (_iter794 = this->success.begin(); _iter794 != this->success.end(); ++_iter794) + std::vector ::const_iterator _iter800; + for (_iter800 = this->success.begin(); _iter800 != this->success.end(); ++_iter800) { - xfer += (*_iter794).write(oprot); + xfer += (*_iter800).write(oprot); } xfer += oprot->writeListEnd(); } @@ -21193,14 +21435,14 @@ uint32_t ThriftHiveMetastore_list_roles_presult::read(::apache::thrift::protocol if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size795; - ::apache::thrift::protocol::TType _etype798; - xfer += iprot->readListBegin(_etype798, _size795); - (*(this->success)).resize(_size795); - uint32_t _i799; - for (_i799 = 0; _i799 < _size795; ++_i799) + uint32_t _size801; + ::apache::thrift::protocol::TType _etype804; + xfer += iprot->readListBegin(_etype804, _size801); + (*(this->success)).resize(_size801); + uint32_t _i805; + for (_i805 = 0; _i805 < _size801; ++_i805) { - xfer += (*(this->success))[_i799].read(iprot); + xfer += (*(this->success))[_i805].read(iprot); } xfer += iprot->readListEnd(); } @@ -21815,14 +22057,14 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size800; - ::apache::thrift::protocol::TType _etype803; - xfer += iprot->readListBegin(_etype803, _size800); - this->group_names.resize(_size800); - uint32_t _i804; - for (_i804 = 0; _i804 < _size800; ++_i804) + uint32_t _size806; + ::apache::thrift::protocol::TType _etype809; + xfer += iprot->readListBegin(_etype809, _size806); + this->group_names.resize(_size806); + uint32_t _i810; + for (_i810 = 0; _i810 < _size806; ++_i810) { - xfer += iprot->readString(this->group_names[_i804]); + xfer += iprot->readString(this->group_names[_i810]); } xfer += iprot->readListEnd(); } @@ -21858,10 +22100,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_args::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->group_names.size())); - std::vector ::const_iterator _iter805; - for (_iter805 = this->group_names.begin(); _iter805 != this->group_names.end(); ++_iter805) + std::vector ::const_iterator _iter811; + for (_iter811 = this->group_names.begin(); _iter811 != this->group_names.end(); ++_iter811) { - xfer += oprot->writeString((*_iter805)); + xfer += oprot->writeString((*_iter811)); } xfer += oprot->writeListEnd(); } @@ -21887,10 +22129,10 @@ uint32_t ThriftHiveMetastore_get_privilege_set_pargs::write(::apache::thrift::pr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter806; - for (_iter806 = (*(this->group_names)).begin(); _iter806 != (*(this->group_names)).end(); ++_iter806) + std::vector ::const_iterator _iter812; + for (_iter812 = (*(this->group_names)).begin(); _iter812 != (*(this->group_names)).end(); ++_iter812) { - xfer += oprot->writeString((*_iter806)); + xfer += oprot->writeString((*_iter812)); } xfer += oprot->writeListEnd(); } @@ -22047,9 +22289,9 @@ uint32_t ThriftHiveMetastore_list_privileges_args::read(::apache::thrift::protoc break; case 2: if (ftype == 
::apache::thrift::protocol::T_I32) { - int32_t ecast807; - xfer += iprot->readI32(ecast807); - this->principal_type = (PrincipalType::type)ecast807; + int32_t ecast813; + xfer += iprot->readI32(ecast813); + this->principal_type = (PrincipalType::type)ecast813; this->__isset.principal_type = true; } else { xfer += iprot->skip(ftype); @@ -22141,14 +22383,14 @@ uint32_t ThriftHiveMetastore_list_privileges_result::read(::apache::thrift::prot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size808; - ::apache::thrift::protocol::TType _etype811; - xfer += iprot->readListBegin(_etype811, _size808); - this->success.resize(_size808); - uint32_t _i812; - for (_i812 = 0; _i812 < _size808; ++_i812) + uint32_t _size814; + ::apache::thrift::protocol::TType _etype817; + xfer += iprot->readListBegin(_etype817, _size814); + this->success.resize(_size814); + uint32_t _i818; + for (_i818 = 0; _i818 < _size814; ++_i818) { - xfer += this->success[_i812].read(iprot); + xfer += this->success[_i818].read(iprot); } xfer += iprot->readListEnd(); } @@ -22187,10 +22429,10 @@ uint32_t ThriftHiveMetastore_list_privileges_result::write(::apache::thrift::pro xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->success.size())); - std::vector ::const_iterator _iter813; - for (_iter813 = this->success.begin(); _iter813 != this->success.end(); ++_iter813) + std::vector ::const_iterator _iter819; + for (_iter819 = this->success.begin(); _iter819 != this->success.end(); ++_iter819) { - xfer += (*_iter813).write(oprot); + xfer += (*_iter819).write(oprot); } xfer += oprot->writeListEnd(); } @@ -22229,14 +22471,14 @@ uint32_t ThriftHiveMetastore_list_privileges_presult::read(::apache::thrift::pro if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size814; - ::apache::thrift::protocol::TType _etype817; - xfer += iprot->readListBegin(_etype817, _size814); - (*(this->success)).resize(_size814); - uint32_t _i818; - for (_i818 = 0; _i818 < _size814; ++_i818) + uint32_t _size820; + ::apache::thrift::protocol::TType _etype823; + xfer += iprot->readListBegin(_etype823, _size820); + (*(this->success)).resize(_size820); + uint32_t _i824; + for (_i824 = 0; _i824 < _size820; ++_i824) { - xfer += (*(this->success))[_i818].read(iprot); + xfer += (*(this->success))[_i824].read(iprot); } xfer += iprot->readListEnd(); } @@ -22843,14 +23085,14 @@ uint32_t ThriftHiveMetastore_set_ugi_args::read(::apache::thrift::protocol::TPro if (ftype == ::apache::thrift::protocol::T_LIST) { { this->group_names.clear(); - uint32_t _size819; - ::apache::thrift::protocol::TType _etype822; - xfer += iprot->readListBegin(_etype822, _size819); - this->group_names.resize(_size819); - uint32_t _i823; - for (_i823 = 0; _i823 < _size819; ++_i823) + uint32_t _size825; + ::apache::thrift::protocol::TType _etype828; + xfer += iprot->readListBegin(_etype828, _size825); + this->group_names.resize(_size825); + uint32_t _i829; + for (_i829 = 0; _i829 < _size825; ++_i829) { - xfer += iprot->readString(this->group_names[_i823]); + xfer += iprot->readString(this->group_names[_i829]); } xfer += iprot->readListEnd(); } @@ -22882,10 +23124,10 @@ uint32_t ThriftHiveMetastore_set_ugi_args::write(::apache::thrift::protocol::TPr xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, 
static_cast(this->group_names.size())); - std::vector ::const_iterator _iter824; - for (_iter824 = this->group_names.begin(); _iter824 != this->group_names.end(); ++_iter824) + std::vector ::const_iterator _iter830; + for (_iter830 = this->group_names.begin(); _iter830 != this->group_names.end(); ++_iter830) { - xfer += oprot->writeString((*_iter824)); + xfer += oprot->writeString((*_iter830)); } xfer += oprot->writeListEnd(); } @@ -22907,10 +23149,10 @@ uint32_t ThriftHiveMetastore_set_ugi_pargs::write(::apache::thrift::protocol::TP xfer += oprot->writeFieldBegin("group_names", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast((*(this->group_names)).size())); - std::vector ::const_iterator _iter825; - for (_iter825 = (*(this->group_names)).begin(); _iter825 != (*(this->group_names)).end(); ++_iter825) + std::vector ::const_iterator _iter831; + for (_iter831 = (*(this->group_names)).begin(); _iter831 != (*(this->group_names)).end(); ++_iter831) { - xfer += oprot->writeString((*_iter825)); + xfer += oprot->writeString((*_iter831)); } xfer += oprot->writeListEnd(); } @@ -22945,14 +23187,14 @@ uint32_t ThriftHiveMetastore_set_ugi_result::read(::apache::thrift::protocol::TP if (ftype == ::apache::thrift::protocol::T_LIST) { { this->success.clear(); - uint32_t _size826; - ::apache::thrift::protocol::TType _etype829; - xfer += iprot->readListBegin(_etype829, _size826); - this->success.resize(_size826); - uint32_t _i830; - for (_i830 = 0; _i830 < _size826; ++_i830) + uint32_t _size832; + ::apache::thrift::protocol::TType _etype835; + xfer += iprot->readListBegin(_etype835, _size832); + this->success.resize(_size832); + uint32_t _i836; + for (_i836 = 0; _i836 < _size832; ++_i836) { - xfer += iprot->readString(this->success[_i830]); + xfer += iprot->readString(this->success[_i836]); } xfer += iprot->readListEnd(); } @@ -22991,10 +23233,10 @@ uint32_t ThriftHiveMetastore_set_ugi_result::write(::apache::thrift::protocol::T xfer += oprot->writeFieldBegin("success", ::apache::thrift::protocol::T_LIST, 0); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->success.size())); - std::vector ::const_iterator _iter831; - for (_iter831 = this->success.begin(); _iter831 != this->success.end(); ++_iter831) + std::vector ::const_iterator _iter837; + for (_iter837 = this->success.begin(); _iter837 != this->success.end(); ++_iter837) { - xfer += oprot->writeString((*_iter831)); + xfer += oprot->writeString((*_iter837)); } xfer += oprot->writeListEnd(); } @@ -23033,14 +23275,14 @@ uint32_t ThriftHiveMetastore_set_ugi_presult::read(::apache::thrift::protocol::T if (ftype == ::apache::thrift::protocol::T_LIST) { { (*(this->success)).clear(); - uint32_t _size832; - ::apache::thrift::protocol::TType _etype835; - xfer += iprot->readListBegin(_etype835, _size832); - (*(this->success)).resize(_size832); - uint32_t _i836; - for (_i836 = 0; _i836 < _size832; ++_i836) + uint32_t _size838; + ::apache::thrift::protocol::TType _etype841; + xfer += iprot->readListBegin(_etype841, _size838); + (*(this->success)).resize(_size838); + uint32_t _i842; + for (_i842 = 0; _i842 < _size838; ++_i842) { - xfer += iprot->readString((*(this->success))[_i836]); + xfer += iprot->readString((*(this->success))[_i842]); } xfer += iprot->readListEnd(); } @@ -30673,6 +30915,76 @@ void ThriftHiveMetastoreClient::recv_get_aggr_stats_for(AggrStats& _return) throw 
::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); } +bool ThriftHiveMetastoreClient::set_aggr_stats_for(const SetPartitionsStatsRequest& request) +{ + send_set_aggr_stats_for(request); + return recv_set_aggr_stats_for(); +} + +void ThriftHiveMetastoreClient::send_set_aggr_stats_for(const SetPartitionsStatsRequest& request) +{ + int32_t cseqid = 0; + oprot_->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_CALL, cseqid); + + ThriftHiveMetastore_set_aggr_stats_for_pargs args; + args.request = &request; + args.write(oprot_); + + oprot_->writeMessageEnd(); + oprot_->getTransport()->writeEnd(); + oprot_->getTransport()->flush(); +} + +bool ThriftHiveMetastoreClient::recv_set_aggr_stats_for() +{ + + int32_t rseqid = 0; + std::string fname; + ::apache::thrift::protocol::TMessageType mtype; + + iprot_->readMessageBegin(fname, mtype, rseqid); + if (mtype == ::apache::thrift::protocol::T_EXCEPTION) { + ::apache::thrift::TApplicationException x; + x.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + throw x; + } + if (mtype != ::apache::thrift::protocol::T_REPLY) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + if (fname.compare("set_aggr_stats_for") != 0) { + iprot_->skip(::apache::thrift::protocol::T_STRUCT); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + } + bool _return; + ThriftHiveMetastore_set_aggr_stats_for_presult result; + result.success = &_return; + result.read(iprot_); + iprot_->readMessageEnd(); + iprot_->getTransport()->readEnd(); + + if (result.__isset.success) { + return _return; + } + if (result.__isset.o1) { + throw result.o1; + } + if (result.__isset.o2) { + throw result.o2; + } + if (result.__isset.o3) { + throw result.o3; + } + if (result.__isset.o4) { + throw result.o4; + } + throw ::apache::thrift::TApplicationException(::apache::thrift::TApplicationException::MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); +} + bool ThriftHiveMetastoreClient::delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); @@ -37512,6 +37824,72 @@ void ThriftHiveMetastoreProcessor::process_get_aggr_stats_for(int32_t seqid, ::a } } +void ThriftHiveMetastoreProcessor::process_set_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) +{ + void* ctx = NULL; + if (this->eventHandler_.get() != NULL) { + ctx = this->eventHandler_->getContext("ThriftHiveMetastore.set_aggr_stats_for", callContext); + } + ::apache::thrift::TProcessorContextFreer freer(this->eventHandler_.get(), ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preRead(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + } + + ThriftHiveMetastore_set_aggr_stats_for_args args; + args.read(iprot); + iprot->readMessageEnd(); + uint32_t bytes = iprot->getTransport()->readEnd(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postRead(ctx, "ThriftHiveMetastore.set_aggr_stats_for", bytes); + } + + ThriftHiveMetastore_set_aggr_stats_for_result result; + try { + result.success = iface_->set_aggr_stats_for(args.request); + result.__isset.success = true; + } 
catch (NoSuchObjectException &o1) { + result.o1 = o1; + result.__isset.o1 = true; + } catch (InvalidObjectException &o2) { + result.o2 = o2; + result.__isset.o2 = true; + } catch (MetaException &o3) { + result.o3 = o3; + result.__isset.o3 = true; + } catch (InvalidInputException &o4) { + result.o4 = o4; + result.__isset.o4 = true; + } catch (const std::exception& e) { + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->handlerError(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + } + + ::apache::thrift::TApplicationException x(e.what()); + oprot->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_EXCEPTION, seqid); + x.write(oprot); + oprot->writeMessageEnd(); + oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + return; + } + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->preWrite(ctx, "ThriftHiveMetastore.set_aggr_stats_for"); + } + + oprot->writeMessageBegin("set_aggr_stats_for", ::apache::thrift::protocol::T_REPLY, seqid); + result.write(oprot); + oprot->writeMessageEnd(); + bytes = oprot->getTransport()->writeEnd(); + oprot->getTransport()->flush(); + + if (this->eventHandler_.get() != NULL) { + this->eventHandler_->postWrite(ctx, "ThriftHiveMetastore.set_aggr_stats_for", bytes); + } +} + void ThriftHiveMetastoreProcessor::process_delete_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext) { void* ctx = NULL; diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h index 0e328dd..ef1352b 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore.h @@ -90,6 +90,7 @@ class ThriftHiveMetastoreIf : virtual public ::facebook::fb303::FacebookService virtual void get_table_statistics_req(TableStatsResult& _return, const TableStatsRequest& request) = 0; virtual void get_partitions_statistics_req(PartitionsStatsResult& _return, const PartitionsStatsRequest& request) = 0; virtual void get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request) = 0; + virtual bool set_aggr_stats_for(const SetPartitionsStatsRequest& request) = 0; virtual bool delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) = 0; virtual bool delete_table_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& col_name) = 0; virtual void create_function(const Function& func) = 0; @@ -391,6 +392,10 @@ class ThriftHiveMetastoreNull : virtual public ThriftHiveMetastoreIf , virtual p void get_aggr_stats_for(AggrStats& /* _return */, const PartitionsStatsRequest& /* request */) { return; } + bool set_aggr_stats_for(const SetPartitionsStatsRequest& /* request */) { + bool _return = false; + return _return; + } bool delete_partition_column_statistics(const std::string& /* db_name */, const std::string& /* tbl_name */, const std::string& /* part_name */, const std::string& /* col_name */) { bool _return = false; return _return; @@ -11135,6 +11140,154 @@ class ThriftHiveMetastore_get_aggr_stats_for_presult { }; +typedef struct _ThriftHiveMetastore_set_aggr_stats_for_args__isset { + _ThriftHiveMetastore_set_aggr_stats_for_args__isset() : request(false) {} + bool request; +} _ThriftHiveMetastore_set_aggr_stats_for_args__isset; + +class ThriftHiveMetastore_set_aggr_stats_for_args { + 
public: + + ThriftHiveMetastore_set_aggr_stats_for_args() { + } + + virtual ~ThriftHiveMetastore_set_aggr_stats_for_args() throw() {} + + SetPartitionsStatsRequest request; + + _ThriftHiveMetastore_set_aggr_stats_for_args__isset __isset; + + void __set_request(const SetPartitionsStatsRequest& val) { + request = val; + } + + bool operator == (const ThriftHiveMetastore_set_aggr_stats_for_args & rhs) const + { + if (!(request == rhs.request)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_set_aggr_stats_for_args &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_set_aggr_stats_for_args & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + + +class ThriftHiveMetastore_set_aggr_stats_for_pargs { + public: + + + virtual ~ThriftHiveMetastore_set_aggr_stats_for_pargs() throw() {} + + const SetPartitionsStatsRequest* request; + + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_set_aggr_stats_for_result__isset { + _ThriftHiveMetastore_set_aggr_stats_for_result__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success; + bool o1; + bool o2; + bool o3; + bool o4; +} _ThriftHiveMetastore_set_aggr_stats_for_result__isset; + +class ThriftHiveMetastore_set_aggr_stats_for_result { + public: + + ThriftHiveMetastore_set_aggr_stats_for_result() : success(0) { + } + + virtual ~ThriftHiveMetastore_set_aggr_stats_for_result() throw() {} + + bool success; + NoSuchObjectException o1; + InvalidObjectException o2; + MetaException o3; + InvalidInputException o4; + + _ThriftHiveMetastore_set_aggr_stats_for_result__isset __isset; + + void __set_success(const bool val) { + success = val; + } + + void __set_o1(const NoSuchObjectException& val) { + o1 = val; + } + + void __set_o2(const InvalidObjectException& val) { + o2 = val; + } + + void __set_o3(const MetaException& val) { + o3 = val; + } + + void __set_o4(const InvalidInputException& val) { + o4 = val; + } + + bool operator == (const ThriftHiveMetastore_set_aggr_stats_for_result & rhs) const + { + if (!(success == rhs.success)) + return false; + if (!(o1 == rhs.o1)) + return false; + if (!(o2 == rhs.o2)) + return false; + if (!(o3 == rhs.o3)) + return false; + if (!(o4 == rhs.o4)) + return false; + return true; + } + bool operator != (const ThriftHiveMetastore_set_aggr_stats_for_result &rhs) const { + return !(*this == rhs); + } + + bool operator < (const ThriftHiveMetastore_set_aggr_stats_for_result & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +typedef struct _ThriftHiveMetastore_set_aggr_stats_for_presult__isset { + _ThriftHiveMetastore_set_aggr_stats_for_presult__isset() : success(false), o1(false), o2(false), o3(false), o4(false) {} + bool success; + bool o1; + bool o2; + bool o3; + bool o4; +} _ThriftHiveMetastore_set_aggr_stats_for_presult__isset; + +class ThriftHiveMetastore_set_aggr_stats_for_presult { + public: + + + virtual ~ThriftHiveMetastore_set_aggr_stats_for_presult() throw() {} + + bool* success; + NoSuchObjectException o1; + InvalidObjectException o2; + MetaException o3; + InvalidInputException o4; + + _ThriftHiveMetastore_set_aggr_stats_for_presult__isset __isset; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + +}; + typedef struct 
_ThriftHiveMetastore_delete_partition_column_statistics_args__isset { _ThriftHiveMetastore_delete_partition_column_statistics_args__isset() : db_name(false), tbl_name(false), part_name(false), col_name(false) {} bool db_name; @@ -16047,6 +16200,9 @@ class ThriftHiveMetastoreClient : virtual public ThriftHiveMetastoreIf, public void get_aggr_stats_for(AggrStats& _return, const PartitionsStatsRequest& request); void send_get_aggr_stats_for(const PartitionsStatsRequest& request); void recv_get_aggr_stats_for(AggrStats& _return); + bool set_aggr_stats_for(const SetPartitionsStatsRequest& request); + void send_set_aggr_stats_for(const SetPartitionsStatsRequest& request); + bool recv_set_aggr_stats_for(); bool delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name); void send_delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name); bool recv_delete_partition_column_statistics(); @@ -16245,6 +16401,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP void process_get_table_statistics_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_partitions_statistics_req(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_get_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); + void process_set_aggr_stats_for(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_delete_partition_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_delete_table_column_statistics(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); void process_create_function(int32_t seqid, ::apache::thrift::protocol::TProtocol* iprot, ::apache::thrift::protocol::TProtocol* oprot, void* callContext); @@ -16361,6 +16518,7 @@ class ThriftHiveMetastoreProcessor : public ::facebook::fb303::FacebookServiceP processMap_["get_table_statistics_req"] = &ThriftHiveMetastoreProcessor::process_get_table_statistics_req; processMap_["get_partitions_statistics_req"] = &ThriftHiveMetastoreProcessor::process_get_partitions_statistics_req; processMap_["get_aggr_stats_for"] = &ThriftHiveMetastoreProcessor::process_get_aggr_stats_for; + processMap_["set_aggr_stats_for"] = &ThriftHiveMetastoreProcessor::process_set_aggr_stats_for; processMap_["delete_partition_column_statistics"] = &ThriftHiveMetastoreProcessor::process_delete_partition_column_statistics; processMap_["delete_table_column_statistics"] = &ThriftHiveMetastoreProcessor::process_delete_table_column_statistics; processMap_["create_function"] = &ThriftHiveMetastoreProcessor::process_create_function; @@ -17144,6 +17302,15 @@ class ThriftHiveMetastoreMultiface : virtual public ThriftHiveMetastoreIf, publi return; } + bool set_aggr_stats_for(const SetPartitionsStatsRequest& request) { + size_t sz = ifaces_.size(); + size_t i = 0; + for (; i < (sz - 1); ++i) { + ifaces_[i]->set_aggr_stats_for(request); + } + return ifaces_[i]->set_aggr_stats_for(request); + } 
+ bool delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { size_t sz = ifaces_.size(); size_t i = 0; diff --git a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp index 4bcb2e6..021f5c6 100644 --- a/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp +++ b/metastore/src/gen/thrift/gen-cpp/ThriftHiveMetastore_server.skeleton.cpp @@ -392,6 +392,11 @@ class ThriftHiveMetastoreHandler : virtual public ThriftHiveMetastoreIf { printf("get_aggr_stats_for\n"); } + bool set_aggr_stats_for(const SetPartitionsStatsRequest& request) { + // Your implementation goes here + printf("set_aggr_stats_for\n"); + } + bool delete_partition_column_statistics(const std::string& db_name, const std::string& tbl_name, const std::string& part_name, const std::string& col_name) { // Your implementation goes here printf("delete_partition_column_statistics\n"); diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp index 4768128..456400d 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.cpp @@ -4768,10 +4768,10 @@ void swap(AggrStats &a, AggrStats &b) { swap(a.partsFound, b.partsFound); } -const char* Schema::ascii_fingerprint = "5CFEE46C975F4E2368D905109B8E3B5B"; -const uint8_t Schema::binary_fingerprint[16] = {0x5C,0xFE,0xE4,0x6C,0x97,0x5F,0x4E,0x23,0x68,0xD9,0x05,0x10,0x9B,0x8E,0x3B,0x5B}; +const char* SetPartitionsStatsRequest::ascii_fingerprint = "635C0DA9A947DA57AAE693A5DFB86569"; +const uint8_t SetPartitionsStatsRequest::binary_fingerprint[16] = {0x63,0x5C,0x0D,0xA9,0xA9,0x47,0xDA,0x57,0xAA,0xE6,0x93,0xA5,0xDF,0xB8,0x65,0x69}; -uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { +uint32_t SetPartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { uint32_t xfer = 0; std::string fname; @@ -4782,6 +4782,7 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { using ::apache::thrift::protocol::TProtocolException; + bool isset_colStats = false; while (true) { @@ -4794,15 +4795,98 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { case 1: if (ftype == ::apache::thrift::protocol::T_LIST) { { - this->fieldSchemas.clear(); + this->colStats.clear(); uint32_t _size205; ::apache::thrift::protocol::TType _etype208; xfer += iprot->readListBegin(_etype208, _size205); - this->fieldSchemas.resize(_size205); + this->colStats.resize(_size205); uint32_t _i209; for (_i209 = 0; _i209 < _size205; ++_i209) { - xfer += this->fieldSchemas[_i209].read(iprot); + xfer += this->colStats[_i209].read(iprot); + } + xfer += iprot->readListEnd(); + } + isset_colStats = true; + } else { + xfer += iprot->skip(ftype); + } + break; + default: + xfer += iprot->skip(ftype); + break; + } + xfer += iprot->readFieldEnd(); + } + + xfer += iprot->readStructEnd(); + + if (!isset_colStats) + throw TProtocolException(TProtocolException::INVALID_DATA); + return xfer; +} + +uint32_t SetPartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) const { + uint32_t xfer = 0; + xfer += oprot->writeStructBegin("SetPartitionsStatsRequest"); + + xfer += oprot->writeFieldBegin("colStats", ::apache::thrift::protocol::T_LIST, 1); + { + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, 
static_cast(this->colStats.size())); + std::vector ::const_iterator _iter210; + for (_iter210 = this->colStats.begin(); _iter210 != this->colStats.end(); ++_iter210) + { + xfer += (*_iter210).write(oprot); + } + xfer += oprot->writeListEnd(); + } + xfer += oprot->writeFieldEnd(); + + xfer += oprot->writeFieldStop(); + xfer += oprot->writeStructEnd(); + return xfer; +} + +void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b) { + using ::std::swap; + swap(a.colStats, b.colStats); +} + +const char* Schema::ascii_fingerprint = "5CFEE46C975F4E2368D905109B8E3B5B"; +const uint8_t Schema::binary_fingerprint[16] = {0x5C,0xFE,0xE4,0x6C,0x97,0x5F,0x4E,0x23,0x68,0xD9,0x05,0x10,0x9B,0x8E,0x3B,0x5B}; + +uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { + + uint32_t xfer = 0; + std::string fname; + ::apache::thrift::protocol::TType ftype; + int16_t fid; + + xfer += iprot->readStructBegin(fname); + + using ::apache::thrift::protocol::TProtocolException; + + + while (true) + { + xfer += iprot->readFieldBegin(fname, ftype, fid); + if (ftype == ::apache::thrift::protocol::T_STOP) { + break; + } + switch (fid) + { + case 1: + if (ftype == ::apache::thrift::protocol::T_LIST) { + { + this->fieldSchemas.clear(); + uint32_t _size211; + ::apache::thrift::protocol::TType _etype214; + xfer += iprot->readListBegin(_etype214, _size211); + this->fieldSchemas.resize(_size211); + uint32_t _i215; + for (_i215 = 0; _i215 < _size211; ++_i215) + { + xfer += this->fieldSchemas[_i215].read(iprot); } xfer += iprot->readListEnd(); } @@ -4815,17 +4899,17 @@ uint32_t Schema::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size210; - ::apache::thrift::protocol::TType _ktype211; - ::apache::thrift::protocol::TType _vtype212; - xfer += iprot->readMapBegin(_ktype211, _vtype212, _size210); - uint32_t _i214; - for (_i214 = 0; _i214 < _size210; ++_i214) + uint32_t _size216; + ::apache::thrift::protocol::TType _ktype217; + ::apache::thrift::protocol::TType _vtype218; + xfer += iprot->readMapBegin(_ktype217, _vtype218, _size216); + uint32_t _i220; + for (_i220 = 0; _i220 < _size216; ++_i220) { - std::string _key215; - xfer += iprot->readString(_key215); - std::string& _val216 = this->properties[_key215]; - xfer += iprot->readString(_val216); + std::string _key221; + xfer += iprot->readString(_key221); + std::string& _val222 = this->properties[_key221]; + xfer += iprot->readString(_val222); } xfer += iprot->readMapEnd(); } @@ -4853,10 +4937,10 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("fieldSchemas", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->fieldSchemas.size())); - std::vector ::const_iterator _iter217; - for (_iter217 = this->fieldSchemas.begin(); _iter217 != this->fieldSchemas.end(); ++_iter217) + std::vector ::const_iterator _iter223; + for (_iter223 = this->fieldSchemas.begin(); _iter223 != this->fieldSchemas.end(); ++_iter223) { - xfer += (*_iter217).write(oprot); + xfer += (*_iter223).write(oprot); } xfer += oprot->writeListEnd(); } @@ -4865,11 +4949,11 @@ uint32_t Schema::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 2); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, 
static_cast(this->properties.size())); - std::map ::const_iterator _iter218; - for (_iter218 = this->properties.begin(); _iter218 != this->properties.end(); ++_iter218) + std::map ::const_iterator _iter224; + for (_iter224 = this->properties.begin(); _iter224 != this->properties.end(); ++_iter224) { - xfer += oprot->writeString(_iter218->first); - xfer += oprot->writeString(_iter218->second); + xfer += oprot->writeString(_iter224->first); + xfer += oprot->writeString(_iter224->second); } xfer += oprot->writeMapEnd(); } @@ -4914,17 +4998,17 @@ uint32_t EnvironmentContext::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_MAP) { { this->properties.clear(); - uint32_t _size219; - ::apache::thrift::protocol::TType _ktype220; - ::apache::thrift::protocol::TType _vtype221; - xfer += iprot->readMapBegin(_ktype220, _vtype221, _size219); - uint32_t _i223; - for (_i223 = 0; _i223 < _size219; ++_i223) + uint32_t _size225; + ::apache::thrift::protocol::TType _ktype226; + ::apache::thrift::protocol::TType _vtype227; + xfer += iprot->readMapBegin(_ktype226, _vtype227, _size225); + uint32_t _i229; + for (_i229 = 0; _i229 < _size225; ++_i229) { - std::string _key224; - xfer += iprot->readString(_key224); - std::string& _val225 = this->properties[_key224]; - xfer += iprot->readString(_val225); + std::string _key230; + xfer += iprot->readString(_key230); + std::string& _val231 = this->properties[_key230]; + xfer += iprot->readString(_val231); } xfer += iprot->readMapEnd(); } @@ -4952,11 +5036,11 @@ uint32_t EnvironmentContext::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("properties", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_STRING, static_cast(this->properties.size())); - std::map ::const_iterator _iter226; - for (_iter226 = this->properties.begin(); _iter226 != this->properties.end(); ++_iter226) + std::map ::const_iterator _iter232; + for (_iter232 = this->properties.begin(); _iter232 != this->properties.end(); ++_iter232) { - xfer += oprot->writeString(_iter226->first); - xfer += oprot->writeString(_iter226->second); + xfer += oprot->writeString(_iter232->first); + xfer += oprot->writeString(_iter232->second); } xfer += oprot->writeMapEnd(); } @@ -5002,14 +5086,14 @@ uint32_t PartitionsByExprResult::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size227; - ::apache::thrift::protocol::TType _etype230; - xfer += iprot->readListBegin(_etype230, _size227); - this->partitions.resize(_size227); - uint32_t _i231; - for (_i231 = 0; _i231 < _size227; ++_i231) + uint32_t _size233; + ::apache::thrift::protocol::TType _etype236; + xfer += iprot->readListBegin(_etype236, _size233); + this->partitions.resize(_size233); + uint32_t _i237; + for (_i237 = 0; _i237 < _size233; ++_i237) { - xfer += this->partitions[_i231].read(iprot); + xfer += this->partitions[_i237].read(iprot); } xfer += iprot->readListEnd(); } @@ -5049,10 +5133,10 @@ uint32_t PartitionsByExprResult::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter232; - for (_iter232 = this->partitions.begin(); _iter232 != this->partitions.end(); ++_iter232) + std::vector 
::const_iterator _iter238; + for (_iter238 = this->partitions.begin(); _iter238 != this->partitions.end(); ++_iter238) { - xfer += (*_iter232).write(oprot); + xfer += (*_iter238).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5226,14 +5310,14 @@ uint32_t TableStatsResult::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->tableStats.clear(); - uint32_t _size233; - ::apache::thrift::protocol::TType _etype236; - xfer += iprot->readListBegin(_etype236, _size233); - this->tableStats.resize(_size233); - uint32_t _i237; - for (_i237 = 0; _i237 < _size233; ++_i237) + uint32_t _size239; + ::apache::thrift::protocol::TType _etype242; + xfer += iprot->readListBegin(_etype242, _size239); + this->tableStats.resize(_size239); + uint32_t _i243; + for (_i243 = 0; _i243 < _size239; ++_i243) { - xfer += this->tableStats[_i237].read(iprot); + xfer += this->tableStats[_i243].read(iprot); } xfer += iprot->readListEnd(); } @@ -5263,10 +5347,10 @@ uint32_t TableStatsResult::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("tableStats", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->tableStats.size())); - std::vector ::const_iterator _iter238; - for (_iter238 = this->tableStats.begin(); _iter238 != this->tableStats.end(); ++_iter238) + std::vector ::const_iterator _iter244; + for (_iter244 = this->tableStats.begin(); _iter244 != this->tableStats.end(); ++_iter244) { - xfer += (*_iter238).write(oprot); + xfer += (*_iter244).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5310,26 +5394,26 @@ uint32_t PartitionsStatsResult::read(::apache::thrift::protocol::TProtocol* ipro if (ftype == ::apache::thrift::protocol::T_MAP) { { this->partStats.clear(); - uint32_t _size239; - ::apache::thrift::protocol::TType _ktype240; - ::apache::thrift::protocol::TType _vtype241; - xfer += iprot->readMapBegin(_ktype240, _vtype241, _size239); - uint32_t _i243; - for (_i243 = 0; _i243 < _size239; ++_i243) + uint32_t _size245; + ::apache::thrift::protocol::TType _ktype246; + ::apache::thrift::protocol::TType _vtype247; + xfer += iprot->readMapBegin(_ktype246, _vtype247, _size245); + uint32_t _i249; + for (_i249 = 0; _i249 < _size245; ++_i249) { - std::string _key244; - xfer += iprot->readString(_key244); - std::vector & _val245 = this->partStats[_key244]; + std::string _key250; + xfer += iprot->readString(_key250); + std::vector & _val251 = this->partStats[_key250]; { - _val245.clear(); - uint32_t _size246; - ::apache::thrift::protocol::TType _etype249; - xfer += iprot->readListBegin(_etype249, _size246); - _val245.resize(_size246); - uint32_t _i250; - for (_i250 = 0; _i250 < _size246; ++_i250) + _val251.clear(); + uint32_t _size252; + ::apache::thrift::protocol::TType _etype255; + xfer += iprot->readListBegin(_etype255, _size252); + _val251.resize(_size252); + uint32_t _i256; + for (_i256 = 0; _i256 < _size252; ++_i256) { - xfer += _val245[_i250].read(iprot); + xfer += _val251[_i256].read(iprot); } xfer += iprot->readListEnd(); } @@ -5362,16 +5446,16 @@ uint32_t PartitionsStatsResult::write(::apache::thrift::protocol::TProtocol* opr xfer += oprot->writeFieldBegin("partStats", ::apache::thrift::protocol::T_MAP, 1); { xfer += oprot->writeMapBegin(::apache::thrift::protocol::T_STRING, ::apache::thrift::protocol::T_LIST, static_cast(this->partStats.size())); - std::map > ::const_iterator _iter251; - for (_iter251 = this->partStats.begin(); _iter251 != 
this->partStats.end(); ++_iter251) + std::map > ::const_iterator _iter257; + for (_iter257 = this->partStats.begin(); _iter257 != this->partStats.end(); ++_iter257) { - xfer += oprot->writeString(_iter251->first); + xfer += oprot->writeString(_iter257->first); { - xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter251->second.size())); - std::vector ::const_iterator _iter252; - for (_iter252 = _iter251->second.begin(); _iter252 != _iter251->second.end(); ++_iter252) + xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(_iter257->second.size())); + std::vector ::const_iterator _iter258; + for (_iter258 = _iter257->second.begin(); _iter258 != _iter257->second.end(); ++_iter258) { - xfer += (*_iter252).write(oprot); + xfer += (*_iter258).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5436,14 +5520,14 @@ uint32_t TableStatsRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size253; - ::apache::thrift::protocol::TType _etype256; - xfer += iprot->readListBegin(_etype256, _size253); - this->colNames.resize(_size253); - uint32_t _i257; - for (_i257 = 0; _i257 < _size253; ++_i257) + uint32_t _size259; + ::apache::thrift::protocol::TType _etype262; + xfer += iprot->readListBegin(_etype262, _size259); + this->colNames.resize(_size259); + uint32_t _i263; + for (_i263 = 0; _i263 < _size259; ++_i263) { - xfer += iprot->readString(this->colNames[_i257]); + xfer += iprot->readString(this->colNames[_i263]); } xfer += iprot->readListEnd(); } @@ -5485,10 +5569,10 @@ uint32_t TableStatsRequest::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter258; - for (_iter258 = this->colNames.begin(); _iter258 != this->colNames.end(); ++_iter258) + std::vector ::const_iterator _iter264; + for (_iter264 = this->colNames.begin(); _iter264 != this->colNames.end(); ++_iter264) { - xfer += oprot->writeString((*_iter258)); + xfer += oprot->writeString((*_iter264)); } xfer += oprot->writeListEnd(); } @@ -5553,14 +5637,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->colNames.clear(); - uint32_t _size259; - ::apache::thrift::protocol::TType _etype262; - xfer += iprot->readListBegin(_etype262, _size259); - this->colNames.resize(_size259); - uint32_t _i263; - for (_i263 = 0; _i263 < _size259; ++_i263) + uint32_t _size265; + ::apache::thrift::protocol::TType _etype268; + xfer += iprot->readListBegin(_etype268, _size265); + this->colNames.resize(_size265); + uint32_t _i269; + for (_i269 = 0; _i269 < _size265; ++_i269) { - xfer += iprot->readString(this->colNames[_i263]); + xfer += iprot->readString(this->colNames[_i269]); } xfer += iprot->readListEnd(); } @@ -5573,14 +5657,14 @@ uint32_t PartitionsStatsRequest::read(::apache::thrift::protocol::TProtocol* ipr if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partNames.clear(); - uint32_t _size264; - ::apache::thrift::protocol::TType _etype267; - xfer += iprot->readListBegin(_etype267, _size264); - this->partNames.resize(_size264); - uint32_t _i268; - for (_i268 = 0; _i268 < _size264; ++_i268) + uint32_t _size270; + ::apache::thrift::protocol::TType _etype273; + xfer += 
iprot->readListBegin(_etype273, _size270); + this->partNames.resize(_size270); + uint32_t _i274; + for (_i274 = 0; _i274 < _size270; ++_i274) { - xfer += iprot->readString(this->partNames[_i268]); + xfer += iprot->readString(this->partNames[_i274]); } xfer += iprot->readListEnd(); } @@ -5624,10 +5708,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("colNames", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->colNames.size())); - std::vector ::const_iterator _iter269; - for (_iter269 = this->colNames.begin(); _iter269 != this->colNames.end(); ++_iter269) + std::vector ::const_iterator _iter275; + for (_iter275 = this->colNames.begin(); _iter275 != this->colNames.end(); ++_iter275) { - xfer += oprot->writeString((*_iter269)); + xfer += oprot->writeString((*_iter275)); } xfer += oprot->writeListEnd(); } @@ -5636,10 +5720,10 @@ uint32_t PartitionsStatsRequest::write(::apache::thrift::protocol::TProtocol* op xfer += oprot->writeFieldBegin("partNames", ::apache::thrift::protocol::T_LIST, 4); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->partNames.size())); - std::vector ::const_iterator _iter270; - for (_iter270 = this->partNames.begin(); _iter270 != this->partNames.end(); ++_iter270) + std::vector ::const_iterator _iter276; + for (_iter276 = this->partNames.begin(); _iter276 != this->partNames.end(); ++_iter276) { - xfer += oprot->writeString((*_iter270)); + xfer += oprot->writeString((*_iter276)); } xfer += oprot->writeListEnd(); } @@ -5685,14 +5769,14 @@ uint32_t AddPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size271; - ::apache::thrift::protocol::TType _etype274; - xfer += iprot->readListBegin(_etype274, _size271); - this->partitions.resize(_size271); - uint32_t _i275; - for (_i275 = 0; _i275 < _size271; ++_i275) + uint32_t _size277; + ::apache::thrift::protocol::TType _etype280; + xfer += iprot->readListBegin(_etype280, _size277); + this->partitions.resize(_size277); + uint32_t _i281; + for (_i281 = 0; _i281 < _size277; ++_i281) { - xfer += this->partitions[_i275].read(iprot); + xfer += this->partitions[_i281].read(iprot); } xfer += iprot->readListEnd(); } @@ -5721,10 +5805,10 @@ uint32_t AddPartitionsResult::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter276; - for (_iter276 = this->partitions.begin(); _iter276 != this->partitions.end(); ++_iter276) + std::vector ::const_iterator _iter282; + for (_iter282 = this->partitions.begin(); _iter282 != this->partitions.end(); ++_iter282) { - xfer += (*_iter276).write(oprot); + xfer += (*_iter282).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5788,14 +5872,14 @@ uint32_t AddPartitionsRequest::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->parts.clear(); - uint32_t _size277; - ::apache::thrift::protocol::TType _etype280; - xfer += iprot->readListBegin(_etype280, _size277); - this->parts.resize(_size277); - uint32_t _i281; - for (_i281 = 0; _i281 < _size277; ++_i281) + uint32_t _size283; + ::apache::thrift::protocol::TType _etype286; + xfer += 
iprot->readListBegin(_etype286, _size283); + this->parts.resize(_size283); + uint32_t _i287; + for (_i287 = 0; _i287 < _size283; ++_i287) { - xfer += this->parts[_i281].read(iprot); + xfer += this->parts[_i287].read(iprot); } xfer += iprot->readListEnd(); } @@ -5855,10 +5939,10 @@ uint32_t AddPartitionsRequest::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("parts", ::apache::thrift::protocol::T_LIST, 3); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->parts.size())); - std::vector ::const_iterator _iter282; - for (_iter282 = this->parts.begin(); _iter282 != this->parts.end(); ++_iter282) + std::vector ::const_iterator _iter288; + for (_iter288 = this->parts.begin(); _iter288 != this->parts.end(); ++_iter288) { - xfer += (*_iter282).write(oprot); + xfer += (*_iter288).write(oprot); } xfer += oprot->writeListEnd(); } @@ -5915,14 +5999,14 @@ uint32_t DropPartitionsResult::read(::apache::thrift::protocol::TProtocol* iprot if (ftype == ::apache::thrift::protocol::T_LIST) { { this->partitions.clear(); - uint32_t _size283; - ::apache::thrift::protocol::TType _etype286; - xfer += iprot->readListBegin(_etype286, _size283); - this->partitions.resize(_size283); - uint32_t _i287; - for (_i287 = 0; _i287 < _size283; ++_i287) + uint32_t _size289; + ::apache::thrift::protocol::TType _etype292; + xfer += iprot->readListBegin(_etype292, _size289); + this->partitions.resize(_size289); + uint32_t _i293; + for (_i293 = 0; _i293 < _size289; ++_i293) { - xfer += this->partitions[_i287].read(iprot); + xfer += this->partitions[_i293].read(iprot); } xfer += iprot->readListEnd(); } @@ -5951,10 +6035,10 @@ uint32_t DropPartitionsResult::write(::apache::thrift::protocol::TProtocol* opro xfer += oprot->writeFieldBegin("partitions", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->partitions.size())); - std::vector ::const_iterator _iter288; - for (_iter288 = this->partitions.begin(); _iter288 != this->partitions.end(); ++_iter288) + std::vector ::const_iterator _iter294; + for (_iter294 = this->partitions.begin(); _iter294 != this->partitions.end(); ++_iter294) { - xfer += (*_iter288).write(oprot); + xfer += (*_iter294).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6077,14 +6161,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->names.clear(); - uint32_t _size289; - ::apache::thrift::protocol::TType _etype292; - xfer += iprot->readListBegin(_etype292, _size289); - this->names.resize(_size289); - uint32_t _i293; - for (_i293 = 0; _i293 < _size289; ++_i293) + uint32_t _size295; + ::apache::thrift::protocol::TType _etype298; + xfer += iprot->readListBegin(_etype298, _size295); + this->names.resize(_size295); + uint32_t _i299; + for (_i299 = 0; _i299 < _size295; ++_i299) { - xfer += iprot->readString(this->names[_i293]); + xfer += iprot->readString(this->names[_i299]); } xfer += iprot->readListEnd(); } @@ -6097,14 +6181,14 @@ uint32_t RequestPartsSpec::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->exprs.clear(); - uint32_t _size294; - ::apache::thrift::protocol::TType _etype297; - xfer += iprot->readListBegin(_etype297, _size294); - this->exprs.resize(_size294); - uint32_t _i298; - for (_i298 = 0; _i298 < _size294; ++_i298) + uint32_t _size300; + ::apache::thrift::protocol::TType _etype303; + xfer += 
iprot->readListBegin(_etype303, _size300); + this->exprs.resize(_size300); + uint32_t _i304; + for (_i304 = 0; _i304 < _size300; ++_i304) { - xfer += this->exprs[_i298].read(iprot); + xfer += this->exprs[_i304].read(iprot); } xfer += iprot->readListEnd(); } @@ -6132,10 +6216,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("names", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRING, static_cast(this->names.size())); - std::vector ::const_iterator _iter299; - for (_iter299 = this->names.begin(); _iter299 != this->names.end(); ++_iter299) + std::vector ::const_iterator _iter305; + for (_iter305 = this->names.begin(); _iter305 != this->names.end(); ++_iter305) { - xfer += oprot->writeString((*_iter299)); + xfer += oprot->writeString((*_iter305)); } xfer += oprot->writeListEnd(); } @@ -6144,10 +6228,10 @@ uint32_t RequestPartsSpec::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("exprs", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->exprs.size())); - std::vector ::const_iterator _iter300; - for (_iter300 = this->exprs.begin(); _iter300 != this->exprs.end(); ++_iter300) + std::vector ::const_iterator _iter306; + for (_iter306 = this->exprs.begin(); _iter306 != this->exprs.end(); ++_iter306) { - xfer += (*_iter300).write(oprot); + xfer += (*_iter306).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6357,9 +6441,9 @@ uint32_t ResourceUri::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast301; - xfer += iprot->readI32(ecast301); - this->resourceType = (ResourceType::type)ecast301; + int32_t ecast307; + xfer += iprot->readI32(ecast307); + this->resourceType = (ResourceType::type)ecast307; this->__isset.resourceType = true; } else { xfer += iprot->skip(ftype); @@ -6466,9 +6550,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast302; - xfer += iprot->readI32(ecast302); - this->ownerType = (PrincipalType::type)ecast302; + int32_t ecast308; + xfer += iprot->readI32(ecast308); + this->ownerType = (PrincipalType::type)ecast308; this->__isset.ownerType = true; } else { xfer += iprot->skip(ftype); @@ -6484,9 +6568,9 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 7: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast303; - xfer += iprot->readI32(ecast303); - this->functionType = (FunctionType::type)ecast303; + int32_t ecast309; + xfer += iprot->readI32(ecast309); + this->functionType = (FunctionType::type)ecast309; this->__isset.functionType = true; } else { xfer += iprot->skip(ftype); @@ -6496,14 +6580,14 @@ uint32_t Function::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->resourceUris.clear(); - uint32_t _size304; - ::apache::thrift::protocol::TType _etype307; - xfer += iprot->readListBegin(_etype307, _size304); - this->resourceUris.resize(_size304); - uint32_t _i308; - for (_i308 = 0; _i308 < _size304; ++_i308) + uint32_t _size310; + ::apache::thrift::protocol::TType _etype313; + xfer += iprot->readListBegin(_etype313, _size310); + this->resourceUris.resize(_size310); + uint32_t _i314; + for (_i314 = 0; _i314 < _size310; ++_i314) { - xfer += 
this->resourceUris[_i308].read(iprot); + xfer += this->resourceUris[_i314].read(iprot); } xfer += iprot->readListEnd(); } @@ -6559,10 +6643,10 @@ uint32_t Function::write(::apache::thrift::protocol::TProtocol* oprot) const { xfer += oprot->writeFieldBegin("resourceUris", ::apache::thrift::protocol::T_LIST, 8); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->resourceUris.size())); - std::vector ::const_iterator _iter309; - for (_iter309 = this->resourceUris.begin(); _iter309 != this->resourceUris.end(); ++_iter309) + std::vector ::const_iterator _iter315; + for (_iter315 = this->resourceUris.begin(); _iter315 != this->resourceUris.end(); ++_iter315) { - xfer += (*_iter309).write(oprot); + xfer += (*_iter315).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6623,9 +6707,9 @@ uint32_t TxnInfo::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast310; - xfer += iprot->readI32(ecast310); - this->state = (TxnState::type)ecast310; + int32_t ecast316; + xfer += iprot->readI32(ecast316); + this->state = (TxnState::type)ecast316; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -6737,14 +6821,14 @@ uint32_t GetOpenTxnsInfoResponse::read(::apache::thrift::protocol::TProtocol* ip if (ftype == ::apache::thrift::protocol::T_LIST) { { this->open_txns.clear(); - uint32_t _size311; - ::apache::thrift::protocol::TType _etype314; - xfer += iprot->readListBegin(_etype314, _size311); - this->open_txns.resize(_size311); - uint32_t _i315; - for (_i315 = 0; _i315 < _size311; ++_i315) + uint32_t _size317; + ::apache::thrift::protocol::TType _etype320; + xfer += iprot->readListBegin(_etype320, _size317); + this->open_txns.resize(_size317); + uint32_t _i321; + for (_i321 = 0; _i321 < _size317; ++_i321) { - xfer += this->open_txns[_i315].read(iprot); + xfer += this->open_txns[_i321].read(iprot); } xfer += iprot->readListEnd(); } @@ -6780,10 +6864,10 @@ uint32_t GetOpenTxnsInfoResponse::write(::apache::thrift::protocol::TProtocol* o xfer += oprot->writeFieldBegin("open_txns", ::apache::thrift::protocol::T_LIST, 2); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->open_txns.size())); - std::vector ::const_iterator _iter316; - for (_iter316 = this->open_txns.begin(); _iter316 != this->open_txns.end(); ++_iter316) + std::vector ::const_iterator _iter322; + for (_iter322 = this->open_txns.begin(); _iter322 != this->open_txns.end(); ++_iter322) { - xfer += (*_iter316).write(oprot); + xfer += (*_iter322).write(oprot); } xfer += oprot->writeListEnd(); } @@ -6837,15 +6921,15 @@ uint32_t GetOpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_SET) { { this->open_txns.clear(); - uint32_t _size317; - ::apache::thrift::protocol::TType _etype320; - xfer += iprot->readSetBegin(_etype320, _size317); - uint32_t _i321; - for (_i321 = 0; _i321 < _size317; ++_i321) + uint32_t _size323; + ::apache::thrift::protocol::TType _etype326; + xfer += iprot->readSetBegin(_etype326, _size323); + uint32_t _i327; + for (_i327 = 0; _i327 < _size323; ++_i327) { - int64_t _elem322; - xfer += iprot->readI64(_elem322); - this->open_txns.insert(_elem322); + int64_t _elem328; + xfer += iprot->readI64(_elem328); + this->open_txns.insert(_elem328); } xfer += iprot->readSetEnd(); } @@ -6881,10 +6965,10 @@ uint32_t GetOpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("open_txns", 
::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->open_txns.size())); - std::set ::const_iterator _iter323; - for (_iter323 = this->open_txns.begin(); _iter323 != this->open_txns.end(); ++_iter323) + std::set ::const_iterator _iter329; + for (_iter329 = this->open_txns.begin(); _iter329 != this->open_txns.end(); ++_iter329) { - xfer += oprot->writeI64((*_iter323)); + xfer += oprot->writeI64((*_iter329)); } xfer += oprot->writeSetEnd(); } @@ -7025,14 +7109,14 @@ uint32_t OpenTxnsResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->txn_ids.clear(); - uint32_t _size324; - ::apache::thrift::protocol::TType _etype327; - xfer += iprot->readListBegin(_etype327, _size324); - this->txn_ids.resize(_size324); - uint32_t _i328; - for (_i328 = 0; _i328 < _size324; ++_i328) + uint32_t _size330; + ::apache::thrift::protocol::TType _etype333; + xfer += iprot->readListBegin(_etype333, _size330); + this->txn_ids.resize(_size330); + uint32_t _i334; + for (_i334 = 0; _i334 < _size330; ++_i334) { - xfer += iprot->readI64(this->txn_ids[_i328]); + xfer += iprot->readI64(this->txn_ids[_i334]); } xfer += iprot->readListEnd(); } @@ -7062,10 +7146,10 @@ uint32_t OpenTxnsResponse::write(::apache::thrift::protocol::TProtocol* oprot) c xfer += oprot->writeFieldBegin("txn_ids", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_I64, static_cast(this->txn_ids.size())); - std::vector ::const_iterator _iter329; - for (_iter329 = this->txn_ids.begin(); _iter329 != this->txn_ids.end(); ++_iter329) + std::vector ::const_iterator _iter335; + for (_iter335 = this->txn_ids.begin(); _iter335 != this->txn_ids.end(); ++_iter335) { - xfer += oprot->writeI64((*_iter329)); + xfer += oprot->writeI64((*_iter335)); } xfer += oprot->writeListEnd(); } @@ -7237,9 +7321,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { { case 1: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast330; - xfer += iprot->readI32(ecast330); - this->type = (LockType::type)ecast330; + int32_t ecast336; + xfer += iprot->readI32(ecast336); + this->type = (LockType::type)ecast336; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -7247,9 +7331,9 @@ uint32_t LockComponent::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast331; - xfer += iprot->readI32(ecast331); - this->level = (LockLevel::type)ecast331; + int32_t ecast337; + xfer += iprot->readI32(ecast337); + this->level = (LockLevel::type)ecast337; isset_level = true; } else { xfer += iprot->skip(ftype); @@ -7368,14 +7452,14 @@ uint32_t LockRequest::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->component.clear(); - uint32_t _size332; - ::apache::thrift::protocol::TType _etype335; - xfer += iprot->readListBegin(_etype335, _size332); - this->component.resize(_size332); - uint32_t _i336; - for (_i336 = 0; _i336 < _size332; ++_i336) + uint32_t _size338; + ::apache::thrift::protocol::TType _etype341; + xfer += iprot->readListBegin(_etype341, _size338); + this->component.resize(_size338); + uint32_t _i342; + for (_i342 = 0; _i342 < _size338; ++_i342) { - xfer += this->component[_i336].read(iprot); + xfer += this->component[_i342].read(iprot); } xfer += iprot->readListEnd(); } @@ -7433,10 +7517,10 @@ uint32_t 
LockRequest::write(::apache::thrift::protocol::TProtocol* oprot) const xfer += oprot->writeFieldBegin("component", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->component.size())); - std::vector ::const_iterator _iter337; - for (_iter337 = this->component.begin(); _iter337 != this->component.end(); ++_iter337) + std::vector ::const_iterator _iter343; + for (_iter343 = this->component.begin(); _iter343 != this->component.end(); ++_iter343) { - xfer += (*_iter337).write(oprot); + xfer += (*_iter343).write(oprot); } xfer += oprot->writeListEnd(); } @@ -7504,9 +7588,9 @@ uint32_t LockResponse::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 2: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast338; - xfer += iprot->readI32(ecast338); - this->state = (LockState::type)ecast338; + int32_t ecast344; + xfer += iprot->readI32(ecast344); + this->state = (LockState::type)ecast344; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -7788,9 +7872,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 5: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast339; - xfer += iprot->readI32(ecast339); - this->state = (LockState::type)ecast339; + int32_t ecast345; + xfer += iprot->readI32(ecast345); + this->state = (LockState::type)ecast345; isset_state = true; } else { xfer += iprot->skip(ftype); @@ -7798,9 +7882,9 @@ uint32_t ShowLocksResponseElement::read(::apache::thrift::protocol::TProtocol* i break; case 6: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast340; - xfer += iprot->readI32(ecast340); - this->type = (LockType::type)ecast340; + int32_t ecast346; + xfer += iprot->readI32(ecast346); + this->type = (LockType::type)ecast346; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -7972,14 +8056,14 @@ uint32_t ShowLocksResponse::read(::apache::thrift::protocol::TProtocol* iprot) { if (ftype == ::apache::thrift::protocol::T_LIST) { { this->locks.clear(); - uint32_t _size341; - ::apache::thrift::protocol::TType _etype344; - xfer += iprot->readListBegin(_etype344, _size341); - this->locks.resize(_size341); - uint32_t _i345; - for (_i345 = 0; _i345 < _size341; ++_i345) + uint32_t _size347; + ::apache::thrift::protocol::TType _etype350; + xfer += iprot->readListBegin(_etype350, _size347); + this->locks.resize(_size347); + uint32_t _i351; + for (_i351 = 0; _i351 < _size347; ++_i351) { - xfer += this->locks[_i345].read(iprot); + xfer += this->locks[_i351].read(iprot); } xfer += iprot->readListEnd(); } @@ -8007,10 +8091,10 @@ uint32_t ShowLocksResponse::write(::apache::thrift::protocol::TProtocol* oprot) xfer += oprot->writeFieldBegin("locks", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->locks.size())); - std::vector ::const_iterator _iter346; - for (_iter346 = this->locks.begin(); _iter346 != this->locks.end(); ++_iter346) + std::vector ::const_iterator _iter352; + for (_iter352 = this->locks.begin(); _iter352 != this->locks.end(); ++_iter352) { - xfer += (*_iter346).write(oprot); + xfer += (*_iter352).write(oprot); } xfer += oprot->writeListEnd(); } @@ -8213,15 +8297,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->aborted.clear(); - uint32_t _size347; - ::apache::thrift::protocol::TType _etype350; - xfer += iprot->readSetBegin(_etype350, 
_size347); - uint32_t _i351; - for (_i351 = 0; _i351 < _size347; ++_i351) + uint32_t _size353; + ::apache::thrift::protocol::TType _etype356; + xfer += iprot->readSetBegin(_etype356, _size353); + uint32_t _i357; + for (_i357 = 0; _i357 < _size353; ++_i357) { - int64_t _elem352; - xfer += iprot->readI64(_elem352); - this->aborted.insert(_elem352); + int64_t _elem358; + xfer += iprot->readI64(_elem358); + this->aborted.insert(_elem358); } xfer += iprot->readSetEnd(); } @@ -8234,15 +8318,15 @@ uint32_t HeartbeatTxnRangeResponse::read(::apache::thrift::protocol::TProtocol* if (ftype == ::apache::thrift::protocol::T_SET) { { this->nosuch.clear(); - uint32_t _size353; - ::apache::thrift::protocol::TType _etype356; - xfer += iprot->readSetBegin(_etype356, _size353); - uint32_t _i357; - for (_i357 = 0; _i357 < _size353; ++_i357) + uint32_t _size359; + ::apache::thrift::protocol::TType _etype362; + xfer += iprot->readSetBegin(_etype362, _size359); + uint32_t _i363; + for (_i363 = 0; _i363 < _size359; ++_i363) { - int64_t _elem358; - xfer += iprot->readI64(_elem358); - this->nosuch.insert(_elem358); + int64_t _elem364; + xfer += iprot->readI64(_elem364); + this->nosuch.insert(_elem364); } xfer += iprot->readSetEnd(); } @@ -8274,10 +8358,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("aborted", ::apache::thrift::protocol::T_SET, 1); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->aborted.size())); - std::set ::const_iterator _iter359; - for (_iter359 = this->aborted.begin(); _iter359 != this->aborted.end(); ++_iter359) + std::set ::const_iterator _iter365; + for (_iter365 = this->aborted.begin(); _iter365 != this->aborted.end(); ++_iter365) { - xfer += oprot->writeI64((*_iter359)); + xfer += oprot->writeI64((*_iter365)); } xfer += oprot->writeSetEnd(); } @@ -8286,10 +8370,10 @@ uint32_t HeartbeatTxnRangeResponse::write(::apache::thrift::protocol::TProtocol* xfer += oprot->writeFieldBegin("nosuch", ::apache::thrift::protocol::T_SET, 2); { xfer += oprot->writeSetBegin(::apache::thrift::protocol::T_I64, static_cast(this->nosuch.size())); - std::set ::const_iterator _iter360; - for (_iter360 = this->nosuch.begin(); _iter360 != this->nosuch.end(); ++_iter360) + std::set ::const_iterator _iter366; + for (_iter366 = this->nosuch.begin(); _iter366 != this->nosuch.end(); ++_iter366) { - xfer += oprot->writeI64((*_iter360)); + xfer += oprot->writeI64((*_iter366)); } xfer += oprot->writeSetEnd(); } @@ -8358,9 +8442,9 @@ uint32_t CompactionRequest::read(::apache::thrift::protocol::TProtocol* iprot) { break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast361; - xfer += iprot->readI32(ecast361); - this->type = (CompactionType::type)ecast361; + int32_t ecast367; + xfer += iprot->readI32(ecast367); + this->type = (CompactionType::type)ecast367; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -8535,9 +8619,9 @@ uint32_t ShowCompactResponseElement::read(::apache::thrift::protocol::TProtocol* break; case 4: if (ftype == ::apache::thrift::protocol::T_I32) { - int32_t ecast362; - xfer += iprot->readI32(ecast362); - this->type = (CompactionType::type)ecast362; + int32_t ecast368; + xfer += iprot->readI32(ecast368); + this->type = (CompactionType::type)ecast368; isset_type = true; } else { xfer += iprot->skip(ftype); @@ -8684,14 +8768,14 @@ uint32_t ShowCompactResponse::read(::apache::thrift::protocol::TProtocol* iprot) if (ftype == ::apache::thrift::protocol::T_LIST) { { 
this->compacts.clear(); - uint32_t _size363; - ::apache::thrift::protocol::TType _etype366; - xfer += iprot->readListBegin(_etype366, _size363); - this->compacts.resize(_size363); - uint32_t _i367; - for (_i367 = 0; _i367 < _size363; ++_i367) + uint32_t _size369; + ::apache::thrift::protocol::TType _etype372; + xfer += iprot->readListBegin(_etype372, _size369); + this->compacts.resize(_size369); + uint32_t _i373; + for (_i373 = 0; _i373 < _size369; ++_i373) { - xfer += this->compacts[_i367].read(iprot); + xfer += this->compacts[_i373].read(iprot); } xfer += iprot->readListEnd(); } @@ -8721,10 +8805,10 @@ uint32_t ShowCompactResponse::write(::apache::thrift::protocol::TProtocol* oprot xfer += oprot->writeFieldBegin("compacts", ::apache::thrift::protocol::T_LIST, 1); { xfer += oprot->writeListBegin(::apache::thrift::protocol::T_STRUCT, static_cast(this->compacts.size())); - std::vector ::const_iterator _iter368; - for (_iter368 = this->compacts.begin(); _iter368 != this->compacts.end(); ++_iter368) + std::vector ::const_iterator _iter374; + for (_iter374 = this->compacts.begin(); _iter374 != this->compacts.end(); ++_iter374) { - xfer += (*_iter368).write(oprot); + xfer += (*_iter374).write(oprot); } xfer += oprot->writeListEnd(); } diff --git a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h index 9f583a4..5ab864a 100644 --- a/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h +++ b/metastore/src/gen/thrift/gen-cpp/hive_metastore_types.h @@ -2681,6 +2681,43 @@ class AggrStats { void swap(AggrStats &a, AggrStats &b); + +class SetPartitionsStatsRequest { + public: + + static const char* ascii_fingerprint; // = "635C0DA9A947DA57AAE693A5DFB86569"; + static const uint8_t binary_fingerprint[16]; // = {0x63,0x5C,0x0D,0xA9,0xA9,0x47,0xDA,0x57,0xAA,0xE6,0x93,0xA5,0xDF,0xB8,0x65,0x69}; + + SetPartitionsStatsRequest() { + } + + virtual ~SetPartitionsStatsRequest() throw() {} + + std::vector colStats; + + void __set_colStats(const std::vector & val) { + colStats = val; + } + + bool operator == (const SetPartitionsStatsRequest & rhs) const + { + if (!(colStats == rhs.colStats)) + return false; + return true; + } + bool operator != (const SetPartitionsStatsRequest &rhs) const { + return !(*this == rhs); + } + + bool operator < (const SetPartitionsStatsRequest & ) const; + + uint32_t read(::apache::thrift::protocol::TProtocol* iprot); + uint32_t write(::apache::thrift::protocol::TProtocol* oprot) const; + +}; + +void swap(SetPartitionsStatsRequest &a, SetPartitionsStatsRequest &b); + typedef struct _Schema__isset { _Schema__isset() : fieldSchemas(false), properties(false) {} bool fieldSchemas; diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java index 96caab6..08f6974 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsRequest.java @@ -700,14 +700,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsReques case 3: // PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list346 = iprot.readListBegin(); - struct.parts = new ArrayList(_list346.size); - for (int _i347 = 0; _i347 < _list346.size; ++_i347) + org.apache.thrift.protocol.TList 
_list354 = iprot.readListBegin(); + struct.parts = new ArrayList(_list354.size); + for (int _i355 = 0; _i355 < _list354.size; ++_i355) { - Partition _elem348; // required - _elem348 = new Partition(); - _elem348.read(iprot); - struct.parts.add(_elem348); + Partition _elem356; // required + _elem356 = new Partition(); + _elem356.read(iprot); + struct.parts.add(_elem356); } iprot.readListEnd(); } @@ -759,9 +759,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsReque oprot.writeFieldBegin(PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.parts.size())); - for (Partition _iter349 : struct.parts) + for (Partition _iter357 : struct.parts) { - _iter349.write(oprot); + _iter357.write(oprot); } oprot.writeListEnd(); } @@ -796,9 +796,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsReques oprot.writeString(struct.tblName); { oprot.writeI32(struct.parts.size()); - for (Partition _iter350 : struct.parts) + for (Partition _iter358 : struct.parts) { - _iter350.write(oprot); + _iter358.write(oprot); } } oprot.writeBool(struct.ifNotExists); @@ -820,14 +820,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsRequest struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.parts = new ArrayList(_list351.size); - for (int _i352 = 0; _i352 < _list351.size; ++_i352) + org.apache.thrift.protocol.TList _list359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.parts = new ArrayList(_list359.size); + for (int _i360 = 0; _i360 < _list359.size; ++_i360) { - Partition _elem353; // required - _elem353 = new Partition(); - _elem353.read(iprot); - struct.parts.add(_elem353); + Partition _elem361; // required + _elem361 = new Partition(); + _elem361.read(iprot); + struct.parts.add(_elem361); } } struct.setPartsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java index ba65da6..4a44177 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/AddPartitionsResult.java @@ -342,14 +342,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, AddPartitionsResult case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list338 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list338.size); - for (int _i339 = 0; _i339 < _list338.size; ++_i339) + org.apache.thrift.protocol.TList _list346 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list346.size); + for (int _i347 = 0; _i347 < _list346.size; ++_i347) { - Partition _elem340; // required - _elem340 = new Partition(); - _elem340.read(iprot); - struct.partitions.add(_elem340); + Partition _elem348; // required + _elem348 = new Partition(); + _elem348.read(iprot); + struct.partitions.add(_elem348); } iprot.readListEnd(); } @@ -376,9 +376,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, AddPartitionsResul oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter341 : struct.partitions) + for (Partition _iter349 : struct.partitions) { - _iter341.write(oprot); + _iter349.write(oprot); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter342 : struct.partitions) + for (Partition _iter350 : struct.partitions) { - _iter342.write(oprot); + _iter350.write(oprot); } } } @@ -424,14 +424,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, AddPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list343.size); - for (int _i344 = 0; _i344 < _list343.size; ++_i344) + org.apache.thrift.protocol.TList _list351 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list351.size); + for (int _i352 = 0; _i352 < _list351.size; ++_i352) { - Partition _elem345; // required - _elem345 = new Partition(); - _elem345.read(iprot); - struct.partitions.add(_elem345); + Partition _elem353; // required + _elem353 = new Partition(); + _elem353.read(iprot); + struct.partitions.add(_elem353); } } struct.setPartitionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java index 87444d2..e454571 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/DropPartitionsResult.java @@ -342,14 +342,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, DropPartitionsResul case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list354 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list354.size); - for (int _i355 = 0; _i355 < _list354.size; ++_i355) + org.apache.thrift.protocol.TList _list362 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list362.size); + for (int _i363 = 0; _i363 < _list362.size; ++_i363) { - Partition _elem356; // required - _elem356 = new Partition(); - _elem356.read(iprot); - struct.partitions.add(_elem356); + Partition _elem364; // required + _elem364 = new Partition(); + _elem364.read(iprot); + struct.partitions.add(_elem364); } iprot.readListEnd(); } @@ -376,9 +376,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, DropPartitionsResu oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter357 : struct.partitions) + for (Partition _iter365 : struct.partitions) { - _iter357.write(oprot); + _iter365.write(oprot); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResul if (struct.isSetPartitions()) { { oprot.writeI32(struct.partitions.size()); - for (Partition _iter358 : struct.partitions) + for (Partition _iter366 : struct.partitions) { - 
_iter358.write(oprot); + _iter366.write(oprot); } } } @@ -424,14 +424,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, DropPartitionsResult BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list359 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list359.size); - for (int _i360 = 0; _i360 < _list359.size; ++_i360) + org.apache.thrift.protocol.TList _list367 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list367.size); + for (int _i368 = 0; _i368 < _list367.size; ++_i368) { - Partition _elem361; // required - _elem361 = new Partition(); - _elem361.read(iprot); - struct.partitions.add(_elem361); + Partition _elem369; // required + _elem369 = new Partition(); + _elem369.read(iprot); + struct.partitions.add(_elem369); } } struct.setPartitionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java index 3a38950..52c0e2b 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/EnvironmentContext.java @@ -351,15 +351,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, EnvironmentContext case 1: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map270 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map270.size); - for (int _i271 = 0; _i271 < _map270.size; ++_i271) + org.apache.thrift.protocol.TMap _map278 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map278.size); + for (int _i279 = 0; _i279 < _map278.size; ++_i279) { - String _key272; // required - String _val273; // required - _key272 = iprot.readString(); - _val273 = iprot.readString(); - struct.properties.put(_key272, _val273); + String _key280; // required + String _val281; // required + _key280 = iprot.readString(); + _val281 = iprot.readString(); + struct.properties.put(_key280, _val281); } iprot.readMapEnd(); } @@ -385,10 +385,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, EnvironmentContext oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter274 : struct.properties.entrySet()) + for (Map.Entry _iter282 : struct.properties.entrySet()) { - oprot.writeString(_iter274.getKey()); - oprot.writeString(_iter274.getValue()); + oprot.writeString(_iter282.getKey()); + oprot.writeString(_iter282.getValue()); } oprot.writeMapEnd(); } @@ -419,10 +419,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter275 : struct.properties.entrySet()) + for (Map.Entry _iter283 : struct.properties.entrySet()) { - oprot.writeString(_iter275.getKey()); - oprot.writeString(_iter275.getValue()); + oprot.writeString(_iter283.getKey()); + oprot.writeString(_iter283.getValue()); } } } @@ -434,15 +434,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, EnvironmentContext s BitSet incoming 
= iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map276 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map276.size); - for (int _i277 = 0; _i277 < _map276.size; ++_i277) + org.apache.thrift.protocol.TMap _map284 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map284.size); + for (int _i285 = 0; _i285 < _map284.size; ++_i285) { - String _key278; // required - String _val279; // required - _key278 = iprot.readString(); - _val279 = iprot.readString(); - struct.properties.put(_key278, _val279); + String _key286; // required + String _val287; // required + _key286 = iprot.readString(); + _val287 = iprot.readString(); + struct.properties.put(_key286, _val287); } } struct.setPropertiesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java index 813d5f5..35c91dd 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Function.java @@ -993,14 +993,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Function struct) th case 8: // RESOURCE_URIS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list378 = iprot.readListBegin(); - struct.resourceUris = new ArrayList(_list378.size); - for (int _i379 = 0; _i379 < _list378.size; ++_i379) + org.apache.thrift.protocol.TList _list386 = iprot.readListBegin(); + struct.resourceUris = new ArrayList(_list386.size); + for (int _i387 = 0; _i387 < _list386.size; ++_i387) { - ResourceUri _elem380; // required - _elem380 = new ResourceUri(); - _elem380.read(iprot); - struct.resourceUris.add(_elem380); + ResourceUri _elem388; // required + _elem388 = new ResourceUri(); + _elem388.read(iprot); + struct.resourceUris.add(_elem388); } iprot.readListEnd(); } @@ -1059,9 +1059,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Function struct) t oprot.writeFieldBegin(RESOURCE_URIS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.resourceUris.size())); - for (ResourceUri _iter381 : struct.resourceUris) + for (ResourceUri _iter389 : struct.resourceUris) { - _iter381.write(oprot); + _iter389.write(oprot); } oprot.writeListEnd(); } @@ -1134,9 +1134,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Function struct) th if (struct.isSetResourceUris()) { { oprot.writeI32(struct.resourceUris.size()); - for (ResourceUri _iter382 : struct.resourceUris) + for (ResourceUri _iter390 : struct.resourceUris) { - _iter382.write(oprot); + _iter390.write(oprot); } } } @@ -1176,14 +1176,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Function struct) thr } if (incoming.get(7)) { { - org.apache.thrift.protocol.TList _list383 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.resourceUris = new ArrayList(_list383.size); - for (int _i384 = 0; _i384 < _list383.size; ++_i384) + org.apache.thrift.protocol.TList _list391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, 
iprot.readI32()); + struct.resourceUris = new ArrayList(_list391.size); + for (int _i392 = 0; _i392 < _list391.size; ++_i392) { - ResourceUri _elem385; // required - _elem385 = new ResourceUri(); - _elem385.read(iprot); - struct.resourceUris.add(_elem385); + ResourceUri _elem393; // required + _elem393 = new ResourceUri(); + _elem393.read(iprot); + struct.resourceUris.add(_elem393); } } struct.setResourceUrisIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java index 5d3bf75..579718c 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsInfoResponse.java @@ -443,14 +443,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsInfoResp case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list386 = iprot.readListBegin(); - struct.open_txns = new ArrayList(_list386.size); - for (int _i387 = 0; _i387 < _list386.size; ++_i387) + org.apache.thrift.protocol.TList _list394 = iprot.readListBegin(); + struct.open_txns = new ArrayList(_list394.size); + for (int _i395 = 0; _i395 < _list394.size; ++_i395) { - TxnInfo _elem388; // required - _elem388 = new TxnInfo(); - _elem388.read(iprot); - struct.open_txns.add(_elem388); + TxnInfo _elem396; // required + _elem396 = new TxnInfo(); + _elem396.read(iprot); + struct.open_txns.add(_elem396); } iprot.readListEnd(); } @@ -479,9 +479,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsInfoRes oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.open_txns.size())); - for (TxnInfo _iter389 : struct.open_txns) + for (TxnInfo _iter397 : struct.open_txns) { - _iter389.write(oprot); + _iter397.write(oprot); } oprot.writeListEnd(); } @@ -507,9 +507,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoResp oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (TxnInfo _iter390 : struct.open_txns) + for (TxnInfo _iter398 : struct.open_txns) { - _iter390.write(oprot); + _iter398.write(oprot); } } } @@ -520,14 +520,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsInfoRespo struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TList _list391 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.open_txns = new ArrayList(_list391.size); - for (int _i392 = 0; _i392 < _list391.size; ++_i392) + org.apache.thrift.protocol.TList _list399 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.open_txns = new ArrayList(_list399.size); + for (int _i400 = 0; _i400 < _list399.size; ++_i400) { - TxnInfo _elem393; // required - _elem393 = new TxnInfo(); - _elem393.read(iprot); - struct.open_txns.add(_elem393); + TxnInfo _elem401; // required + _elem401 = new TxnInfo(); + _elem401.read(iprot); + struct.open_txns.add(_elem401); } } struct.setOpen_txnsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java index b938d7d..b94b9d5 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/GetOpenTxnsResponse.java @@ -443,13 +443,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, GetOpenTxnsResponse case 2: // OPEN_TXNS if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set394 = iprot.readSetBegin(); - struct.open_txns = new HashSet(2*_set394.size); - for (int _i395 = 0; _i395 < _set394.size; ++_i395) + org.apache.thrift.protocol.TSet _set402 = iprot.readSetBegin(); + struct.open_txns = new HashSet(2*_set402.size); + for (int _i403 = 0; _i403 < _set402.size; ++_i403) { - long _elem396; // required - _elem396 = iprot.readI64(); - struct.open_txns.add(_elem396); + long _elem404; // required + _elem404 = iprot.readI64(); + struct.open_txns.add(_elem404); } iprot.readSetEnd(); } @@ -478,9 +478,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, GetOpenTxnsRespons oprot.writeFieldBegin(OPEN_TXNS_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.open_txns.size())); - for (long _iter397 : struct.open_txns) + for (long _iter405 : struct.open_txns) { - oprot.writeI64(_iter397); + oprot.writeI64(_iter405); } oprot.writeSetEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse oprot.writeI64(struct.txn_high_water_mark); { oprot.writeI32(struct.open_txns.size()); - for (long _iter398 : struct.open_txns) + for (long _iter406 : struct.open_txns) { - oprot.writeI64(_iter398); + oprot.writeI64(_iter406); } } } @@ -519,13 +519,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, GetOpenTxnsResponse struct.txn_high_water_mark = iprot.readI64(); struct.setTxn_high_water_markIsSet(true); { - org.apache.thrift.protocol.TSet _set399 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.open_txns = new HashSet(2*_set399.size); - for (int _i400 = 0; _i400 < _set399.size; ++_i400) + org.apache.thrift.protocol.TSet _set407 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.open_txns = new HashSet(2*_set407.size); + for (int _i408 = 0; _i408 < _set407.size; ++_i408) { - long _elem401; // required - _elem401 = iprot.readI64(); - struct.open_txns.add(_elem401); + long _elem409; // required + _elem409 = iprot.readI64(); + struct.open_txns.add(_elem409); } } struct.setOpen_txnsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java index 49f4e56..5da3efb 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/HeartbeatTxnRangeResponse.java @@ -455,13 +455,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 1: // ABORTED if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set426 = iprot.readSetBegin(); - struct.aborted = new HashSet(2*_set426.size); - for (int _i427 = 0; 
_i427 < _set426.size; ++_i427) + org.apache.thrift.protocol.TSet _set434 = iprot.readSetBegin(); + struct.aborted = new HashSet(2*_set434.size); + for (int _i435 = 0; _i435 < _set434.size; ++_i435) { - long _elem428; // required - _elem428 = iprot.readI64(); - struct.aborted.add(_elem428); + long _elem436; // required + _elem436 = iprot.readI64(); + struct.aborted.add(_elem436); } iprot.readSetEnd(); } @@ -473,13 +473,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, HeartbeatTxnRangeRe case 2: // NOSUCH if (schemeField.type == org.apache.thrift.protocol.TType.SET) { { - org.apache.thrift.protocol.TSet _set429 = iprot.readSetBegin(); - struct.nosuch = new HashSet(2*_set429.size); - for (int _i430 = 0; _i430 < _set429.size; ++_i430) + org.apache.thrift.protocol.TSet _set437 = iprot.readSetBegin(); + struct.nosuch = new HashSet(2*_set437.size); + for (int _i438 = 0; _i438 < _set437.size; ++_i438) { - long _elem431; // required - _elem431 = iprot.readI64(); - struct.nosuch.add(_elem431); + long _elem439; // required + _elem439 = iprot.readI64(); + struct.nosuch.add(_elem439); } iprot.readSetEnd(); } @@ -505,9 +505,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(ABORTED_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.aborted.size())); - for (long _iter432 : struct.aborted) + for (long _iter440 : struct.aborted) { - oprot.writeI64(_iter432); + oprot.writeI64(_iter440); } oprot.writeSetEnd(); } @@ -517,9 +517,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, HeartbeatTxnRangeR oprot.writeFieldBegin(NOSUCH_FIELD_DESC); { oprot.writeSetBegin(new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, struct.nosuch.size())); - for (long _iter433 : struct.nosuch) + for (long _iter441 : struct.nosuch) { - oprot.writeI64(_iter433); + oprot.writeI64(_iter441); } oprot.writeSetEnd(); } @@ -544,16 +544,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.aborted.size()); - for (long _iter434 : struct.aborted) + for (long _iter442 : struct.aborted) { - oprot.writeI64(_iter434); + oprot.writeI64(_iter442); } } { oprot.writeI32(struct.nosuch.size()); - for (long _iter435 : struct.nosuch) + for (long _iter443 : struct.nosuch) { - oprot.writeI64(_iter435); + oprot.writeI64(_iter443); } } } @@ -562,24 +562,24 @@ public void write(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeRe public void read(org.apache.thrift.protocol.TProtocol prot, HeartbeatTxnRangeResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TSet _set436 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.aborted = new HashSet(2*_set436.size); - for (int _i437 = 0; _i437 < _set436.size; ++_i437) + org.apache.thrift.protocol.TSet _set444 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.aborted = new HashSet(2*_set444.size); + for (int _i445 = 0; _i445 < _set444.size; ++_i445) { - long _elem438; // required - _elem438 = iprot.readI64(); - struct.aborted.add(_elem438); + long _elem446; // required + _elem446 = iprot.readI64(); + struct.aborted.add(_elem446); } } struct.setAbortedIsSet(true); { - org.apache.thrift.protocol.TSet _set439 = new 
org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.nosuch = new HashSet(2*_set439.size); - for (int _i440 = 0; _i440 < _set439.size; ++_i440) + org.apache.thrift.protocol.TSet _set447 = new org.apache.thrift.protocol.TSet(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.nosuch = new HashSet(2*_set447.size); + for (int _i448 = 0; _i448 < _set447.size; ++_i448) { - long _elem441; // required - _elem441 = iprot.readI64(); - struct.nosuch.add(_elem441); + long _elem449; // required + _elem449 = iprot.readI64(); + struct.nosuch.add(_elem449); } } struct.setNosuchIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java index f860028..5886c2d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/LockRequest.java @@ -601,14 +601,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, LockRequest struct) case 1: // COMPONENT if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list410 = iprot.readListBegin(); - struct.component = new ArrayList(_list410.size); - for (int _i411 = 0; _i411 < _list410.size; ++_i411) + org.apache.thrift.protocol.TList _list418 = iprot.readListBegin(); + struct.component = new ArrayList(_list418.size); + for (int _i419 = 0; _i419 < _list418.size; ++_i419) { - LockComponent _elem412; // required - _elem412 = new LockComponent(); - _elem412.read(iprot); - struct.component.add(_elem412); + LockComponent _elem420; // required + _elem420 = new LockComponent(); + _elem420.read(iprot); + struct.component.add(_elem420); } iprot.readListEnd(); } @@ -658,9 +658,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, LockRequest struct oprot.writeFieldBegin(COMPONENT_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.component.size())); - for (LockComponent _iter413 : struct.component) + for (LockComponent _iter421 : struct.component) { - _iter413.write(oprot); + _iter421.write(oprot); } oprot.writeListEnd(); } @@ -700,9 +700,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.component.size()); - for (LockComponent _iter414 : struct.component) + for (LockComponent _iter422 : struct.component) { - _iter414.write(oprot); + _iter422.write(oprot); } } oprot.writeString(struct.user); @@ -721,14 +721,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) public void read(org.apache.thrift.protocol.TProtocol prot, LockRequest struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.component = new ArrayList(_list415.size); - for (int _i416 = 0; _i416 < _list415.size; ++_i416) + org.apache.thrift.protocol.TList _list423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.component = new ArrayList(_list423.size); + for (int _i424 = 0; _i424 < _list423.size; ++_i424) { - LockComponent _elem417; // required - _elem417 = new 
LockComponent(); - _elem417.read(iprot); - struct.component.add(_elem417); + LockComponent _elem425; // required + _elem425 = new LockComponent(); + _elem425.read(iprot); + struct.component.add(_elem425); } } struct.setComponentIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java index 1a99948..990ab86 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/OpenTxnsResponse.java @@ -350,13 +350,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, OpenTxnsResponse st case 1: // TXN_IDS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list402 = iprot.readListBegin(); - struct.txn_ids = new ArrayList(_list402.size); - for (int _i403 = 0; _i403 < _list402.size; ++_i403) + org.apache.thrift.protocol.TList _list410 = iprot.readListBegin(); + struct.txn_ids = new ArrayList(_list410.size); + for (int _i411 = 0; _i411 < _list410.size; ++_i411) { - long _elem404; // required - _elem404 = iprot.readI64(); - struct.txn_ids.add(_elem404); + long _elem412; // required + _elem412 = iprot.readI64(); + struct.txn_ids.add(_elem412); } iprot.readListEnd(); } @@ -382,9 +382,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, OpenTxnsResponse s oprot.writeFieldBegin(TXN_IDS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, struct.txn_ids.size())); - for (long _iter405 : struct.txn_ids) + for (long _iter413 : struct.txn_ids) { - oprot.writeI64(_iter405); + oprot.writeI64(_iter413); } oprot.writeListEnd(); } @@ -409,9 +409,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.txn_ids.size()); - for (long _iter406 : struct.txn_ids) + for (long _iter414 : struct.txn_ids) { - oprot.writeI64(_iter406); + oprot.writeI64(_iter414); } } } @@ -420,13 +420,13 @@ public void write(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse st public void read(org.apache.thrift.protocol.TProtocol prot, OpenTxnsResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list407 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); - struct.txn_ids = new ArrayList(_list407.size); - for (int _i408 = 0; _i408 < _list407.size; ++_i408) + org.apache.thrift.protocol.TList _list415 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.I64, iprot.readI32()); + struct.txn_ids = new ArrayList(_list415.size); + for (int _i416 = 0; _i416 < _list415.size; ++_i416) { - long _elem409; // required - _elem409 = iprot.readI64(); - struct.txn_ids.add(_elem409); + long _elem417; // required + _elem417 = iprot.readI64(); + struct.txn_ids.add(_elem417); } } struct.setTxn_idsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java index 33d5435..d4b10bf 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java +++ 
b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsByExprResult.java @@ -435,14 +435,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsByExprRes case 1: // PARTITIONS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list280 = iprot.readListBegin(); - struct.partitions = new ArrayList(_list280.size); - for (int _i281 = 0; _i281 < _list280.size; ++_i281) + org.apache.thrift.protocol.TList _list288 = iprot.readListBegin(); + struct.partitions = new ArrayList(_list288.size); + for (int _i289 = 0; _i289 < _list288.size; ++_i289) { - Partition _elem282; // required - _elem282 = new Partition(); - _elem282.read(iprot); - struct.partitions.add(_elem282); + Partition _elem290; // required + _elem290 = new Partition(); + _elem290.read(iprot); + struct.partitions.add(_elem290); } iprot.readListEnd(); } @@ -476,9 +476,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsByExprRe oprot.writeFieldBegin(PARTITIONS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.partitions.size())); - for (Partition _iter283 : struct.partitions) + for (Partition _iter291 : struct.partitions) { - _iter283.write(oprot); + _iter291.write(oprot); } oprot.writeListEnd(); } @@ -506,9 +506,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partitions.size()); - for (Partition _iter284 : struct.partitions) + for (Partition _iter292 : struct.partitions) { - _iter284.write(oprot); + _iter292.write(oprot); } } oprot.writeBool(struct.hasUnknownPartitions); @@ -518,14 +518,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprRes public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsByExprResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list285 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.partitions = new ArrayList(_list285.size); - for (int _i286 = 0; _i286 < _list285.size; ++_i286) + org.apache.thrift.protocol.TList _list293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.partitions = new ArrayList(_list293.size); + for (int _i294 = 0; _i294 < _list293.size; ++_i294) { - Partition _elem287; // required - _elem287 = new Partition(); - _elem287.read(iprot); - struct.partitions.add(_elem287); + Partition _elem295; // required + _elem295 = new Partition(); + _elem295.read(iprot); + struct.partitions.add(_elem295); } } struct.setPartitionsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java index ec451ad..f23b89a 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsRequest.java @@ -641,13 +641,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list322 = iprot.readListBegin(); - 
struct.colNames = new ArrayList(_list322.size); - for (int _i323 = 0; _i323 < _list322.size; ++_i323) + org.apache.thrift.protocol.TList _list330 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list330.size); + for (int _i331 = 0; _i331 < _list330.size; ++_i331) { - String _elem324; // required - _elem324 = iprot.readString(); - struct.colNames.add(_elem324); + String _elem332; // required + _elem332 = iprot.readString(); + struct.colNames.add(_elem332); } iprot.readListEnd(); } @@ -659,13 +659,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsRequ case 4: // PART_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list325 = iprot.readListBegin(); - struct.partNames = new ArrayList(_list325.size); - for (int _i326 = 0; _i326 < _list325.size; ++_i326) + org.apache.thrift.protocol.TList _list333 = iprot.readListBegin(); + struct.partNames = new ArrayList(_list333.size); + for (int _i334 = 0; _i334 < _list333.size; ++_i334) { - String _elem327; // required - _elem327 = iprot.readString(); - struct.partNames.add(_elem327); + String _elem335; // required + _elem335 = iprot.readString(); + struct.partNames.add(_elem335); } iprot.readListEnd(); } @@ -701,9 +701,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter328 : struct.colNames) + for (String _iter336 : struct.colNames) { - oprot.writeString(_iter328); + oprot.writeString(_iter336); } oprot.writeListEnd(); } @@ -713,9 +713,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsReq oprot.writeFieldBegin(PART_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.partNames.size())); - for (String _iter329 : struct.partNames) + for (String _iter337 : struct.partNames) { - oprot.writeString(_iter329); + oprot.writeString(_iter337); } oprot.writeListEnd(); } @@ -742,16 +742,16 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsRequ oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter330 : struct.colNames) + for (String _iter338 : struct.colNames) { - oprot.writeString(_iter330); + oprot.writeString(_iter338); } } { oprot.writeI32(struct.partNames.size()); - for (String _iter331 : struct.partNames) + for (String _iter339 : struct.partNames) { - oprot.writeString(_iter331); + oprot.writeString(_iter339); } } } @@ -764,24 +764,24 @@ public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsReque struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list332 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list332.size); - for (int _i333 = 0; _i333 < _list332.size; ++_i333) + org.apache.thrift.protocol.TList _list340 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list340.size); + for (int _i341 = 0; _i341 < _list340.size; ++_i341) { - String _elem334; // required - _elem334 = iprot.readString(); - struct.colNames.add(_elem334); + String _elem342; // required + _elem342 = iprot.readString(); + struct.colNames.add(_elem342); } } 
struct.setColNamesIsSet(true); { - org.apache.thrift.protocol.TList _list335 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partNames = new ArrayList(_list335.size); - for (int _i336 = 0; _i336 < _list335.size; ++_i336) + org.apache.thrift.protocol.TList _list343 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partNames = new ArrayList(_list343.size); + for (int _i344 = 0; _i344 < _list343.size; ++_i344) { - String _elem337; // required - _elem337 = iprot.readString(); - struct.partNames.add(_elem337); + String _elem345; // required + _elem345 = iprot.readString(); + struct.partNames.add(_elem345); } } struct.setPartNamesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java index 916d4b4..d320c5e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/PartitionsStatsResult.java @@ -359,26 +359,26 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, PartitionsStatsResu case 1: // PART_STATS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map296 = iprot.readMapBegin(); - struct.partStats = new HashMap>(2*_map296.size); - for (int _i297 = 0; _i297 < _map296.size; ++_i297) + org.apache.thrift.protocol.TMap _map304 = iprot.readMapBegin(); + struct.partStats = new HashMap>(2*_map304.size); + for (int _i305 = 0; _i305 < _map304.size; ++_i305) { - String _key298; // required - List _val299; // required - _key298 = iprot.readString(); + String _key306; // required + List _val307; // required + _key306 = iprot.readString(); { - org.apache.thrift.protocol.TList _list300 = iprot.readListBegin(); - _val299 = new ArrayList(_list300.size); - for (int _i301 = 0; _i301 < _list300.size; ++_i301) + org.apache.thrift.protocol.TList _list308 = iprot.readListBegin(); + _val307 = new ArrayList(_list308.size); + for (int _i309 = 0; _i309 < _list308.size; ++_i309) { - ColumnStatisticsObj _elem302; // required - _elem302 = new ColumnStatisticsObj(); - _elem302.read(iprot); - _val299.add(_elem302); + ColumnStatisticsObj _elem310; // required + _elem310 = new ColumnStatisticsObj(); + _elem310.read(iprot); + _val307.add(_elem310); } iprot.readListEnd(); } - struct.partStats.put(_key298, _val299); + struct.partStats.put(_key306, _val307); } iprot.readMapEnd(); } @@ -404,14 +404,14 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, PartitionsStatsRes oprot.writeFieldBegin(PART_STATS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, struct.partStats.size())); - for (Map.Entry> _iter303 : struct.partStats.entrySet()) + for (Map.Entry> _iter311 : struct.partStats.entrySet()) { - oprot.writeString(_iter303.getKey()); + oprot.writeString(_iter311.getKey()); { - oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter303.getValue().size())); - for (ColumnStatisticsObj _iter304 : _iter303.getValue()) + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, _iter311.getValue().size())); + for (ColumnStatisticsObj _iter312 : 
_iter311.getValue()) { - _iter304.write(oprot); + _iter312.write(oprot); } oprot.writeListEnd(); } @@ -439,14 +439,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.partStats.size()); - for (Map.Entry> _iter305 : struct.partStats.entrySet()) + for (Map.Entry> _iter313 : struct.partStats.entrySet()) { - oprot.writeString(_iter305.getKey()); + oprot.writeString(_iter313.getKey()); { - oprot.writeI32(_iter305.getValue().size()); - for (ColumnStatisticsObj _iter306 : _iter305.getValue()) + oprot.writeI32(_iter313.getValue().size()); + for (ColumnStatisticsObj _iter314 : _iter313.getValue()) { - _iter306.write(oprot); + _iter314.write(oprot); } } } @@ -457,25 +457,25 @@ public void write(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResu public void read(org.apache.thrift.protocol.TProtocol prot, PartitionsStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TMap _map307 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); - struct.partStats = new HashMap>(2*_map307.size); - for (int _i308 = 0; _i308 < _map307.size; ++_i308) + org.apache.thrift.protocol.TMap _map315 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.LIST, iprot.readI32()); + struct.partStats = new HashMap>(2*_map315.size); + for (int _i316 = 0; _i316 < _map315.size; ++_i316) { - String _key309; // required - List _val310; // required - _key309 = iprot.readString(); + String _key317; // required + List _val318; // required + _key317 = iprot.readString(); { - org.apache.thrift.protocol.TList _list311 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - _val310 = new ArrayList(_list311.size); - for (int _i312 = 0; _i312 < _list311.size; ++_i312) + org.apache.thrift.protocol.TList _list319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + _val318 = new ArrayList(_list319.size); + for (int _i320 = 0; _i320 < _list319.size; ++_i320) { - ColumnStatisticsObj _elem313; // required - _elem313 = new ColumnStatisticsObj(); - _elem313.read(iprot); - _val310.add(_elem313); + ColumnStatisticsObj _elem321; // required + _elem321 = new ColumnStatisticsObj(); + _elem321.read(iprot); + _val318.add(_elem321); } } - struct.partStats.put(_key309, _val310); + struct.partStats.put(_key317, _val318); } } struct.setPartStatsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java index 217a3c1..805618f 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/RequestPartsSpec.java @@ -165,13 +165,13 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == NAMES_FIELD_DESC.type) { List names; { - org.apache.thrift.protocol.TList _list362 = iprot.readListBegin(); - names = new ArrayList(_list362.size); - for (int _i363 = 0; _i363 < _list362.size; ++_i363) + org.apache.thrift.protocol.TList _list370 = iprot.readListBegin(); + names = new ArrayList(_list370.size); + for (int _i371 = 0; 
_i371 < _list370.size; ++_i371) { - String _elem364; // required - _elem364 = iprot.readString(); - names.add(_elem364); + String _elem372; // required + _elem372 = iprot.readString(); + names.add(_elem372); } iprot.readListEnd(); } @@ -184,14 +184,14 @@ protected Object standardSchemeReadValue(org.apache.thrift.protocol.TProtocol ip if (field.type == EXPRS_FIELD_DESC.type) { List exprs; { - org.apache.thrift.protocol.TList _list365 = iprot.readListBegin(); - exprs = new ArrayList(_list365.size); - for (int _i366 = 0; _i366 < _list365.size; ++_i366) + org.apache.thrift.protocol.TList _list373 = iprot.readListBegin(); + exprs = new ArrayList(_list373.size); + for (int _i374 = 0; _i374 < _list373.size; ++_i374) { - DropPartitionsExpr _elem367; // required - _elem367 = new DropPartitionsExpr(); - _elem367.read(iprot); - exprs.add(_elem367); + DropPartitionsExpr _elem375; // required + _elem375 = new DropPartitionsExpr(); + _elem375.read(iprot); + exprs.add(_elem375); } iprot.readListEnd(); } @@ -215,9 +215,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter368 : names) + for (String _iter376 : names) { - oprot.writeString(_iter368); + oprot.writeString(_iter376); } oprot.writeListEnd(); } @@ -226,9 +226,9 @@ protected void standardSchemeWriteValue(org.apache.thrift.protocol.TProtocol opr List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter369 : exprs) + for (DropPartitionsExpr _iter377 : exprs) { - _iter369.write(oprot); + _iter377.write(oprot); } oprot.writeListEnd(); } @@ -246,13 +246,13 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case NAMES: List names; { - org.apache.thrift.protocol.TList _list370 = iprot.readListBegin(); - names = new ArrayList(_list370.size); - for (int _i371 = 0; _i371 < _list370.size; ++_i371) + org.apache.thrift.protocol.TList _list378 = iprot.readListBegin(); + names = new ArrayList(_list378.size); + for (int _i379 = 0; _i379 < _list378.size; ++_i379) { - String _elem372; // required - _elem372 = iprot.readString(); - names.add(_elem372); + String _elem380; // required + _elem380 = iprot.readString(); + names.add(_elem380); } iprot.readListEnd(); } @@ -260,14 +260,14 @@ protected Object tupleSchemeReadValue(org.apache.thrift.protocol.TProtocol iprot case EXPRS: List exprs; { - org.apache.thrift.protocol.TList _list373 = iprot.readListBegin(); - exprs = new ArrayList(_list373.size); - for (int _i374 = 0; _i374 < _list373.size; ++_i374) + org.apache.thrift.protocol.TList _list381 = iprot.readListBegin(); + exprs = new ArrayList(_list381.size); + for (int _i382 = 0; _i382 < _list381.size; ++_i382) { - DropPartitionsExpr _elem375; // required - _elem375 = new DropPartitionsExpr(); - _elem375.read(iprot); - exprs.add(_elem375); + DropPartitionsExpr _elem383; // required + _elem383 = new DropPartitionsExpr(); + _elem383.read(iprot); + exprs.add(_elem383); } iprot.readListEnd(); } @@ -287,9 +287,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List names = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, names.size())); - for (String _iter376 : names) + for (String _iter384 : names) { - oprot.writeString(_iter376); + 
oprot.writeString(_iter384); } oprot.writeListEnd(); } @@ -298,9 +298,9 @@ protected void tupleSchemeWriteValue(org.apache.thrift.protocol.TProtocol oprot) List exprs = (List)value_; { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, exprs.size())); - for (DropPartitionsExpr _iter377 : exprs) + for (DropPartitionsExpr _iter385 : exprs) { - _iter377.write(oprot); + _iter385.write(oprot); } oprot.writeListEnd(); } diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java index 0c200c5..45bf05c 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/Schema.java @@ -452,14 +452,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 1: // FIELD_SCHEMAS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list252 = iprot.readListBegin(); - struct.fieldSchemas = new ArrayList(_list252.size); - for (int _i253 = 0; _i253 < _list252.size; ++_i253) + org.apache.thrift.protocol.TList _list260 = iprot.readListBegin(); + struct.fieldSchemas = new ArrayList(_list260.size); + for (int _i261 = 0; _i261 < _list260.size; ++_i261) { - FieldSchema _elem254; // required - _elem254 = new FieldSchema(); - _elem254.read(iprot); - struct.fieldSchemas.add(_elem254); + FieldSchema _elem262; // required + _elem262 = new FieldSchema(); + _elem262.read(iprot); + struct.fieldSchemas.add(_elem262); } iprot.readListEnd(); } @@ -471,15 +471,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, Schema struct) thro case 2: // PROPERTIES if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map255 = iprot.readMapBegin(); - struct.properties = new HashMap(2*_map255.size); - for (int _i256 = 0; _i256 < _map255.size; ++_i256) + org.apache.thrift.protocol.TMap _map263 = iprot.readMapBegin(); + struct.properties = new HashMap(2*_map263.size); + for (int _i264 = 0; _i264 < _map263.size; ++_i264) { - String _key257; // required - String _val258; // required - _key257 = iprot.readString(); - _val258 = iprot.readString(); - struct.properties.put(_key257, _val258); + String _key265; // required + String _val266; // required + _key265 = iprot.readString(); + _val266 = iprot.readString(); + struct.properties.put(_key265, _val266); } iprot.readMapEnd(); } @@ -505,9 +505,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(FIELD_SCHEMAS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.fieldSchemas.size())); - for (FieldSchema _iter259 : struct.fieldSchemas) + for (FieldSchema _iter267 : struct.fieldSchemas) { - _iter259.write(oprot); + _iter267.write(oprot); } oprot.writeListEnd(); } @@ -517,10 +517,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, Schema struct) thr oprot.writeFieldBegin(PROPERTIES_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.properties.size())); - for (Map.Entry _iter260 : struct.properties.entrySet()) + for (Map.Entry _iter268 : struct.properties.entrySet()) { - oprot.writeString(_iter260.getKey()); - 
oprot.writeString(_iter260.getValue()); + oprot.writeString(_iter268.getKey()); + oprot.writeString(_iter268.getValue()); } oprot.writeMapEnd(); } @@ -554,19 +554,19 @@ public void write(org.apache.thrift.protocol.TProtocol prot, Schema struct) thro if (struct.isSetFieldSchemas()) { { oprot.writeI32(struct.fieldSchemas.size()); - for (FieldSchema _iter261 : struct.fieldSchemas) + for (FieldSchema _iter269 : struct.fieldSchemas) { - _iter261.write(oprot); + _iter269.write(oprot); } } } if (struct.isSetProperties()) { { oprot.writeI32(struct.properties.size()); - for (Map.Entry _iter262 : struct.properties.entrySet()) + for (Map.Entry _iter270 : struct.properties.entrySet()) { - oprot.writeString(_iter262.getKey()); - oprot.writeString(_iter262.getValue()); + oprot.writeString(_iter270.getKey()); + oprot.writeString(_iter270.getValue()); } } } @@ -578,29 +578,29 @@ public void read(org.apache.thrift.protocol.TProtocol prot, Schema struct) throw BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list263 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.fieldSchemas = new ArrayList(_list263.size); - for (int _i264 = 0; _i264 < _list263.size; ++_i264) + org.apache.thrift.protocol.TList _list271 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.fieldSchemas = new ArrayList(_list271.size); + for (int _i272 = 0; _i272 < _list271.size; ++_i272) { - FieldSchema _elem265; // required - _elem265 = new FieldSchema(); - _elem265.read(iprot); - struct.fieldSchemas.add(_elem265); + FieldSchema _elem273; // required + _elem273 = new FieldSchema(); + _elem273.read(iprot); + struct.fieldSchemas.add(_elem273); } } struct.setFieldSchemasIsSet(true); } if (incoming.get(1)) { { - org.apache.thrift.protocol.TMap _map266 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.properties = new HashMap(2*_map266.size); - for (int _i267 = 0; _i267 < _map266.size; ++_i267) + org.apache.thrift.protocol.TMap _map274 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.properties = new HashMap(2*_map274.size); + for (int _i275 = 0; _i275 < _map274.size; ++_i275) { - String _key268; // required - String _val269; // required - _key268 = iprot.readString(); - _val269 = iprot.readString(); - struct.properties.put(_key268, _val269); + String _key276; // required + String _val277; // required + _key276 = iprot.readString(); + _val277 = iprot.readString(); + struct.properties.put(_key276, _val277); } } struct.setPropertiesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java new file mode 100644 index 0000000..98a100a --- /dev/null +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/SetPartitionsStatsRequest.java @@ -0,0 +1,439 @@ +/** + * Autogenerated by Thrift Compiler (0.9.0) + * + * DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING + * @generated + */ +package org.apache.hadoop.hive.metastore.api; + +import org.apache.commons.lang.builder.HashCodeBuilder; +import org.apache.thrift.scheme.IScheme; +import org.apache.thrift.scheme.SchemeFactory; +import 
org.apache.thrift.scheme.StandardScheme; + +import org.apache.thrift.scheme.TupleScheme; +import org.apache.thrift.protocol.TTupleProtocol; +import org.apache.thrift.protocol.TProtocolException; +import org.apache.thrift.EncodingUtils; +import org.apache.thrift.TException; +import java.util.List; +import java.util.ArrayList; +import java.util.Map; +import java.util.HashMap; +import java.util.EnumMap; +import java.util.Set; +import java.util.HashSet; +import java.util.EnumSet; +import java.util.Collections; +import java.util.BitSet; +import java.nio.ByteBuffer; +import java.util.Arrays; +import org.slf4j.Logger; +import org.slf4j.LoggerFactory; + +public class SetPartitionsStatsRequest implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("SetPartitionsStatsRequest"); + + private static final org.apache.thrift.protocol.TField COL_STATS_FIELD_DESC = new org.apache.thrift.protocol.TField("colStats", org.apache.thrift.protocol.TType.LIST, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new SetPartitionsStatsRequestStandardSchemeFactory()); + schemes.put(TupleScheme.class, new SetPartitionsStatsRequestTupleSchemeFactory()); + } + + private List colStats; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + COL_STATS((short)1, "colStats"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // COL_STATS + return COL_STATS; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.COL_STATS, new org.apache.thrift.meta_data.FieldMetaData("colStats", org.apache.thrift.TFieldRequirementType.REQUIRED, + new org.apache.thrift.meta_data.ListMetaData(org.apache.thrift.protocol.TType.LIST, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, ColumnStatistics.class)))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(SetPartitionsStatsRequest.class, metaDataMap); + } + + public SetPartitionsStatsRequest() { + } + + public SetPartitionsStatsRequest( + List colStats) + { + this(); + this.colStats = colStats; + } + + /** + * Performs a deep copy on other. + */ + public SetPartitionsStatsRequest(SetPartitionsStatsRequest other) { + if (other.isSetColStats()) { + List __this__colStats = new ArrayList(); + for (ColumnStatistics other_element : other.colStats) { + __this__colStats.add(new ColumnStatistics(other_element)); + } + this.colStats = __this__colStats; + } + } + + public SetPartitionsStatsRequest deepCopy() { + return new SetPartitionsStatsRequest(this); + } + + @Override + public void clear() { + this.colStats = null; + } + + public int getColStatsSize() { + return (this.colStats == null) ? 0 : this.colStats.size(); + } + + public java.util.Iterator getColStatsIterator() { + return (this.colStats == null) ? 
null : this.colStats.iterator(); + } + + public void addToColStats(ColumnStatistics elem) { + if (this.colStats == null) { + this.colStats = new ArrayList(); + } + this.colStats.add(elem); + } + + public List getColStats() { + return this.colStats; + } + + public void setColStats(List colStats) { + this.colStats = colStats; + } + + public void unsetColStats() { + this.colStats = null; + } + + /** Returns true if field colStats is set (has been assigned a value) and false otherwise */ + public boolean isSetColStats() { + return this.colStats != null; + } + + public void setColStatsIsSet(boolean value) { + if (!value) { + this.colStats = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case COL_STATS: + if (value == null) { + unsetColStats(); + } else { + setColStats((List)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case COL_STATS: + return getColStats(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case COL_STATS: + return isSetColStats(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof SetPartitionsStatsRequest) + return this.equals((SetPartitionsStatsRequest)that); + return false; + } + + public boolean equals(SetPartitionsStatsRequest that) { + if (that == null) + return false; + + boolean this_present_colStats = true && this.isSetColStats(); + boolean that_present_colStats = true && that.isSetColStats(); + if (this_present_colStats || that_present_colStats) { + if (!(this_present_colStats && that_present_colStats)) + return false; + if (!this.colStats.equals(that.colStats)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_colStats = true && (isSetColStats()); + builder.append(present_colStats); + if (present_colStats) + builder.append(colStats); + + return builder.toHashCode(); + } + + public int compareTo(SetPartitionsStatsRequest other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + SetPartitionsStatsRequest typedOther = (SetPartitionsStatsRequest)other; + + lastComparison = Boolean.valueOf(isSetColStats()).compareTo(typedOther.isSetColStats()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetColStats()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.colStats, typedOther.colStats); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("SetPartitionsStatsRequest("); + boolean first = true; + + sb.append("colStats:"); + if (this.colStats == null) { + 
sb.append("null"); + } else { + sb.append(this.colStats); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + if (!isSetColStats()) { + throw new org.apache.thrift.protocol.TProtocolException("Required field 'colStats' is unset! Struct:" + toString()); + } + + // check for sub-struct validity + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class SetPartitionsStatsRequestStandardSchemeFactory implements SchemeFactory { + public SetPartitionsStatsRequestStandardScheme getScheme() { + return new SetPartitionsStatsRequestStandardScheme(); + } + } + + private static class SetPartitionsStatsRequestStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // COL_STATS + if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { + { + org.apache.thrift.protocol.TList _list252 = iprot.readListBegin(); + struct.colStats = new ArrayList(_list252.size); + for (int _i253 = 0; _i253 < _list252.size; ++_i253) + { + ColumnStatistics _elem254; // required + _elem254 = new ColumnStatistics(); + _elem254.read(iprot); + struct.colStats.add(_elem254); + } + iprot.readListEnd(); + } + struct.setColStatsIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.colStats != null) { + oprot.writeFieldBegin(COL_STATS_FIELD_DESC); + { + oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.colStats.size())); + for (ColumnStatistics _iter255 : struct.colStats) + { + _iter255.write(oprot); + } + oprot.writeListEnd(); + } + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class SetPartitionsStatsRequestTupleSchemeFactory implements SchemeFactory { + public SetPartitionsStatsRequestTupleScheme getScheme() { + return new SetPartitionsStatsRequestTupleScheme(); + } + } + + private static class SetPartitionsStatsRequestTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol 
oprot = (TTupleProtocol) prot; + { + oprot.writeI32(struct.colStats.size()); + for (ColumnStatistics _iter256 : struct.colStats) + { + _iter256.write(oprot); + } + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, SetPartitionsStatsRequest struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + { + org.apache.thrift.protocol.TList _list257 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.colStats = new ArrayList(_list257.size); + for (int _i258 = 0; _i258 < _list257.size; ++_i258) + { + ColumnStatistics _elem259; // required + _elem259 = new ColumnStatistics(); + _elem259.read(iprot); + struct.colStats.add(_elem259); + } + } + struct.setColStatsIsSet(true); + } + } + +} + diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java index 6da732e..ac41683 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowCompactResponse.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowCompactResponse case 1: // COMPACTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list442 = iprot.readListBegin(); - struct.compacts = new ArrayList(_list442.size); - for (int _i443 = 0; _i443 < _list442.size; ++_i443) + org.apache.thrift.protocol.TList _list450 = iprot.readListBegin(); + struct.compacts = new ArrayList(_list450.size); + for (int _i451 = 0; _i451 < _list450.size; ++_i451) { - ShowCompactResponseElement _elem444; // required - _elem444 = new ShowCompactResponseElement(); - _elem444.read(iprot); - struct.compacts.add(_elem444); + ShowCompactResponseElement _elem452; // required + _elem452 = new ShowCompactResponseElement(); + _elem452.read(iprot); + struct.compacts.add(_elem452); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowCompactRespons oprot.writeFieldBegin(COMPACTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.compacts.size())); - for (ShowCompactResponseElement _iter445 : struct.compacts) + for (ShowCompactResponseElement _iter453 : struct.compacts) { - _iter445.write(oprot); + _iter453.write(oprot); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.compacts.size()); - for (ShowCompactResponseElement _iter446 : struct.compacts) + for (ShowCompactResponseElement _iter454 : struct.compacts) { - _iter446.write(oprot); + _iter454.write(oprot); } } } @@ -421,14 +421,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse public void read(org.apache.thrift.protocol.TProtocol prot, ShowCompactResponse struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list447 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.compacts = new ArrayList(_list447.size); - for (int _i448 = 0; _i448 < _list447.size; ++_i448) + org.apache.thrift.protocol.TList _list455 = 
new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.compacts = new ArrayList(_list455.size); + for (int _i456 = 0; _i456 < _list455.size; ++_i456) { - ShowCompactResponseElement _elem449; // required - _elem449 = new ShowCompactResponseElement(); - _elem449.read(iprot); - struct.compacts.add(_elem449); + ShowCompactResponseElement _elem457; // required + _elem457 = new ShowCompactResponseElement(); + _elem457.read(iprot); + struct.compacts.add(_elem457); } } struct.setCompactsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java index 554601a..f4ef62d 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ShowLocksResponse.java @@ -346,14 +346,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, ShowLocksResponse s case 1: // LOCKS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list418 = iprot.readListBegin(); - struct.locks = new ArrayList(_list418.size); - for (int _i419 = 0; _i419 < _list418.size; ++_i419) + org.apache.thrift.protocol.TList _list426 = iprot.readListBegin(); + struct.locks = new ArrayList(_list426.size); + for (int _i427 = 0; _i427 < _list426.size; ++_i427) { - ShowLocksResponseElement _elem420; // required - _elem420 = new ShowLocksResponseElement(); - _elem420.read(iprot); - struct.locks.add(_elem420); + ShowLocksResponseElement _elem428; // required + _elem428 = new ShowLocksResponseElement(); + _elem428.read(iprot); + struct.locks.add(_elem428); } iprot.readListEnd(); } @@ -379,9 +379,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, ShowLocksResponse oprot.writeFieldBegin(LOCKS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.locks.size())); - for (ShowLocksResponseElement _iter421 : struct.locks) + for (ShowLocksResponseElement _iter429 : struct.locks) { - _iter421.write(oprot); + _iter429.write(oprot); } oprot.writeListEnd(); } @@ -412,9 +412,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse s if (struct.isSetLocks()) { { oprot.writeI32(struct.locks.size()); - for (ShowLocksResponseElement _iter422 : struct.locks) + for (ShowLocksResponseElement _iter430 : struct.locks) { - _iter422.write(oprot); + _iter430.write(oprot); } } } @@ -426,14 +426,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, ShowLocksResponse st BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list423 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.locks = new ArrayList(_list423.size); - for (int _i424 = 0; _i424 < _list423.size; ++_i424) + org.apache.thrift.protocol.TList _list431 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.locks = new ArrayList(_list431.size); + for (int _i432 = 0; _i432 < _list431.size; ++_i432) { - ShowLocksResponseElement _elem425; // required - _elem425 = new ShowLocksResponseElement(); - _elem425.read(iprot); - struct.locks.add(_elem425); + ShowLocksResponseElement _elem433; // required + _elem433 = new ShowLocksResponseElement(); + 
_elem433.read(iprot); + struct.locks.add(_elem433); } } struct.setLocksIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java index 23711f8..d53292e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsRequest.java @@ -536,13 +536,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsRequest s case 3: // COL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list314 = iprot.readListBegin(); - struct.colNames = new ArrayList(_list314.size); - for (int _i315 = 0; _i315 < _list314.size; ++_i315) + org.apache.thrift.protocol.TList _list322 = iprot.readListBegin(); + struct.colNames = new ArrayList(_list322.size); + for (int _i323 = 0; _i323 < _list322.size; ++_i323) { - String _elem316; // required - _elem316 = iprot.readString(); - struct.colNames.add(_elem316); + String _elem324; // required + _elem324 = iprot.readString(); + struct.colNames.add(_elem324); } iprot.readListEnd(); } @@ -578,9 +578,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsRequest oprot.writeFieldBegin(COL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.colNames.size())); - for (String _iter317 : struct.colNames) + for (String _iter325 : struct.colNames) { - oprot.writeString(_iter317); + oprot.writeString(_iter325); } oprot.writeListEnd(); } @@ -607,9 +607,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest s oprot.writeString(struct.tblName); { oprot.writeI32(struct.colNames.size()); - for (String _iter318 : struct.colNames) + for (String _iter326 : struct.colNames) { - oprot.writeString(_iter318); + oprot.writeString(_iter326); } } } @@ -622,13 +622,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsRequest st struct.tblName = iprot.readString(); struct.setTblNameIsSet(true); { - org.apache.thrift.protocol.TList _list319 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.colNames = new ArrayList(_list319.size); - for (int _i320 = 0; _i320 < _list319.size; ++_i320) + org.apache.thrift.protocol.TList _list327 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.colNames = new ArrayList(_list327.size); + for (int _i328 = 0; _i328 < _list327.size; ++_i328) { - String _elem321; // required - _elem321 = iprot.readString(); - struct.colNames.add(_elem321); + String _elem329; // required + _elem329 = iprot.readString(); + struct.colNames.add(_elem329); } } struct.setColNamesIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java index a61d363..f78732e 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/TableStatsResult.java @@ -350,14 +350,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, TableStatsResult st case 1: // TABLE_STATS if 
(schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list288 = iprot.readListBegin(); - struct.tableStats = new ArrayList(_list288.size); - for (int _i289 = 0; _i289 < _list288.size; ++_i289) + org.apache.thrift.protocol.TList _list296 = iprot.readListBegin(); + struct.tableStats = new ArrayList(_list296.size); + for (int _i297 = 0; _i297 < _list296.size; ++_i297) { - ColumnStatisticsObj _elem290; // required - _elem290 = new ColumnStatisticsObj(); - _elem290.read(iprot); - struct.tableStats.add(_elem290); + ColumnStatisticsObj _elem298; // required + _elem298 = new ColumnStatisticsObj(); + _elem298.read(iprot); + struct.tableStats.add(_elem298); } iprot.readListEnd(); } @@ -383,9 +383,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, TableStatsResult s oprot.writeFieldBegin(TABLE_STATS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.tableStats.size())); - for (ColumnStatisticsObj _iter291 : struct.tableStats) + for (ColumnStatisticsObj _iter299 : struct.tableStats) { - _iter291.write(oprot); + _iter299.write(oprot); } oprot.writeListEnd(); } @@ -410,9 +410,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st TTupleProtocol oprot = (TTupleProtocol) prot; { oprot.writeI32(struct.tableStats.size()); - for (ColumnStatisticsObj _iter292 : struct.tableStats) + for (ColumnStatisticsObj _iter300 : struct.tableStats) { - _iter292.write(oprot); + _iter300.write(oprot); } } } @@ -421,14 +421,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, TableStatsResult st public void read(org.apache.thrift.protocol.TProtocol prot, TableStatsResult struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; { - org.apache.thrift.protocol.TList _list293 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.tableStats = new ArrayList(_list293.size); - for (int _i294 = 0; _i294 < _list293.size; ++_i294) + org.apache.thrift.protocol.TList _list301 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.tableStats = new ArrayList(_list301.size); + for (int _i302 = 0; _i302 < _list301.size; ++_i302) { - ColumnStatisticsObj _elem295; // required - _elem295 = new ColumnStatisticsObj(); - _elem295.read(iprot); - struct.tableStats.add(_elem295); + ColumnStatisticsObj _elem303; // required + _elem303 = new ColumnStatisticsObj(); + _elem303.read(iprot); + struct.tableStats.add(_elem303); } } struct.setTableStatsIsSet(true); diff --git a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java index fb06b6c..011f612 100644 --- a/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java +++ b/metastore/src/gen/thrift/gen-javabean/org/apache/hadoop/hive/metastore/api/ThriftHiveMetastore.java @@ -186,6 +186,8 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) throws NoSuchObjectException, MetaException, org.apache.thrift.TException; + public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException; + public boolean delete_partition_column_statistics(String db_name, String tbl_name, 
String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; public boolean delete_table_column_statistics(String db_name, String tbl_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException; @@ -414,6 +416,8 @@ public void get_aggr_stats_for(PartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; + public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; public void delete_table_column_statistics(String db_name, String tbl_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException; @@ -2803,6 +2807,41 @@ public AggrStats recv_get_aggr_stats_for() throws NoSuchObjectException, MetaExc throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); } + public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + send_set_aggr_stats_for(request); + return recv_set_aggr_stats_for(); + } + + public void send_set_aggr_stats_for(SetPartitionsStatsRequest request) throws org.apache.thrift.TException + { + set_aggr_stats_for_args args = new set_aggr_stats_for_args(); + args.setRequest(request); + sendBase("set_aggr_stats_for", args); + } + + public boolean recv_set_aggr_stats_for() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException + { + set_aggr_stats_for_result result = new set_aggr_stats_for_result(); + receiveBase(result, "set_aggr_stats_for"); + if (result.isSetSuccess()) { + return result.success; + } + if (result.o1 != null) { + throw result.o1; + } + if (result.o2 != null) { + throw result.o2; + } + if (result.o3 != null) { + throw result.o3; + } + if (result.o4 != null) { + throw result.o4; + } + throw new org.apache.thrift.TApplicationException(org.apache.thrift.TApplicationException.MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); + } + public boolean delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name) throws NoSuchObjectException, MetaException, InvalidObjectException, InvalidInputException, org.apache.thrift.TException { send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name); @@ -6561,6 +6600,38 @@ public AggrStats getResult() throws NoSuchObjectException, MetaException, org.ap } } + public void set_aggr_stats_for(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { + checkReady(); + set_aggr_stats_for_call method_call = new set_aggr_stats_for_call(request, resultHandler, this, ___protocolFactory, ___transport); + this.___currentMethod = method_call; + ___manager.call(method_call); + } + + public static class set_aggr_stats_for_call extends org.apache.thrift.async.TAsyncMethodCall { + private 
SetPartitionsStatsRequest request; + public set_aggr_stats_for_call(SetPartitionsStatsRequest request, org.apache.thrift.async.AsyncMethodCallback resultHandler, org.apache.thrift.async.TAsyncClient client, org.apache.thrift.protocol.TProtocolFactory protocolFactory, org.apache.thrift.transport.TNonblockingTransport transport) throws org.apache.thrift.TException { + super(client, protocolFactory, transport, resultHandler, false); + this.request = request; + } + + public void write_args(org.apache.thrift.protocol.TProtocol prot) throws org.apache.thrift.TException { + prot.writeMessageBegin(new org.apache.thrift.protocol.TMessage("set_aggr_stats_for", org.apache.thrift.protocol.TMessageType.CALL, 0)); + set_aggr_stats_for_args args = new set_aggr_stats_for_args(); + args.setRequest(request); + args.write(prot); + prot.writeMessageEnd(); + } + + public boolean getResult() throws NoSuchObjectException, InvalidObjectException, MetaException, InvalidInputException, org.apache.thrift.TException { + if (getState() != org.apache.thrift.async.TAsyncMethodCall.State.RESPONSE_READ) { + throw new IllegalStateException("Method call not finished!"); + } + org.apache.thrift.transport.TMemoryInputTransport memoryTransport = new org.apache.thrift.transport.TMemoryInputTransport(getFrameBuffer().array()); + org.apache.thrift.protocol.TProtocol prot = client.getProtocolFactory().getProtocol(memoryTransport); + return (new Client(prot)).recv_set_aggr_stats_for(); + } + } + public void delete_partition_column_statistics(String db_name, String tbl_name, String part_name, String col_name, org.apache.thrift.async.AsyncMethodCallback resultHandler) throws org.apache.thrift.TException { checkReady(); delete_partition_column_statistics_call method_call = new delete_partition_column_statistics_call(db_name, tbl_name, part_name, col_name, resultHandler, this, ___protocolFactory, ___transport); @@ -7927,6 +7998,7 @@ protected Processor(I iface, Map extends org.apache.thrift.ProcessFunction { + public set_aggr_stats_for() { + super("set_aggr_stats_for"); + } + + public set_aggr_stats_for_args getEmptyArgsInstance() { + return new set_aggr_stats_for_args(); + } + + protected boolean isOneway() { + return false; + } + + public set_aggr_stats_for_result getResult(I iface, set_aggr_stats_for_args args) throws org.apache.thrift.TException { + set_aggr_stats_for_result result = new set_aggr_stats_for_result(); + try { + result.success = iface.set_aggr_stats_for(args.request); + result.setSuccessIsSet(true); + } catch (NoSuchObjectException o1) { + result.o1 = o1; + } catch (InvalidObjectException o2) { + result.o2 = o2; + } catch (MetaException o3) { + result.o3 = o3; + } catch (InvalidInputException o4) { + result.o4 = o4; + } + return result; + } + } + public static class delete_partition_column_statistics extends org.apache.thrift.ProcessFunction { public delete_partition_column_statistics() { super("delete_partition_column_statistics"); @@ -16283,13 +16386,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_databases_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list450 = iprot.readListBegin(); - struct.success = new ArrayList(_list450.size); - for (int _i451 = 0; _i451 < _list450.size; ++_i451) + org.apache.thrift.protocol.TList _list458 = iprot.readListBegin(); + struct.success = new ArrayList(_list458.size); + for (int _i459 = 0; _i459 < _list458.size; ++_i459) { - String _elem452; // required - _elem452 = 
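The hunks above wire the new set_aggr_stats_for call through the generated ThriftHiveMetastore synchronous client, async client, and server-side processor. As a rough sketch of how the new RPC might be driven by a caller, assuming an already-connected ThriftHiveMetastore.Client and a list of ColumnStatistics built elsewhere (the helper class and variable names below are illustrative and not part of this patch):

// Sketch only: exercises the set_aggr_stats_for stub added by this patch.
// `client` and `stats` are assumed to exist; error handling is minimal.
import java.util.List;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;
import org.apache.hadoop.hive.metastore.api.ThriftHiveMetastore;

public final class SetAggrStatsExample {             // hypothetical helper
  static boolean publish(ThriftHiveMetastore.Client client,
                         List<ColumnStatistics> stats) throws Exception {
    SetPartitionsStatsRequest request = new SetPartitionsStatsRequest();
    for (ColumnStatistics cs : stats) {
      request.addToColStats(cs);                     // colStats is the struct's only (required) field
    }
    // Declared to throw NoSuchObjectException, InvalidObjectException,
    // MetaException and InvalidInputException, per the generated signature above.
    return client.set_aggr_stats_for(request);
  }
}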
iprot.readString(); - struct.success.add(_elem452); + String _elem460; // required + _elem460 = iprot.readString(); + struct.success.add(_elem460); } iprot.readListEnd(); } @@ -16324,9 +16427,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_databases_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter453 : struct.success) + for (String _iter461 : struct.success) { - oprot.writeString(_iter453); + oprot.writeString(_iter461); } oprot.writeListEnd(); } @@ -16365,9 +16468,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_databases_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter454 : struct.success) + for (String _iter462 : struct.success) { - oprot.writeString(_iter454); + oprot.writeString(_iter462); } } } @@ -16382,13 +16485,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_databases_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list455 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list455.size); - for (int _i456 = 0; _i456 < _list455.size; ++_i456) + org.apache.thrift.protocol.TList _list463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list463.size); + for (int _i464 = 0; _i464 < _list463.size; ++_i464) { - String _elem457; // required - _elem457 = iprot.readString(); - struct.success.add(_elem457); + String _elem465; // required + _elem465 = iprot.readString(); + struct.success.add(_elem465); } } struct.setSuccessIsSet(true); @@ -17045,13 +17148,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_databases_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list458 = iprot.readListBegin(); - struct.success = new ArrayList(_list458.size); - for (int _i459 = 0; _i459 < _list458.size; ++_i459) + org.apache.thrift.protocol.TList _list466 = iprot.readListBegin(); + struct.success = new ArrayList(_list466.size); + for (int _i467 = 0; _i467 < _list466.size; ++_i467) { - String _elem460; // required - _elem460 = iprot.readString(); - struct.success.add(_elem460); + String _elem468; // required + _elem468 = iprot.readString(); + struct.success.add(_elem468); } iprot.readListEnd(); } @@ -17086,9 +17189,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_databases_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter461 : struct.success) + for (String _iter469 : struct.success) { - oprot.writeString(_iter461); + oprot.writeString(_iter469); } oprot.writeListEnd(); } @@ -17127,9 +17230,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_databases_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter462 : struct.success) + for (String _iter470 : struct.success) { - oprot.writeString(_iter462); + oprot.writeString(_iter470); } } } @@ -17144,13 +17247,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_databases_re BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - 
org.apache.thrift.protocol.TList _list463 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list463.size); - for (int _i464 = 0; _i464 < _list463.size; ++_i464) + org.apache.thrift.protocol.TList _list471 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list471.size); + for (int _i472 = 0; _i472 < _list471.size; ++_i472) { - String _elem465; // required - _elem465 = iprot.readString(); - struct.success.add(_elem465); + String _elem473; // required + _elem473 = iprot.readString(); + struct.success.add(_elem473); } } struct.setSuccessIsSet(true); @@ -21757,16 +21860,16 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_type_all_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map466 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map466.size); - for (int _i467 = 0; _i467 < _map466.size; ++_i467) + org.apache.thrift.protocol.TMap _map474 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map474.size); + for (int _i475 = 0; _i475 < _map474.size; ++_i475) { - String _key468; // required - Type _val469; // required - _key468 = iprot.readString(); - _val469 = new Type(); - _val469.read(iprot); - struct.success.put(_key468, _val469); + String _key476; // required + Type _val477; // required + _key476 = iprot.readString(); + _val477 = new Type(); + _val477.read(iprot); + struct.success.put(_key476, _val477); } iprot.readMapEnd(); } @@ -21801,10 +21904,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_type_all_resul oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Map.Entry _iter470 : struct.success.entrySet()) + for (Map.Entry _iter478 : struct.success.entrySet()) { - oprot.writeString(_iter470.getKey()); - _iter470.getValue().write(oprot); + oprot.writeString(_iter478.getKey()); + _iter478.getValue().write(oprot); } oprot.writeMapEnd(); } @@ -21843,10 +21946,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_type_all_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter471 : struct.success.entrySet()) + for (Map.Entry _iter479 : struct.success.entrySet()) { - oprot.writeString(_iter471.getKey()); - _iter471.getValue().write(oprot); + oprot.writeString(_iter479.getKey()); + _iter479.getValue().write(oprot); } } } @@ -21861,16 +21964,16 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_type_all_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map472 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new HashMap(2*_map472.size); - for (int _i473 = 0; _i473 < _map472.size; ++_i473) + org.apache.thrift.protocol.TMap _map480 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new HashMap(2*_map480.size); + for (int _i481 = 0; _i481 < _map480.size; ++_i481) { - String _key474; // required - Type _val475; // required - _key474 = iprot.readString(); - _val475 = new Type(); - _val475.read(iprot); - 
struct.success.put(_key474, _val475); + String _key482; // required + Type _val483; // required + _key482 = iprot.readString(); + _val483 = new Type(); + _val483.read(iprot); + struct.success.put(_key482, _val483); } } struct.setSuccessIsSet(true); @@ -22905,14 +23008,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_fields_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list476 = iprot.readListBegin(); - struct.success = new ArrayList(_list476.size); - for (int _i477 = 0; _i477 < _list476.size; ++_i477) + org.apache.thrift.protocol.TList _list484 = iprot.readListBegin(); + struct.success = new ArrayList(_list484.size); + for (int _i485 = 0; _i485 < _list484.size; ++_i485) { - FieldSchema _elem478; // required - _elem478 = new FieldSchema(); - _elem478.read(iprot); - struct.success.add(_elem478); + FieldSchema _elem486; // required + _elem486 = new FieldSchema(); + _elem486.read(iprot); + struct.success.add(_elem486); } iprot.readListEnd(); } @@ -22965,9 +23068,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_fields_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter479 : struct.success) + for (FieldSchema _iter487 : struct.success) { - _iter479.write(oprot); + _iter487.write(oprot); } oprot.writeListEnd(); } @@ -23022,9 +23125,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_fields_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter480 : struct.success) + for (FieldSchema _iter488 : struct.success) { - _iter480.write(oprot); + _iter488.write(oprot); } } } @@ -23045,14 +23148,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_fields_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list481 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list481.size); - for (int _i482 = 0; _i482 < _list481.size; ++_i482) + org.apache.thrift.protocol.TList _list489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list489.size); + for (int _i490 = 0; _i490 < _list489.size; ++_i490) { - FieldSchema _elem483; // required - _elem483 = new FieldSchema(); - _elem483.read(iprot); - struct.success.add(_elem483); + FieldSchema _elem491; // required + _elem491 = new FieldSchema(); + _elem491.read(iprot); + struct.success.add(_elem491); } } struct.setSuccessIsSet(true); @@ -24097,14 +24200,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_schema_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list484 = iprot.readListBegin(); - struct.success = new ArrayList(_list484.size); - for (int _i485 = 0; _i485 < _list484.size; ++_i485) + org.apache.thrift.protocol.TList _list492 = iprot.readListBegin(); + struct.success = new ArrayList(_list492.size); + for (int _i493 = 0; _i493 < _list492.size; ++_i493) { - FieldSchema _elem486; // required - _elem486 = new FieldSchema(); - _elem486.read(iprot); - struct.success.add(_elem486); + FieldSchema _elem494; // required + _elem494 = new FieldSchema(); + _elem494.read(iprot); + 
struct.success.add(_elem494); } iprot.readListEnd(); } @@ -24157,9 +24260,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_schema_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (FieldSchema _iter487 : struct.success) + for (FieldSchema _iter495 : struct.success) { - _iter487.write(oprot); + _iter495.write(oprot); } oprot.writeListEnd(); } @@ -24214,9 +24317,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_schema_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (FieldSchema _iter488 : struct.success) + for (FieldSchema _iter496 : struct.success) { - _iter488.write(oprot); + _iter496.write(oprot); } } } @@ -24237,14 +24340,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_schema_result st BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list489 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list489.size); - for (int _i490 = 0; _i490 < _list489.size; ++_i490) + org.apache.thrift.protocol.TList _list497 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list497.size); + for (int _i498 = 0; _i498 < _list497.size; ++_i498) { - FieldSchema _elem491; // required - _elem491 = new FieldSchema(); - _elem491.read(iprot); - struct.success.add(_elem491); + FieldSchema _elem499; // required + _elem499 = new FieldSchema(); + _elem499.read(iprot); + struct.success.add(_elem499); } } struct.setSuccessIsSet(true); @@ -29487,13 +29590,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_tables_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list492 = iprot.readListBegin(); - struct.success = new ArrayList(_list492.size); - for (int _i493 = 0; _i493 < _list492.size; ++_i493) + org.apache.thrift.protocol.TList _list500 = iprot.readListBegin(); + struct.success = new ArrayList(_list500.size); + for (int _i501 = 0; _i501 < _list500.size; ++_i501) { - String _elem494; // required - _elem494 = iprot.readString(); - struct.success.add(_elem494); + String _elem502; // required + _elem502 = iprot.readString(); + struct.success.add(_elem502); } iprot.readListEnd(); } @@ -29528,9 +29631,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_tables_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter495 : struct.success) + for (String _iter503 : struct.success) { - oprot.writeString(_iter495); + oprot.writeString(_iter503); } oprot.writeListEnd(); } @@ -29569,9 +29672,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_tables_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter496 : struct.success) + for (String _iter504 : struct.success) { - oprot.writeString(_iter496); + oprot.writeString(_iter504); } } } @@ -29586,13 +29689,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_tables_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list497 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list497.size); - for (int _i498 = 0; _i498 < _list497.size; ++_i498) + org.apache.thrift.protocol.TList _list505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list505.size); + for (int _i506 = 0; _i506 < _list505.size; ++_i506) { - String _elem499; // required - _elem499 = iprot.readString(); - struct.success.add(_elem499); + String _elem507; // required + _elem507 = iprot.readString(); + struct.success.add(_elem507); } } struct.setSuccessIsSet(true); @@ -30361,13 +30464,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_all_tables_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list500 = iprot.readListBegin(); - struct.success = new ArrayList(_list500.size); - for (int _i501 = 0; _i501 < _list500.size; ++_i501) + org.apache.thrift.protocol.TList _list508 = iprot.readListBegin(); + struct.success = new ArrayList(_list508.size); + for (int _i509 = 0; _i509 < _list508.size; ++_i509) { - String _elem502; // required - _elem502 = iprot.readString(); - struct.success.add(_elem502); + String _elem510; // required + _elem510 = iprot.readString(); + struct.success.add(_elem510); } iprot.readListEnd(); } @@ -30402,9 +30505,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_all_tables_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter503 : struct.success) + for (String _iter511 : struct.success) { - oprot.writeString(_iter503); + oprot.writeString(_iter511); } oprot.writeListEnd(); } @@ -30443,9 +30546,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter504 : struct.success) + for (String _iter512 : struct.success) { - oprot.writeString(_iter504); + oprot.writeString(_iter512); } } } @@ -30460,13 +30563,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_all_tables_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list505 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list505.size); - for (int _i506 = 0; _i506 < _list505.size; ++_i506) + org.apache.thrift.protocol.TList _list513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list513.size); + for (int _i514 = 0; _i514 < _list513.size; ++_i514) { - String _elem507; // required - _elem507 = iprot.readString(); - struct.success.add(_elem507); + String _elem515; // required + _elem515 = iprot.readString(); + struct.success.add(_elem515); } } struct.setSuccessIsSet(true); @@ -31922,13 +32025,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 2: // TBL_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list508 = iprot.readListBegin(); - struct.tbl_names = new ArrayList(_list508.size); - for (int _i509 = 0; _i509 < _list508.size; ++_i509) + org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); + struct.tbl_names = new 
ArrayList(_list516.size); + for (int _i517 = 0; _i517 < _list516.size; ++_i517) { - String _elem510; // required - _elem510 = iprot.readString(); - struct.tbl_names.add(_elem510); + String _elem518; // required + _elem518 = iprot.readString(); + struct.tbl_names.add(_elem518); } iprot.readListEnd(); } @@ -31959,9 +32062,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(TBL_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.tbl_names.size())); - for (String _iter511 : struct.tbl_names) + for (String _iter519 : struct.tbl_names) { - oprot.writeString(_iter511); + oprot.writeString(_iter519); } oprot.writeListEnd(); } @@ -31998,9 +32101,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetTbl_names()) { { oprot.writeI32(struct.tbl_names.size()); - for (String _iter512 : struct.tbl_names) + for (String _iter520 : struct.tbl_names) { - oprot.writeString(_iter512); + oprot.writeString(_iter520); } } } @@ -32016,13 +32119,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list513 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.tbl_names = new ArrayList(_list513.size); - for (int _i514 = 0; _i514 < _list513.size; ++_i514) + org.apache.thrift.protocol.TList _list521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.tbl_names = new ArrayList(_list521.size); + for (int _i522 = 0; _i522 < _list521.size; ++_i522) { - String _elem515; // required - _elem515 = iprot.readString(); - struct.tbl_names.add(_elem515); + String _elem523; // required + _elem523 = iprot.readString(); + struct.tbl_names.add(_elem523); } } struct.setTbl_namesIsSet(true); @@ -32590,14 +32693,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_objects_b case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list516 = iprot.readListBegin(); - struct.success = new ArrayList
<Table>(_list516.size); - for (int _i517 = 0; _i517 < _list516.size; ++_i517) + org.apache.thrift.protocol.TList _list524 = iprot.readListBegin(); + struct.success = new ArrayList<Table>
(_list524.size); + for (int _i525 = 0; _i525 < _list524.size; ++_i525) { - Table _elem518; // required - _elem518 = new Table(); - _elem518.read(iprot); - struct.success.add(_elem518); + Table _elem526; // required + _elem526 = new Table(); + _elem526.read(iprot); + struct.success.add(_elem526); } iprot.readListEnd(); } @@ -32650,9 +32753,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_objects_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Table _iter519 : struct.success) + for (Table _iter527 : struct.success) { - _iter519.write(oprot); + _iter527.write(oprot); } oprot.writeListEnd(); } @@ -32707,9 +32810,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_objects_b if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Table _iter520 : struct.success) + for (Table _iter528 : struct.success) { - _iter520.write(oprot); + _iter528.write(oprot); } } } @@ -32730,14 +32833,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_objects_by BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list521 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList
<Table>(_list521.size); - for (int _i522 = 0; _i522 < _list521.size; ++_i522) + org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList<Table>
(_list529.size); + for (int _i530 = 0; _i530 < _list529.size; ++_i530) { - Table _elem523; // required - _elem523 = new Table(); - _elem523.read(iprot); - struct.success.add(_elem523); + Table _elem531; // required + _elem531 = new Table(); + _elem531.read(iprot); + struct.success.add(_elem531); } } struct.setSuccessIsSet(true); @@ -33886,13 +33989,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_names_by_ case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list524 = iprot.readListBegin(); - struct.success = new ArrayList(_list524.size); - for (int _i525 = 0; _i525 < _list524.size; ++_i525) + org.apache.thrift.protocol.TList _list532 = iprot.readListBegin(); + struct.success = new ArrayList(_list532.size); + for (int _i533 = 0; _i533 < _list532.size; ++_i533) { - String _elem526; // required - _elem526 = iprot.readString(); - struct.success.add(_elem526); + String _elem534; // required + _elem534 = iprot.readString(); + struct.success.add(_elem534); } iprot.readListEnd(); } @@ -33945,9 +34048,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_names_by oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter527 : struct.success) + for (String _iter535 : struct.success) { - oprot.writeString(_iter527); + oprot.writeString(_iter535); } oprot.writeListEnd(); } @@ -34002,9 +34105,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_ if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter528 : struct.success) + for (String _iter536 : struct.success) { - oprot.writeString(_iter528); + oprot.writeString(_iter536); } } } @@ -34025,13 +34128,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_names_by_f BitSet incoming = iprot.readBitSet(4); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list529 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list529.size); - for (int _i530 = 0; _i530 < _list529.size; ++_i530) + org.apache.thrift.protocol.TList _list537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list537.size); + for (int _i538 = 0; _i538 < _list537.size; ++_i538) { - String _elem531; // required - _elem531 = iprot.readString(); - struct.success.add(_elem531); + String _elem539; // required + _elem539 = iprot.readString(); + struct.success.add(_elem539); } } struct.setSuccessIsSet(true); @@ -38751,14 +38854,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, add_partitions_args case 1: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list532 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list532.size); - for (int _i533 = 0; _i533 < _list532.size; ++_i533) + org.apache.thrift.protocol.TList _list540 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list540.size); + for (int _i541 = 0; _i541 < _list540.size; ++_i541) { - Partition _elem534; // required - _elem534 = new Partition(); - _elem534.read(iprot); - struct.new_parts.add(_elem534); + Partition _elem542; // required + _elem542 = new Partition(); + _elem542.read(iprot); + struct.new_parts.add(_elem542); } 
iprot.readListEnd(); } @@ -38784,9 +38887,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, add_partitions_arg oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter535 : struct.new_parts) + for (Partition _iter543 : struct.new_parts) { - _iter535.write(oprot); + _iter543.write(oprot); } oprot.writeListEnd(); } @@ -38817,9 +38920,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, add_partitions_args if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter536 : struct.new_parts) + for (Partition _iter544 : struct.new_parts) { - _iter536.write(oprot); + _iter544.write(oprot); } } } @@ -38831,14 +38934,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, add_partitions_args BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list537 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list537.size); - for (int _i538 = 0; _i538 < _list537.size; ++_i538) + org.apache.thrift.protocol.TList _list545 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list545.size); + for (int _i546 = 0; _i546 < _list545.size; ++_i546) { - Partition _elem539; // required - _elem539 = new Partition(); - _elem539.read(iprot); - struct.new_parts.add(_elem539); + Partition _elem547; // required + _elem547 = new Partition(); + _elem547.read(iprot); + struct.new_parts.add(_elem547); } } struct.setNew_partsIsSet(true); @@ -40017,13 +40120,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list540 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list540.size); - for (int _i541 = 0; _i541 < _list540.size; ++_i541) + org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list548.size); + for (int _i549 = 0; _i549 < _list548.size; ++_i549) { - String _elem542; // required - _elem542 = iprot.readString(); - struct.part_vals.add(_elem542); + String _elem550; // required + _elem550 = iprot.readString(); + struct.part_vals.add(_elem550); } iprot.readListEnd(); } @@ -40059,9 +40162,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter543 : struct.part_vals) + for (String _iter551 : struct.part_vals) { - oprot.writeString(_iter543); + oprot.writeString(_iter551); } oprot.writeListEnd(); } @@ -40104,9 +40207,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter544 : struct.part_vals) + for (String _iter552 : struct.part_vals) { - oprot.writeString(_iter544); + oprot.writeString(_iter552); } } } @@ -40126,13 +40229,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list545 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list545.size); - for (int _i546 = 0; _i546 < _list545.size; ++_i546) + org.apache.thrift.protocol.TList _list553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list553.size); + for (int _i554 = 0; _i554 < _list553.size; ++_i554) { - String _elem547; // required - _elem547 = iprot.readString(); - struct.part_vals.add(_elem547); + String _elem555; // required + _elem555 = iprot.readString(); + struct.part_vals.add(_elem555); } } struct.setPart_valsIsSet(true); @@ -42444,13 +42547,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, append_partition_wi case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list548 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list548.size); - for (int _i549 = 0; _i549 < _list548.size; ++_i549) + org.apache.thrift.protocol.TList _list556 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list556.size); + for (int _i557 = 0; _i557 < _list556.size; ++_i557) { - String _elem550; // required - _elem550 = iprot.readString(); - struct.part_vals.add(_elem550); + String _elem558; // required + _elem558 = iprot.readString(); + struct.part_vals.add(_elem558); } iprot.readListEnd(); } @@ -42495,9 +42598,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, append_partition_w oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter551 : struct.part_vals) + for (String _iter559 : struct.part_vals) { - oprot.writeString(_iter551); + oprot.writeString(_iter559); } oprot.writeListEnd(); } @@ -42548,9 +42651,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, append_partition_wi if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter552 : struct.part_vals) + for (String _iter560 : struct.part_vals) { - oprot.writeString(_iter552); + oprot.writeString(_iter560); } } } @@ -42573,13 +42676,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, append_partition_wit } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list553 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list553.size); - for (int _i554 = 0; _i554 < _list553.size; ++_i554) + org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list561.size); + for (int _i562 = 0; _i562 < _list561.size; ++_i562) { - String _elem555; // required - _elem555 = iprot.readString(); - struct.part_vals.add(_elem555); + String _elem563; // required + _elem563 = iprot.readString(); + struct.part_vals.add(_elem563); } } struct.setPart_valsIsSet(true); @@ -46452,13 +46555,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list556 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list556.size); - for (int _i557 = 0; _i557 < _list556.size; ++_i557) + org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); + struct.part_vals = 
new ArrayList(_list564.size); + for (int _i565 = 0; _i565 < _list564.size; ++_i565) { - String _elem558; // required - _elem558 = iprot.readString(); - struct.part_vals.add(_elem558); + String _elem566; // required + _elem566 = iprot.readString(); + struct.part_vals.add(_elem566); } iprot.readListEnd(); } @@ -46502,9 +46605,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_arg oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter559 : struct.part_vals) + for (String _iter567 : struct.part_vals) { - oprot.writeString(_iter559); + oprot.writeString(_iter567); } oprot.writeListEnd(); } @@ -46553,9 +46656,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter560 : struct.part_vals) + for (String _iter568 : struct.part_vals) { - oprot.writeString(_iter560); + oprot.writeString(_iter568); } } } @@ -46578,13 +46681,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, drop_partition_args } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list561 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list561.size); - for (int _i562 = 0; _i562 < _list561.size; ++_i562) + org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list569.size); + for (int _i570 = 0; _i570 < _list569.size; ++_i570) { - String _elem563; // required - _elem563 = iprot.readString(); - struct.part_vals.add(_elem563); + String _elem571; // required + _elem571 = iprot.readString(); + struct.part_vals.add(_elem571); } } struct.setPart_valsIsSet(true); @@ -47826,13 +47929,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, drop_partition_with case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list564 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list564.size); - for (int _i565 = 0; _i565 < _list564.size; ++_i565) + org.apache.thrift.protocol.TList _list572 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list572.size); + for (int _i573 = 0; _i573 < _list572.size; ++_i573) { - String _elem566; // required - _elem566 = iprot.readString(); - struct.part_vals.add(_elem566); + String _elem574; // required + _elem574 = iprot.readString(); + struct.part_vals.add(_elem574); } iprot.readListEnd(); } @@ -47885,9 +47988,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, drop_partition_wit oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter567 : struct.part_vals) + for (String _iter575 : struct.part_vals) { - oprot.writeString(_iter567); + oprot.writeString(_iter575); } oprot.writeListEnd(); } @@ -47944,9 +48047,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, drop_partition_with if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter568 : struct.part_vals) + for (String _iter576 : struct.part_vals) { - oprot.writeString(_iter568); + oprot.writeString(_iter576); } } } @@ -47972,13 +48075,13 @@ public void 
read(org.apache.thrift.protocol.TProtocol prot, drop_partition_with_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list569 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list569.size); - for (int _i570 = 0; _i570 < _list569.size; ++_i570) + org.apache.thrift.protocol.TList _list577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list577.size); + for (int _i578 = 0; _i578 < _list577.size; ++_i578) { - String _elem571; // required - _elem571 = iprot.readString(); - struct.part_vals.add(_elem571); + String _elem579; // required + _elem579 = iprot.readString(); + struct.part_vals.add(_elem579); } } struct.setPart_valsIsSet(true); @@ -52583,13 +52686,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_args case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list572 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list572.size); - for (int _i573 = 0; _i573 < _list572.size; ++_i573) + org.apache.thrift.protocol.TList _list580 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list580.size); + for (int _i581 = 0; _i581 < _list580.size; ++_i581) { - String _elem574; // required - _elem574 = iprot.readString(); - struct.part_vals.add(_elem574); + String _elem582; // required + _elem582 = iprot.readString(); + struct.part_vals.add(_elem582); } iprot.readListEnd(); } @@ -52625,9 +52728,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_args oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter575 : struct.part_vals) + for (String _iter583 : struct.part_vals) { - oprot.writeString(_iter575); + oprot.writeString(_iter583); } oprot.writeListEnd(); } @@ -52670,9 +52773,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_args if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter576 : struct.part_vals) + for (String _iter584 : struct.part_vals) { - oprot.writeString(_iter576); + oprot.writeString(_iter584); } } } @@ -52692,13 +52795,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_args s } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list577 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list577.size); - for (int _i578 = 0; _i578 < _list577.size; ++_i578) + org.apache.thrift.protocol.TList _list585 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list585.size); + for (int _i586 = 0; _i586 < _list585.size; ++_i586) { - String _elem579; // required - _elem579 = iprot.readString(); - struct.part_vals.add(_elem579); + String _elem587; // required + _elem587 = iprot.readString(); + struct.part_vals.add(_elem587); } } struct.setPart_valsIsSet(true); @@ -53927,15 +54030,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, exchange_partition_ case 1: // PARTITION_SPECS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map580 = iprot.readMapBegin(); - struct.partitionSpecs = new 
HashMap(2*_map580.size); - for (int _i581 = 0; _i581 < _map580.size; ++_i581) + org.apache.thrift.protocol.TMap _map588 = iprot.readMapBegin(); + struct.partitionSpecs = new HashMap(2*_map588.size); + for (int _i589 = 0; _i589 < _map588.size; ++_i589) { - String _key582; // required - String _val583; // required - _key582 = iprot.readString(); - _val583 = iprot.readString(); - struct.partitionSpecs.put(_key582, _val583); + String _key590; // required + String _val591; // required + _key590 = iprot.readString(); + _val591 = iprot.readString(); + struct.partitionSpecs.put(_key590, _val591); } iprot.readMapEnd(); } @@ -53993,10 +54096,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, exchange_partition oprot.writeFieldBegin(PARTITION_SPECS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.partitionSpecs.size())); - for (Map.Entry _iter584 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter592 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter584.getKey()); - oprot.writeString(_iter584.getValue()); + oprot.writeString(_iter592.getKey()); + oprot.writeString(_iter592.getValue()); } oprot.writeMapEnd(); } @@ -54059,10 +54162,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, exchange_partition_ if (struct.isSetPartitionSpecs()) { { oprot.writeI32(struct.partitionSpecs.size()); - for (Map.Entry _iter585 : struct.partitionSpecs.entrySet()) + for (Map.Entry _iter593 : struct.partitionSpecs.entrySet()) { - oprot.writeString(_iter585.getKey()); - oprot.writeString(_iter585.getValue()); + oprot.writeString(_iter593.getKey()); + oprot.writeString(_iter593.getValue()); } } } @@ -54086,15 +54189,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, exchange_partition_a BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map586 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.partitionSpecs = new HashMap(2*_map586.size); - for (int _i587 = 0; _i587 < _map586.size; ++_i587) + org.apache.thrift.protocol.TMap _map594 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.partitionSpecs = new HashMap(2*_map594.size); + for (int _i595 = 0; _i595 < _map594.size; ++_i595) { - String _key588; // required - String _val589; // required - _key588 = iprot.readString(); - _val589 = iprot.readString(); - struct.partitionSpecs.put(_key588, _val589); + String _key596; // required + String _val597; // required + _key596 = iprot.readString(); + _val597 = iprot.readString(); + struct.partitionSpecs.put(_key596, _val597); } } struct.setPartitionSpecsIsSet(true); @@ -55582,13 +55685,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list590 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list590.size); - for (int _i591 = 0; _i591 < _list590.size; ++_i591) + org.apache.thrift.protocol.TList _list598 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list598.size); + for (int _i599 = 0; _i599 < _list598.size; ++_i599) { - String _elem592; // required - _elem592 = iprot.readString(); - struct.part_vals.add(_elem592); + String _elem600; // 
required + _elem600 = iprot.readString(); + struct.part_vals.add(_elem600); } iprot.readListEnd(); } @@ -55608,13 +55711,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_with_ case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list593 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list593.size); - for (int _i594 = 0; _i594 < _list593.size; ++_i594) + org.apache.thrift.protocol.TList _list601 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list601.size); + for (int _i602 = 0; _i602 < _list601.size; ++_i602) { - String _elem595; // required - _elem595 = iprot.readString(); - struct.group_names.add(_elem595); + String _elem603; // required + _elem603 = iprot.readString(); + struct.group_names.add(_elem603); } iprot.readListEnd(); } @@ -55650,9 +55753,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter596 : struct.part_vals) + for (String _iter604 : struct.part_vals) { - oprot.writeString(_iter596); + oprot.writeString(_iter604); } oprot.writeListEnd(); } @@ -55667,9 +55770,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_with oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter597 : struct.group_names) + for (String _iter605 : struct.group_names) { - oprot.writeString(_iter597); + oprot.writeString(_iter605); } oprot.writeListEnd(); } @@ -55718,9 +55821,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter598 : struct.part_vals) + for (String _iter606 : struct.part_vals) { - oprot.writeString(_iter598); + oprot.writeString(_iter606); } } } @@ -55730,9 +55833,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_with_ if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter599 : struct.group_names) + for (String _iter607 : struct.group_names) { - oprot.writeString(_iter599); + oprot.writeString(_iter607); } } } @@ -55752,13 +55855,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list600 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list600.size); - for (int _i601 = 0; _i601 < _list600.size; ++_i601) + org.apache.thrift.protocol.TList _list608 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list608.size); + for (int _i609 = 0; _i609 < _list608.size; ++_i609) { - String _elem602; // required - _elem602 = iprot.readString(); - struct.part_vals.add(_elem602); + String _elem610; // required + _elem610 = iprot.readString(); + struct.part_vals.add(_elem610); } } struct.setPart_valsIsSet(true); @@ -55769,13 +55872,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_with_a } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list603 = new 
org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list603.size); - for (int _i604 = 0; _i604 < _list603.size; ++_i604) + org.apache.thrift.protocol.TList _list611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list611.size); + for (int _i612 = 0; _i612 < _list611.size; ++_i612) { - String _elem605; // required - _elem605 = iprot.readString(); - struct.group_names.add(_elem605); + String _elem613; // required + _elem613 = iprot.readString(); + struct.group_names.add(_elem613); } } struct.setGroup_namesIsSet(true); @@ -58544,14 +58647,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list606 = iprot.readListBegin(); - struct.success = new ArrayList(_list606.size); - for (int _i607 = 0; _i607 < _list606.size; ++_i607) + org.apache.thrift.protocol.TList _list614 = iprot.readListBegin(); + struct.success = new ArrayList(_list614.size); + for (int _i615 = 0; _i615 < _list614.size; ++_i615) { - Partition _elem608; // required - _elem608 = new Partition(); - _elem608.read(iprot); - struct.success.add(_elem608); + Partition _elem616; // required + _elem616 = new Partition(); + _elem616.read(iprot); + struct.success.add(_elem616); } iprot.readListEnd(); } @@ -58595,9 +58698,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter609 : struct.success) + for (Partition _iter617 : struct.success) { - _iter609.write(oprot); + _iter617.write(oprot); } oprot.writeListEnd(); } @@ -58644,9 +58747,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter610 : struct.success) + for (Partition _iter618 : struct.success) { - _iter610.write(oprot); + _iter618.write(oprot); } } } @@ -58664,14 +58767,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_resul BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list611 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list611.size); - for (int _i612 = 0; _i612 < _list611.size; ++_i612) + org.apache.thrift.protocol.TList _list619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list619.size); + for (int _i620 = 0; _i620 < _list619.size; ++_i620) { - Partition _elem613; // required - _elem613 = new Partition(); - _elem613.read(iprot); - struct.success.add(_elem613); + Partition _elem621; // required + _elem621 = new Partition(); + _elem621.read(iprot); + struct.success.add(_elem621); } } struct.setSuccessIsSet(true); @@ -59364,13 +59467,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 5: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list614 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list614.size); - for (int _i615 = 0; _i615 < _list614.size; 
++_i615) + org.apache.thrift.protocol.TList _list622 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list622.size); + for (int _i623 = 0; _i623 < _list622.size; ++_i623) { - String _elem616; // required - _elem616 = iprot.readString(); - struct.group_names.add(_elem616); + String _elem624; // required + _elem624 = iprot.readString(); + struct.group_names.add(_elem624); } iprot.readListEnd(); } @@ -59414,9 +59517,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter617 : struct.group_names) + for (String _iter625 : struct.group_names) { - oprot.writeString(_iter617); + oprot.writeString(_iter625); } oprot.writeListEnd(); } @@ -59471,9 +59574,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter618 : struct.group_names) + for (String _iter626 : struct.group_names) { - oprot.writeString(_iter618); + oprot.writeString(_iter626); } } } @@ -59501,13 +59604,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ } if (incoming.get(4)) { { - org.apache.thrift.protocol.TList _list619 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list619.size); - for (int _i620 = 0; _i620 < _list619.size; ++_i620) + org.apache.thrift.protocol.TList _list627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list627.size); + for (int _i628 = 0; _i628 < _list627.size; ++_i628) { - String _elem621; // required - _elem621 = iprot.readString(); - struct.group_names.add(_elem621); + String _elem629; // required + _elem629 = iprot.readString(); + struct.group_names.add(_elem629); } } struct.setGroup_namesIsSet(true); @@ -59994,14 +60097,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_with case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list622 = iprot.readListBegin(); - struct.success = new ArrayList(_list622.size); - for (int _i623 = 0; _i623 < _list622.size; ++_i623) + org.apache.thrift.protocol.TList _list630 = iprot.readListBegin(); + struct.success = new ArrayList(_list630.size); + for (int _i631 = 0; _i631 < _list630.size; ++_i631) { - Partition _elem624; // required - _elem624 = new Partition(); - _elem624.read(iprot); - struct.success.add(_elem624); + Partition _elem632; // required + _elem632 = new Partition(); + _elem632.read(iprot); + struct.success.add(_elem632); } iprot.readListEnd(); } @@ -60045,9 +60148,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_wit oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter625 : struct.success) + for (Partition _iter633 : struct.success) { - _iter625.write(oprot); + _iter633.write(oprot); } oprot.writeListEnd(); } @@ -60094,9 +60197,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_with if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter626 : struct.success) + 
for (Partition _iter634 : struct.success) { - _iter626.write(oprot); + _iter634.write(oprot); } } } @@ -60114,14 +60217,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_with_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list627 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list627.size); - for (int _i628 = 0; _i628 < _list627.size; ++_i628) + org.apache.thrift.protocol.TList _list635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list635.size); + for (int _i636 = 0; _i636 < _list635.size; ++_i636) { - Partition _elem629; // required - _elem629 = new Partition(); - _elem629.read(iprot); - struct.success.add(_elem629); + Partition _elem637; // required + _elem637 = new Partition(); + _elem637.read(iprot); + struct.success.add(_elem637); } } struct.setSuccessIsSet(true); @@ -61103,13 +61206,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list630 = iprot.readListBegin(); - struct.success = new ArrayList(_list630.size); - for (int _i631 = 0; _i631 < _list630.size; ++_i631) + org.apache.thrift.protocol.TList _list638 = iprot.readListBegin(); + struct.success = new ArrayList(_list638.size); + for (int _i639 = 0; _i639 < _list638.size; ++_i639) { - String _elem632; // required - _elem632 = iprot.readString(); - struct.success.add(_elem632); + String _elem640; // required + _elem640 = iprot.readString(); + struct.success.add(_elem640); } iprot.readListEnd(); } @@ -61144,9 +61247,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter633 : struct.success) + for (String _iter641 : struct.success) { - oprot.writeString(_iter633); + oprot.writeString(_iter641); } oprot.writeListEnd(); } @@ -61185,9 +61288,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter634 : struct.success) + for (String _iter642 : struct.success) { - oprot.writeString(_iter634); + oprot.writeString(_iter642); } } } @@ -61202,13 +61305,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list635 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list635.size); - for (int _i636 = 0; _i636 < _list635.size; ++_i636) + org.apache.thrift.protocol.TList _list643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list643.size); + for (int _i644 = 0; _i644 < _list643.size; ++_i644) { - String _elem637; // required - _elem637 = iprot.readString(); - struct.success.add(_elem637); + String _elem645; // required + _elem645 = iprot.readString(); + struct.success.add(_elem645); } } struct.setSuccessIsSet(true); @@ -61799,13 +61902,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, 
get_partitions_ps_a case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list638 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list638.size); - for (int _i639 = 0; _i639 < _list638.size; ++_i639) + org.apache.thrift.protocol.TList _list646 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list646.size); + for (int _i647 = 0; _i647 < _list646.size; ++_i647) { - String _elem640; // required - _elem640 = iprot.readString(); - struct.part_vals.add(_elem640); + String _elem648; // required + _elem648 = iprot.readString(); + struct.part_vals.add(_elem648); } iprot.readListEnd(); } @@ -61849,9 +61952,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter641 : struct.part_vals) + for (String _iter649 : struct.part_vals) { - oprot.writeString(_iter641); + oprot.writeString(_iter649); } oprot.writeListEnd(); } @@ -61900,9 +62003,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_a if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter642 : struct.part_vals) + for (String _iter650 : struct.part_vals) { - oprot.writeString(_iter642); + oprot.writeString(_iter650); } } } @@ -61925,13 +62028,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list643 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list643.size); - for (int _i644 = 0; _i644 < _list643.size; ++_i644) + org.apache.thrift.protocol.TList _list651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list651.size); + for (int _i652 = 0; _i652 < _list651.size; ++_i652) { - String _elem645; // required - _elem645 = iprot.readString(); - struct.part_vals.add(_elem645); + String _elem653; // required + _elem653 = iprot.readString(); + struct.part_vals.add(_elem653); } } struct.setPart_valsIsSet(true); @@ -62422,14 +62525,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_r case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list646 = iprot.readListBegin(); - struct.success = new ArrayList(_list646.size); - for (int _i647 = 0; _i647 < _list646.size; ++_i647) + org.apache.thrift.protocol.TList _list654 = iprot.readListBegin(); + struct.success = new ArrayList(_list654.size); + for (int _i655 = 0; _i655 < _list654.size; ++_i655) { - Partition _elem648; // required - _elem648 = new Partition(); - _elem648.read(iprot); - struct.success.add(_elem648); + Partition _elem656; // required + _elem656 = new Partition(); + _elem656.read(iprot); + struct.success.add(_elem656); } iprot.readListEnd(); } @@ -62473,9 +62576,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter649 : struct.success) + for (Partition _iter657 : struct.success) { - _iter649.write(oprot); + _iter657.write(oprot); } 
oprot.writeListEnd(); } @@ -62522,9 +62625,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_r if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter650 : struct.success) + for (Partition _iter658 : struct.success) { - _iter650.write(oprot); + _iter658.write(oprot); } } } @@ -62542,14 +62645,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_re BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list651 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list651.size); - for (int _i652 = 0; _i652 < _list651.size; ++_i652) + org.apache.thrift.protocol.TList _list659 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list659.size); + for (int _i660 = 0; _i660 < _list659.size; ++_i660) { - Partition _elem653; // required - _elem653 = new Partition(); - _elem653.read(iprot); - struct.success.add(_elem653); + Partition _elem661; // required + _elem661 = new Partition(); + _elem661.read(iprot); + struct.success.add(_elem661); } } struct.setSuccessIsSet(true); @@ -63327,13 +63430,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list654 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list654.size); - for (int _i655 = 0; _i655 < _list654.size; ++_i655) + org.apache.thrift.protocol.TList _list662 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list662.size); + for (int _i663 = 0; _i663 < _list662.size; ++_i663) { - String _elem656; // required - _elem656 = iprot.readString(); - struct.part_vals.add(_elem656); + String _elem664; // required + _elem664 = iprot.readString(); + struct.part_vals.add(_elem664); } iprot.readListEnd(); } @@ -63361,13 +63464,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 6: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list657 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list657.size); - for (int _i658 = 0; _i658 < _list657.size; ++_i658) + org.apache.thrift.protocol.TList _list665 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list665.size); + for (int _i666 = 0; _i666 < _list665.size; ++_i666) { - String _elem659; // required - _elem659 = iprot.readString(); - struct.group_names.add(_elem659); + String _elem667; // required + _elem667 = iprot.readString(); + struct.group_names.add(_elem667); } iprot.readListEnd(); } @@ -63403,9 +63506,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter660 : struct.part_vals) + for (String _iter668 : struct.part_vals) { - oprot.writeString(_iter660); + oprot.writeString(_iter668); } oprot.writeListEnd(); } @@ -63423,9 +63526,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, 
struct.group_names.size())); - for (String _iter661 : struct.group_names) + for (String _iter669 : struct.group_names) { - oprot.writeString(_iter661); + oprot.writeString(_iter669); } oprot.writeListEnd(); } @@ -63477,9 +63580,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter662 : struct.part_vals) + for (String _iter670 : struct.part_vals) { - oprot.writeString(_iter662); + oprot.writeString(_iter670); } } } @@ -63492,9 +63595,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter663 : struct.group_names) + for (String _iter671 : struct.group_names) { - oprot.writeString(_iter663); + oprot.writeString(_iter671); } } } @@ -63514,13 +63617,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list664 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list664.size); - for (int _i665 = 0; _i665 < _list664.size; ++_i665) + org.apache.thrift.protocol.TList _list672 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list672.size); + for (int _i673 = 0; _i673 < _list672.size; ++_i673) { - String _elem666; // required - _elem666 = iprot.readString(); - struct.part_vals.add(_elem666); + String _elem674; // required + _elem674 = iprot.readString(); + struct.part_vals.add(_elem674); } } struct.setPart_valsIsSet(true); @@ -63535,13 +63638,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi } if (incoming.get(5)) { { - org.apache.thrift.protocol.TList _list667 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list667.size); - for (int _i668 = 0; _i668 < _list667.size; ++_i668) + org.apache.thrift.protocol.TList _list675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list675.size); + for (int _i676 = 0; _i676 < _list675.size; ++_i676) { - String _elem669; // required - _elem669 = iprot.readString(); - struct.group_names.add(_elem669); + String _elem677; // required + _elem677 = iprot.readString(); + struct.group_names.add(_elem677); } } struct.setGroup_namesIsSet(true); @@ -64028,14 +64131,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_ps_w case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list670 = iprot.readListBegin(); - struct.success = new ArrayList(_list670.size); - for (int _i671 = 0; _i671 < _list670.size; ++_i671) + org.apache.thrift.protocol.TList _list678 = iprot.readListBegin(); + struct.success = new ArrayList(_list678.size); + for (int _i679 = 0; _i679 < _list678.size; ++_i679) { - Partition _elem672; // required - _elem672 = new Partition(); - _elem672.read(iprot); - struct.success.add(_elem672); + Partition _elem680; // required + _elem680 = new Partition(); + _elem680.read(iprot); + struct.success.add(_elem680); } iprot.readListEnd(); } @@ -64079,9 +64182,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_ps_ 
oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter673 : struct.success) + for (Partition _iter681 : struct.success) { - _iter673.write(oprot); + _iter681.write(oprot); } oprot.writeListEnd(); } @@ -64128,9 +64231,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_w if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter674 : struct.success) + for (Partition _iter682 : struct.success) { - _iter674.write(oprot); + _iter682.write(oprot); } } } @@ -64148,14 +64251,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_ps_wi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list675 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list675.size); - for (int _i676 = 0; _i676 < _list675.size; ++_i676) + org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list683.size); + for (int _i684 = 0; _i684 < _list683.size; ++_i684) { - Partition _elem677; // required - _elem677 = new Partition(); - _elem677.read(iprot); - struct.success.add(_elem677); + Partition _elem685; // required + _elem685 = new Partition(); + _elem685.read(iprot); + struct.success.add(_elem685); } } struct.setSuccessIsSet(true); @@ -64751,13 +64854,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list678 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list678.size); - for (int _i679 = 0; _i679 < _list678.size; ++_i679) + org.apache.thrift.protocol.TList _list686 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list686.size); + for (int _i687 = 0; _i687 < _list686.size; ++_i687) { - String _elem680; // required - _elem680 = iprot.readString(); - struct.part_vals.add(_elem680); + String _elem688; // required + _elem688 = iprot.readString(); + struct.part_vals.add(_elem688); } iprot.readListEnd(); } @@ -64801,9 +64904,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter681 : struct.part_vals) + for (String _iter689 : struct.part_vals) { - oprot.writeString(_iter681); + oprot.writeString(_iter689); } oprot.writeListEnd(); } @@ -64852,9 +64955,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter682 : struct.part_vals) + for (String _iter690 : struct.part_vals) { - oprot.writeString(_iter682); + oprot.writeString(_iter690); } } } @@ -64877,13 +64980,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list683 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list683.size); - for (int _i684 = 0; _i684 < _list683.size; ++_i684) + 
org.apache.thrift.protocol.TList _list691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list691.size); + for (int _i692 = 0; _i692 < _list691.size; ++_i692) { - String _elem685; // required - _elem685 = iprot.readString(); - struct.part_vals.add(_elem685); + String _elem693; // required + _elem693 = iprot.readString(); + struct.part_vals.add(_elem693); } } struct.setPart_valsIsSet(true); @@ -65374,13 +65477,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_names case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list686 = iprot.readListBegin(); - struct.success = new ArrayList(_list686.size); - for (int _i687 = 0; _i687 < _list686.size; ++_i687) + org.apache.thrift.protocol.TList _list694 = iprot.readListBegin(); + struct.success = new ArrayList(_list694.size); + for (int _i695 = 0; _i695 < _list694.size; ++_i695) { - String _elem688; // required - _elem688 = iprot.readString(); - struct.success.add(_elem688); + String _elem696; // required + _elem696 = iprot.readString(); + struct.success.add(_elem696); } iprot.readListEnd(); } @@ -65424,9 +65527,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_name oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter689 : struct.success) + for (String _iter697 : struct.success) { - oprot.writeString(_iter689); + oprot.writeString(_iter697); } oprot.writeListEnd(); } @@ -65473,9 +65576,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_names if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter690 : struct.success) + for (String _iter698 : struct.success) { - oprot.writeString(_iter690); + oprot.writeString(_iter698); } } } @@ -65493,13 +65596,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_names_ BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list691 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list691.size); - for (int _i692 = 0; _i692 < _list691.size; ++_i692) + org.apache.thrift.protocol.TList _list699 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list699.size); + for (int _i700 = 0; _i700 < _list699.size; ++_i700) { - String _elem693; // required - _elem693 = iprot.readString(); - struct.success.add(_elem693); + String _elem701; // required + _elem701 = iprot.readString(); + struct.success.add(_elem701); } } struct.setSuccessIsSet(true); @@ -66666,14 +66769,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_f case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list694 = iprot.readListBegin(); - struct.success = new ArrayList(_list694.size); - for (int _i695 = 0; _i695 < _list694.size; ++_i695) + org.apache.thrift.protocol.TList _list702 = iprot.readListBegin(); + struct.success = new ArrayList(_list702.size); + for (int _i703 = 0; _i703 < _list702.size; ++_i703) { - Partition _elem696; // required - _elem696 = new Partition(); - _elem696.read(iprot); - 
struct.success.add(_elem696); + Partition _elem704; // required + _elem704 = new Partition(); + _elem704.read(iprot); + struct.success.add(_elem704); } iprot.readListEnd(); } @@ -66717,9 +66820,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter697 : struct.success) + for (Partition _iter705 : struct.success) { - _iter697.write(oprot); + _iter705.write(oprot); } oprot.writeListEnd(); } @@ -66766,9 +66869,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_f if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter698 : struct.success) + for (Partition _iter706 : struct.success) { - _iter698.write(oprot); + _iter706.write(oprot); } } } @@ -66786,14 +66889,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_fi BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list699 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list699.size); - for (int _i700 = 0; _i700 < _list699.size; ++_i700) + org.apache.thrift.protocol.TList _list707 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list707.size); + for (int _i708 = 0; _i708 < _list707.size; ++_i708) { - Partition _elem701; // required - _elem701 = new Partition(); - _elem701.read(iprot); - struct.success.add(_elem701); + Partition _elem709; // required + _elem709 = new Partition(); + _elem709.read(iprot); + struct.success.add(_elem709); } } struct.setSuccessIsSet(true); @@ -68244,13 +68347,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 3: // NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list702 = iprot.readListBegin(); - struct.names = new ArrayList(_list702.size); - for (int _i703 = 0; _i703 < _list702.size; ++_i703) + org.apache.thrift.protocol.TList _list710 = iprot.readListBegin(); + struct.names = new ArrayList(_list710.size); + for (int _i711 = 0; _i711 < _list710.size; ++_i711) { - String _elem704; // required - _elem704 = iprot.readString(); - struct.names.add(_elem704); + String _elem712; // required + _elem712 = iprot.readString(); + struct.names.add(_elem712); } iprot.readListEnd(); } @@ -68286,9 +68389,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.names.size())); - for (String _iter705 : struct.names) + for (String _iter713 : struct.names) { - oprot.writeString(_iter705); + oprot.writeString(_iter713); } oprot.writeListEnd(); } @@ -68331,9 +68434,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetNames()) { { oprot.writeI32(struct.names.size()); - for (String _iter706 : struct.names) + for (String _iter714 : struct.names) { - oprot.writeString(_iter706); + oprot.writeString(_iter714); } } } @@ -68353,13 +68456,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list707 
= new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.names = new ArrayList(_list707.size); - for (int _i708 = 0; _i708 < _list707.size; ++_i708) + org.apache.thrift.protocol.TList _list715 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.names = new ArrayList(_list715.size); + for (int _i716 = 0; _i716 < _list715.size; ++_i716) { - String _elem709; // required - _elem709 = iprot.readString(); - struct.names.add(_elem709); + String _elem717; // required + _elem717 = iprot.readString(); + struct.names.add(_elem717); } } struct.setNamesIsSet(true); @@ -68846,14 +68949,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_by_n case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list710 = iprot.readListBegin(); - struct.success = new ArrayList(_list710.size); - for (int _i711 = 0; _i711 < _list710.size; ++_i711) + org.apache.thrift.protocol.TList _list718 = iprot.readListBegin(); + struct.success = new ArrayList(_list718.size); + for (int _i719 = 0; _i719 < _list718.size; ++_i719) { - Partition _elem712; // required - _elem712 = new Partition(); - _elem712.read(iprot); - struct.success.add(_elem712); + Partition _elem720; // required + _elem720 = new Partition(); + _elem720.read(iprot); + struct.success.add(_elem720); } iprot.readListEnd(); } @@ -68897,9 +69000,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_by_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Partition _iter713 : struct.success) + for (Partition _iter721 : struct.success) { - _iter713.write(oprot); + _iter721.write(oprot); } oprot.writeListEnd(); } @@ -68946,9 +69049,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_n if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Partition _iter714 : struct.success) + for (Partition _iter722 : struct.success) { - _iter714.write(oprot); + _iter722.write(oprot); } } } @@ -68966,14 +69069,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_by_na BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list715 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list715.size); - for (int _i716 = 0; _i716 < _list715.size; ++_i716) + org.apache.thrift.protocol.TList _list723 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list723.size); + for (int _i724 = 0; _i724 < _list723.size; ++_i724) { - Partition _elem717; // required - _elem717 = new Partition(); - _elem717.read(iprot); - struct.success.add(_elem717); + Partition _elem725; // required + _elem725 = new Partition(); + _elem725.read(iprot); + struct.success.add(_elem725); } } struct.setSuccessIsSet(true); @@ -70523,14 +70626,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, alter_partitions_ar case 3: // NEW_PARTS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list718 = iprot.readListBegin(); - struct.new_parts = new ArrayList(_list718.size); - for (int _i719 = 0; _i719 < _list718.size; ++_i719) + 
org.apache.thrift.protocol.TList _list726 = iprot.readListBegin(); + struct.new_parts = new ArrayList(_list726.size); + for (int _i727 = 0; _i727 < _list726.size; ++_i727) { - Partition _elem720; // required - _elem720 = new Partition(); - _elem720.read(iprot); - struct.new_parts.add(_elem720); + Partition _elem728; // required + _elem728 = new Partition(); + _elem728.read(iprot); + struct.new_parts.add(_elem728); } iprot.readListEnd(); } @@ -70566,9 +70669,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, alter_partitions_a oprot.writeFieldBegin(NEW_PARTS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.new_parts.size())); - for (Partition _iter721 : struct.new_parts) + for (Partition _iter729 : struct.new_parts) { - _iter721.write(oprot); + _iter729.write(oprot); } oprot.writeListEnd(); } @@ -70611,9 +70714,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, alter_partitions_ar if (struct.isSetNew_parts()) { { oprot.writeI32(struct.new_parts.size()); - for (Partition _iter722 : struct.new_parts) + for (Partition _iter730 : struct.new_parts) { - _iter722.write(oprot); + _iter730.write(oprot); } } } @@ -70633,14 +70736,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, alter_partitions_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list723 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.new_parts = new ArrayList(_list723.size); - for (int _i724 = 0; _i724 < _list723.size; ++_i724) + org.apache.thrift.protocol.TList _list731 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.new_parts = new ArrayList(_list731.size); + for (int _i732 = 0; _i732 < _list731.size; ++_i732) { - Partition _elem725; // required - _elem725 = new Partition(); - _elem725.read(iprot); - struct.new_parts.add(_elem725); + Partition _elem733; // required + _elem733 = new Partition(); + _elem733.read(iprot); + struct.new_parts.add(_elem733); } } struct.setNew_partsIsSet(true); @@ -72839,13 +72942,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, rename_partition_ar case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list726 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list726.size); - for (int _i727 = 0; _i727 < _list726.size; ++_i727) + org.apache.thrift.protocol.TList _list734 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list734.size); + for (int _i735 = 0; _i735 < _list734.size; ++_i735) { - String _elem728; // required - _elem728 = iprot.readString(); - struct.part_vals.add(_elem728); + String _elem736; // required + _elem736 = iprot.readString(); + struct.part_vals.add(_elem736); } iprot.readListEnd(); } @@ -72890,9 +72993,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, rename_partition_a oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter729 : struct.part_vals) + for (String _iter737 : struct.part_vals) { - oprot.writeString(_iter729); + oprot.writeString(_iter737); } oprot.writeListEnd(); } @@ -72943,9 +73046,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, rename_partition_ar if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter730 : 
struct.part_vals) + for (String _iter738 : struct.part_vals) { - oprot.writeString(_iter730); + oprot.writeString(_iter738); } } } @@ -72968,13 +73071,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, rename_partition_arg } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list731 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list731.size); - for (int _i732 = 0; _i732 < _list731.size; ++_i732) + org.apache.thrift.protocol.TList _list739 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list739.size); + for (int _i740 = 0; _i740 < _list739.size; ++_i740) { - String _elem733; // required - _elem733 = iprot.readString(); - struct.part_vals.add(_elem733); + String _elem741; // required + _elem741 = iprot.readString(); + struct.part_vals.add(_elem741); } } struct.setPart_valsIsSet(true); @@ -73851,13 +73954,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_has_ case 1: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list734 = iprot.readListBegin(); - struct.part_vals = new ArrayList(_list734.size); - for (int _i735 = 0; _i735 < _list734.size; ++_i735) + org.apache.thrift.protocol.TList _list742 = iprot.readListBegin(); + struct.part_vals = new ArrayList(_list742.size); + for (int _i743 = 0; _i743 < _list742.size; ++_i743) { - String _elem736; // required - _elem736 = iprot.readString(); - struct.part_vals.add(_elem736); + String _elem744; // required + _elem744 = iprot.readString(); + struct.part_vals.add(_elem744); } iprot.readListEnd(); } @@ -73891,9 +73994,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_has oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (String _iter737 : struct.part_vals) + for (String _iter745 : struct.part_vals) { - oprot.writeString(_iter737); + oprot.writeString(_iter745); } oprot.writeListEnd(); } @@ -73930,9 +74033,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_has_ if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (String _iter738 : struct.part_vals) + for (String _iter746 : struct.part_vals) { - oprot.writeString(_iter738); + oprot.writeString(_iter746); } } } @@ -73947,13 +74050,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_has_v BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list739 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new ArrayList(_list739.size); - for (int _i740 = 0; _i740 < _list739.size; ++_i740) + org.apache.thrift.protocol.TList _list747 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new ArrayList(_list747.size); + for (int _i748 = 0; _i748 < _list747.size; ++_i748) { - String _elem741; // required - _elem741 = iprot.readString(); - struct.part_vals.add(_elem741); + String _elem749; // required + _elem749 = iprot.readString(); + struct.part_vals.add(_elem749); } } struct.setPart_valsIsSet(true); @@ -76111,13 +76214,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_v 
case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list742 = iprot.readListBegin(); - struct.success = new ArrayList(_list742.size); - for (int _i743 = 0; _i743 < _list742.size; ++_i743) + org.apache.thrift.protocol.TList _list750 = iprot.readListBegin(); + struct.success = new ArrayList(_list750.size); + for (int _i751 = 0; _i751 < _list750.size; ++_i751) { - String _elem744; // required - _elem744 = iprot.readString(); - struct.success.add(_elem744); + String _elem752; // required + _elem752 = iprot.readString(); + struct.success.add(_elem752); } iprot.readListEnd(); } @@ -76152,9 +76255,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter745 : struct.success) + for (String _iter753 : struct.success) { - oprot.writeString(_iter745); + oprot.writeString(_iter753); } oprot.writeListEnd(); } @@ -76193,9 +76296,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_v if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter746 : struct.success) + for (String _iter754 : struct.success) { - oprot.writeString(_iter746); + oprot.writeString(_iter754); } } } @@ -76210,13 +76313,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_va BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list747 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list747.size); - for (int _i748 = 0; _i748 < _list747.size; ++_i748) + org.apache.thrift.protocol.TList _list755 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list755.size); + for (int _i756 = 0; _i756 < _list755.size; ++_i756) { - String _elem749; // required - _elem749 = iprot.readString(); - struct.success.add(_elem749); + String _elem757; // required + _elem757 = iprot.readString(); + struct.success.add(_elem757); } } struct.setSuccessIsSet(true); @@ -76990,15 +77093,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, partition_name_to_s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map750 = iprot.readMapBegin(); - struct.success = new HashMap(2*_map750.size); - for (int _i751 = 0; _i751 < _map750.size; ++_i751) + org.apache.thrift.protocol.TMap _map758 = iprot.readMapBegin(); + struct.success = new HashMap(2*_map758.size); + for (int _i759 = 0; _i759 < _map758.size; ++_i759) { - String _key752; // required - String _val753; // required - _key752 = iprot.readString(); - _val753 = iprot.readString(); - struct.success.put(_key752, _val753); + String _key760; // required + String _val761; // required + _key760 = iprot.readString(); + _val761 = iprot.readString(); + struct.success.put(_key760, _val761); } iprot.readMapEnd(); } @@ -77033,10 +77136,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, partition_name_to_ oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (Map.Entry _iter754 : 
struct.success.entrySet()) + for (Map.Entry _iter762 : struct.success.entrySet()) { - oprot.writeString(_iter754.getKey()); - oprot.writeString(_iter754.getValue()); + oprot.writeString(_iter762.getKey()); + oprot.writeString(_iter762.getValue()); } oprot.writeMapEnd(); } @@ -77075,10 +77178,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, partition_name_to_s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Map.Entry _iter755 : struct.success.entrySet()) + for (Map.Entry _iter763 : struct.success.entrySet()) { - oprot.writeString(_iter755.getKey()); - oprot.writeString(_iter755.getValue()); + oprot.writeString(_iter763.getKey()); + oprot.writeString(_iter763.getValue()); } } } @@ -77093,15 +77196,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, partition_name_to_sp BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TMap _map756 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new HashMap(2*_map756.size); - for (int _i757 = 0; _i757 < _map756.size; ++_i757) + org.apache.thrift.protocol.TMap _map764 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new HashMap(2*_map764.size); + for (int _i765 = 0; _i765 < _map764.size; ++_i765) { - String _key758; // required - String _val759; // required - _key758 = iprot.readString(); - _val759 = iprot.readString(); - struct.success.put(_key758, _val759); + String _key766; // required + String _val767; // required + _key766 = iprot.readString(); + _val767 = iprot.readString(); + struct.success.put(_key766, _val767); } } struct.setSuccessIsSet(true); @@ -77707,15 +77810,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, markPartitionForEve case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map760 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map760.size); - for (int _i761 = 0; _i761 < _map760.size; ++_i761) + org.apache.thrift.protocol.TMap _map768 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map768.size); + for (int _i769 = 0; _i769 < _map768.size; ++_i769) { - String _key762; // required - String _val763; // required - _key762 = iprot.readString(); - _val763 = iprot.readString(); - struct.part_vals.put(_key762, _val763); + String _key770; // required + String _val771; // required + _key770 = iprot.readString(); + _val771 = iprot.readString(); + struct.part_vals.put(_key770, _val771); } iprot.readMapEnd(); } @@ -77759,10 +77862,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, markPartitionForEv oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter764 : struct.part_vals.entrySet()) + for (Map.Entry _iter772 : struct.part_vals.entrySet()) { - oprot.writeString(_iter764.getKey()); - oprot.writeString(_iter764.getValue()); + oprot.writeString(_iter772.getKey()); + oprot.writeString(_iter772.getValue()); } oprot.writeMapEnd(); } @@ -77813,10 +77916,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, markPartitionForEve if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter765 : 
struct.part_vals.entrySet()) + for (Map.Entry _iter773 : struct.part_vals.entrySet()) { - oprot.writeString(_iter765.getKey()); - oprot.writeString(_iter765.getValue()); + oprot.writeString(_iter773.getKey()); + oprot.writeString(_iter773.getValue()); } } } @@ -77839,15 +77942,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, markPartitionForEven } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map766 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map766.size); - for (int _i767 = 0; _i767 < _map766.size; ++_i767) + org.apache.thrift.protocol.TMap _map774 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map774.size); + for (int _i775 = 0; _i775 < _map774.size; ++_i775) { - String _key768; // required - String _val769; // required - _key768 = iprot.readString(); - _val769 = iprot.readString(); - struct.part_vals.put(_key768, _val769); + String _key776; // required + String _val777; // required + _key776 = iprot.readString(); + _val777 = iprot.readString(); + struct.part_vals.put(_key776, _val777); } } struct.setPart_valsIsSet(true); @@ -79342,15 +79445,15 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, isPartitionMarkedFo case 3: // PART_VALS if (schemeField.type == org.apache.thrift.protocol.TType.MAP) { { - org.apache.thrift.protocol.TMap _map770 = iprot.readMapBegin(); - struct.part_vals = new HashMap(2*_map770.size); - for (int _i771 = 0; _i771 < _map770.size; ++_i771) + org.apache.thrift.protocol.TMap _map778 = iprot.readMapBegin(); + struct.part_vals = new HashMap(2*_map778.size); + for (int _i779 = 0; _i779 < _map778.size; ++_i779) { - String _key772; // required - String _val773; // required - _key772 = iprot.readString(); - _val773 = iprot.readString(); - struct.part_vals.put(_key772, _val773); + String _key780; // required + String _val781; // required + _key780 = iprot.readString(); + _val781 = iprot.readString(); + struct.part_vals.put(_key780, _val781); } iprot.readMapEnd(); } @@ -79394,10 +79497,10 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, isPartitionMarkedF oprot.writeFieldBegin(PART_VALS_FIELD_DESC); { oprot.writeMapBegin(new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, struct.part_vals.size())); - for (Map.Entry _iter774 : struct.part_vals.entrySet()) + for (Map.Entry _iter782 : struct.part_vals.entrySet()) { - oprot.writeString(_iter774.getKey()); - oprot.writeString(_iter774.getValue()); + oprot.writeString(_iter782.getKey()); + oprot.writeString(_iter782.getValue()); } oprot.writeMapEnd(); } @@ -79448,10 +79551,10 @@ public void write(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFo if (struct.isSetPart_vals()) { { oprot.writeI32(struct.part_vals.size()); - for (Map.Entry _iter775 : struct.part_vals.entrySet()) + for (Map.Entry _iter783 : struct.part_vals.entrySet()) { - oprot.writeString(_iter775.getKey()); - oprot.writeString(_iter775.getValue()); + oprot.writeString(_iter783.getKey()); + oprot.writeString(_iter783.getValue()); } } } @@ -79474,15 +79577,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, isPartitionMarkedFor } if (incoming.get(2)) { { - org.apache.thrift.protocol.TMap _map776 = new 
org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.part_vals = new HashMap(2*_map776.size); - for (int _i777 = 0; _i777 < _map776.size; ++_i777) + org.apache.thrift.protocol.TMap _map784 = new org.apache.thrift.protocol.TMap(org.apache.thrift.protocol.TType.STRING, org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.part_vals = new HashMap(2*_map784.size); + for (int _i785 = 0; _i785 < _map784.size; ++_i785) { - String _key778; // required - String _val779; // required - _key778 = iprot.readString(); - _val779 = iprot.readString(); - struct.part_vals.put(_key778, _val779); + String _key786; // required + String _val787; // required + _key786 = iprot.readString(); + _val787 = iprot.readString(); + struct.part_vals.put(_key786, _val787); } } struct.setPart_valsIsSet(true); @@ -86206,14 +86309,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_indexes_result case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list780 = iprot.readListBegin(); - struct.success = new ArrayList(_list780.size); - for (int _i781 = 0; _i781 < _list780.size; ++_i781) + org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); + struct.success = new ArrayList(_list788.size); + for (int _i789 = 0; _i789 < _list788.size; ++_i789) { - Index _elem782; // required - _elem782 = new Index(); - _elem782.read(iprot); - struct.success.add(_elem782); + Index _elem790; // required + _elem790 = new Index(); + _elem790.read(iprot); + struct.success.add(_elem790); } iprot.readListEnd(); } @@ -86257,9 +86360,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_indexes_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Index _iter783 : struct.success) + for (Index _iter791 : struct.success) { - _iter783.write(oprot); + _iter791.write(oprot); } oprot.writeListEnd(); } @@ -86306,9 +86409,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_indexes_result if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Index _iter784 : struct.success) + for (Index _iter792 : struct.success) { - _iter784.write(oprot); + _iter792.write(oprot); } } } @@ -86326,14 +86429,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_indexes_result s BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list785 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list785.size); - for (int _i786 = 0; _i786 < _list785.size; ++_i786) + org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list793.size); + for (int _i794 = 0; _i794 < _list793.size; ++_i794) { - Index _elem787; // required - _elem787 = new Index(); - _elem787.read(iprot); - struct.success.add(_elem787); + Index _elem795; // required + _elem795 = new Index(); + _elem795.read(iprot); + struct.success.add(_elem795); } } struct.setSuccessIsSet(true); @@ -87315,13 +87418,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_index_names_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list788 = iprot.readListBegin(); - struct.success = new ArrayList(_list788.size); - for (int _i789 = 0; _i789 < _list788.size; ++_i789) + org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); + struct.success = new ArrayList(_list796.size); + for (int _i797 = 0; _i797 < _list796.size; ++_i797) { - String _elem790; // required - _elem790 = iprot.readString(); - struct.success.add(_elem790); + String _elem798; // required + _elem798 = iprot.readString(); + struct.success.add(_elem798); } iprot.readListEnd(); } @@ -87356,9 +87459,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_index_names_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter791 : struct.success) + for (String _iter799 : struct.success) { - oprot.writeString(_iter791); + oprot.writeString(_iter799); } oprot.writeListEnd(); } @@ -87397,9 +87500,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_index_names_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter792 : struct.success) + for (String _iter800 : struct.success) { - oprot.writeString(_iter792); + oprot.writeString(_iter800); } } } @@ -87414,13 +87517,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_index_names_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list793 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list793.size); - for (int _i794 = 0; _i794 < _list793.size; ++_i794) + org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list801.size); + for (int _i802 = 0; _i802 < _list801.size; ++_i802) { - String _elem795; // required - _elem795 = iprot.readString(); - struct.success.add(_elem795); + String _elem803; // required + _elem803 = iprot.readString(); + struct.success.add(_elem803); } } struct.setSuccessIsSet(true); @@ -92191,26 +92294,1050 @@ public int hashCode() { if (present_o2) builder.append(o2); - boolean present_o3 = true && (isSetO3()); - builder.append(present_o3); - if (present_o3) - builder.append(o3); - - boolean present_o4 = true && (isSetO4()); - builder.append(present_o4); - if (present_o4) - builder.append(o4); - + boolean present_o3 = true && (isSetO3()); + builder.append(present_o3); + if (present_o3) + builder.append(o3); + + boolean present_o4 = true && (isSetO4()); + builder.append(present_o4); + if (present_o4) + builder.append(o4); + + return builder.toHashCode(); + } + + public int compareTo(get_partition_column_statistics_result other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + get_partition_column_statistics_result typedOther = (get_partition_column_statistics_result)other; + + lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetSuccess()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.success, typedOther.success); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO1()).compareTo(typedOther.isSetO1()); + if 
(lastComparison != 0) { + return lastComparison; + } + if (isSetO1()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o1, typedOther.o1); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO2()).compareTo(typedOther.isSetO2()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO2()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o2, typedOther.o2); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(typedOther.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, typedOther.o4); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_partition_column_statistics_result("); + boolean first = true; + + sb.append("success:"); + if (this.success == null) { + sb.append("null"); + } else { + sb.append(this.success); + } + first = false; + if (!first) sb.append(", "); + sb.append("o1:"); + if (this.o1 == null) { + sb.append("null"); + } else { + sb.append(this.o1); + } + first = false; + if (!first) sb.append(", "); + sb.append("o2:"); + if (this.o2 == null) { + sb.append("null"); + } else { + sb.append(this.o2); + } + first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (success != null) { + success.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_partition_column_statistics_resultStandardSchemeFactory implements SchemeFactory { + public get_partition_column_statistics_resultStandardScheme getScheme() { + return new 
get_partition_column_statistics_resultStandardScheme(); + } + } + + private static class get_partition_column_statistics_resultStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 0: // SUCCESS + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.success = new ColumnStatistics(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 1: // O1 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 2: // O2 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new InvalidInputException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new InvalidObjectException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.success != null) { + oprot.writeFieldBegin(SUCCESS_FIELD_DESC); + struct.success.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o1 != null) { + oprot.writeFieldBegin(O1_FIELD_DESC); + struct.o1.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o2 != null) { + oprot.writeFieldBegin(O2_FIELD_DESC); + struct.o2.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_partition_column_statistics_resultTupleSchemeFactory implements SchemeFactory { + public get_partition_column_statistics_resultTupleScheme getScheme() { + return new get_partition_column_statistics_resultTupleScheme(); + } + } + + private static class get_partition_column_statistics_resultTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + 
TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetSuccess()) { + optionals.set(0); + } + if (struct.isSetO1()) { + optionals.set(1); + } + if (struct.isSetO2()) { + optionals.set(2); + } + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); + if (struct.isSetSuccess()) { + struct.success.write(oprot); + } + if (struct.isSetO1()) { + struct.o1.write(oprot); + } + if (struct.isSetO2()) { + struct.o2.write(oprot); + } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(5); + if (incoming.get(0)) { + struct.success = new ColumnStatistics(); + struct.success.read(iprot); + struct.setSuccessIsSet(true); + } + if (incoming.get(1)) { + struct.o1 = new NoSuchObjectException(); + struct.o1.read(iprot); + struct.setO1IsSet(true); + } + if (incoming.get(2)) { + struct.o2 = new MetaException(); + struct.o2.read(iprot); + struct.setO2IsSet(true); + } + if (incoming.get(3)) { + struct.o3 = new InvalidInputException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new InvalidObjectException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } + } + } + + } + + public static class get_table_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_statistics_req_args"); + + private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_table_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_table_statistics_req_argsTupleSchemeFactory()); + } + + private TableStatsRequest request; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + REQUEST((short)1, "request"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. + */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 1: // REQUEST + return REQUEST; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. 
+ */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableStatsRequest.class))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_statistics_req_args.class, metaDataMap); + } + + public get_table_statistics_req_args() { + } + + public get_table_statistics_req_args( + TableStatsRequest request) + { + this(); + this.request = request; + } + + /** + * Performs a deep copy on other. + */ + public get_table_statistics_req_args(get_table_statistics_req_args other) { + if (other.isSetRequest()) { + this.request = new TableStatsRequest(other.request); + } + } + + public get_table_statistics_req_args deepCopy() { + return new get_table_statistics_req_args(this); + } + + @Override + public void clear() { + this.request = null; + } + + public TableStatsRequest getRequest() { + return this.request; + } + + public void setRequest(TableStatsRequest request) { + this.request = request; + } + + public void unsetRequest() { + this.request = null; + } + + /** Returns true if field request is set (has been assigned a value) and false otherwise */ + public boolean isSetRequest() { + return this.request != null; + } + + public void setRequestIsSet(boolean value) { + if (!value) { + this.request = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case REQUEST: + if (value == null) { + unsetRequest(); + } else { + setRequest((TableStatsRequest)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case REQUEST: + return getRequest(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case REQUEST: + return isSetRequest(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_table_statistics_req_args) + return this.equals((get_table_statistics_req_args)that); + return false; + } + + public boolean equals(get_table_statistics_req_args that) { + if (that == null) + return false; + + boolean this_present_request = true && this.isSetRequest(); + boolean that_present_request = true && that.isSetRequest(); + if (this_present_request || that_present_request) { + if (!(this_present_request && that_present_request)) + return false; + if (!this.request.equals(that.request)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new 
HashCodeBuilder(); + + boolean present_request = true && (isSetRequest()); + builder.append(present_request); + if (present_request) + builder.append(request); + + return builder.toHashCode(); + } + + public int compareTo(get_table_statistics_req_args other) { + if (!getClass().equals(other.getClass())) { + return getClass().getName().compareTo(other.getClass().getName()); + } + + int lastComparison = 0; + get_table_statistics_req_args typedOther = (get_table_statistics_req_args)other; + + lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetRequest()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.request, typedOther.request); + if (lastComparison != 0) { + return lastComparison; + } + } + return 0; + } + + public _Fields fieldForId(int fieldId) { + return _Fields.findByThriftId(fieldId); + } + + public void read(org.apache.thrift.protocol.TProtocol iprot) throws org.apache.thrift.TException { + schemes.get(iprot.getScheme()).getScheme().read(iprot, this); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache.thrift.TException { + schemes.get(oprot.getScheme()).getScheme().write(oprot, this); + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder("get_table_statistics_req_args("); + boolean first = true; + + sb.append("request:"); + if (this.request == null) { + sb.append("null"); + } else { + sb.append(this.request); + } + first = false; + sb.append(")"); + return sb.toString(); + } + + public void validate() throws org.apache.thrift.TException { + // check for required fields + // check for sub-struct validity + if (request != null) { + request.validate(); + } + } + + private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { + try { + write(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(out))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { + try { + read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); + } catch (org.apache.thrift.TException te) { + throw new java.io.IOException(te); + } + } + + private static class get_table_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public get_table_statistics_req_argsStandardScheme getScheme() { + return new get_table_statistics_req_argsStandardScheme(); + } + } + + private static class get_table_statistics_req_argsStandardScheme extends StandardScheme { + + public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + org.apache.thrift.protocol.TField schemeField; + iprot.readStructBegin(); + while (true) + { + schemeField = iprot.readFieldBegin(); + if (schemeField.type == org.apache.thrift.protocol.TType.STOP) { + break; + } + switch (schemeField.id) { + case 1: // REQUEST + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.request = new TableStatsRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + default: + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + iprot.readFieldEnd(); + } + 
iprot.readStructEnd(); + struct.validate(); + } + + public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + struct.validate(); + + oprot.writeStructBegin(STRUCT_DESC); + if (struct.request != null) { + oprot.writeFieldBegin(REQUEST_FIELD_DESC); + struct.request.write(oprot); + oprot.writeFieldEnd(); + } + oprot.writeFieldStop(); + oprot.writeStructEnd(); + } + + } + + private static class get_table_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public get_table_statistics_req_argsTupleScheme getScheme() { + return new get_table_statistics_req_argsTupleScheme(); + } + } + + private static class get_table_statistics_req_argsTupleScheme extends TupleScheme { + + @Override + public void write(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + TTupleProtocol oprot = (TTupleProtocol) prot; + BitSet optionals = new BitSet(); + if (struct.isSetRequest()) { + optionals.set(0); + } + oprot.writeBitSet(optionals, 1); + if (struct.isSetRequest()) { + struct.request.write(oprot); + } + } + + @Override + public void read(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + TTupleProtocol iprot = (TTupleProtocol) prot; + BitSet incoming = iprot.readBitSet(1); + if (incoming.get(0)) { + struct.request = new TableStatsRequest(); + struct.request.read(iprot); + struct.setRequestIsSet(true); + } + } + } + + } + + public static class get_table_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_statistics_req_result"); + + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); + private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + + private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); + static { + schemes.put(StandardScheme.class, new get_table_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_table_statistics_req_resultTupleSchemeFactory()); + } + + private TableStatsResult success; // required + private NoSuchObjectException o1; // required + private MetaException o2; // required + + /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ + public enum _Fields implements org.apache.thrift.TFieldIdEnum { + SUCCESS((short)0, "success"), + O1((short)1, "o1"), + O2((short)2, "o2"); + + private static final Map byName = new HashMap(); + + static { + for (_Fields field : EnumSet.allOf(_Fields.class)) { + byName.put(field.getFieldName(), field); + } + } + + /** + * Find the _Fields constant that matches fieldId, or null if its not found. 
+ */ + public static _Fields findByThriftId(int fieldId) { + switch(fieldId) { + case 0: // SUCCESS + return SUCCESS; + case 1: // O1 + return O1; + case 2: // O2 + return O2; + default: + return null; + } + } + + /** + * Find the _Fields constant that matches fieldId, throwing an exception + * if it is not found. + */ + public static _Fields findByThriftIdOrThrow(int fieldId) { + _Fields fields = findByThriftId(fieldId); + if (fields == null) throw new IllegalArgumentException("Field " + fieldId + " doesn't exist!"); + return fields; + } + + /** + * Find the _Fields constant that matches name, or null if its not found. + */ + public static _Fields findByName(String name) { + return byName.get(name); + } + + private final short _thriftId; + private final String _fieldName; + + _Fields(short thriftId, String fieldName) { + _thriftId = thriftId; + _fieldName = fieldName; + } + + public short getThriftFieldId() { + return _thriftId; + } + + public String getFieldName() { + return _fieldName; + } + } + + // isset id assignments + public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; + static { + Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); + tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableStatsResult.class))); + tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + metaDataMap = Collections.unmodifiableMap(tmpMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_statistics_req_result.class, metaDataMap); + } + + public get_table_statistics_req_result() { + } + + public get_table_statistics_req_result( + TableStatsResult success, + NoSuchObjectException o1, + MetaException o2) + { + this(); + this.success = success; + this.o1 = o1; + this.o2 = o2; + } + + /** + * Performs a deep copy on other. 
+ */ + public get_table_statistics_req_result(get_table_statistics_req_result other) { + if (other.isSetSuccess()) { + this.success = new TableStatsResult(other.success); + } + if (other.isSetO1()) { + this.o1 = new NoSuchObjectException(other.o1); + } + if (other.isSetO2()) { + this.o2 = new MetaException(other.o2); + } + } + + public get_table_statistics_req_result deepCopy() { + return new get_table_statistics_req_result(this); + } + + @Override + public void clear() { + this.success = null; + this.o1 = null; + this.o2 = null; + } + + public TableStatsResult getSuccess() { + return this.success; + } + + public void setSuccess(TableStatsResult success) { + this.success = success; + } + + public void unsetSuccess() { + this.success = null; + } + + /** Returns true if field success is set (has been assigned a value) and false otherwise */ + public boolean isSetSuccess() { + return this.success != null; + } + + public void setSuccessIsSet(boolean value) { + if (!value) { + this.success = null; + } + } + + public NoSuchObjectException getO1() { + return this.o1; + } + + public void setO1(NoSuchObjectException o1) { + this.o1 = o1; + } + + public void unsetO1() { + this.o1 = null; + } + + /** Returns true if field o1 is set (has been assigned a value) and false otherwise */ + public boolean isSetO1() { + return this.o1 != null; + } + + public void setO1IsSet(boolean value) { + if (!value) { + this.o1 = null; + } + } + + public MetaException getO2() { + return this.o2; + } + + public void setO2(MetaException o2) { + this.o2 = o2; + } + + public void unsetO2() { + this.o2 = null; + } + + /** Returns true if field o2 is set (has been assigned a value) and false otherwise */ + public boolean isSetO2() { + return this.o2 != null; + } + + public void setO2IsSet(boolean value) { + if (!value) { + this.o2 = null; + } + } + + public void setFieldValue(_Fields field, Object value) { + switch (field) { + case SUCCESS: + if (value == null) { + unsetSuccess(); + } else { + setSuccess((TableStatsResult)value); + } + break; + + case O1: + if (value == null) { + unsetO1(); + } else { + setO1((NoSuchObjectException)value); + } + break; + + case O2: + if (value == null) { + unsetO2(); + } else { + setO2((MetaException)value); + } + break; + + } + } + + public Object getFieldValue(_Fields field) { + switch (field) { + case SUCCESS: + return getSuccess(); + + case O1: + return getO1(); + + case O2: + return getO2(); + + } + throw new IllegalStateException(); + } + + /** Returns true if field corresponding to fieldID is set (has been assigned a value) and false otherwise */ + public boolean isSet(_Fields field) { + if (field == null) { + throw new IllegalArgumentException(); + } + + switch (field) { + case SUCCESS: + return isSetSuccess(); + case O1: + return isSetO1(); + case O2: + return isSetO2(); + } + throw new IllegalStateException(); + } + + @Override + public boolean equals(Object that) { + if (that == null) + return false; + if (that instanceof get_table_statistics_req_result) + return this.equals((get_table_statistics_req_result)that); + return false; + } + + public boolean equals(get_table_statistics_req_result that) { + if (that == null) + return false; + + boolean this_present_success = true && this.isSetSuccess(); + boolean that_present_success = true && that.isSetSuccess(); + if (this_present_success || that_present_success) { + if (!(this_present_success && that_present_success)) + return false; + if (!this.success.equals(that.success)) + return false; + } + + boolean this_present_o1 = true && 
this.isSetO1(); + boolean that_present_o1 = true && that.isSetO1(); + if (this_present_o1 || that_present_o1) { + if (!(this_present_o1 && that_present_o1)) + return false; + if (!this.o1.equals(that.o1)) + return false; + } + + boolean this_present_o2 = true && this.isSetO2(); + boolean that_present_o2 = true && that.isSetO2(); + if (this_present_o2 || that_present_o2) { + if (!(this_present_o2 && that_present_o2)) + return false; + if (!this.o2.equals(that.o2)) + return false; + } + + return true; + } + + @Override + public int hashCode() { + HashCodeBuilder builder = new HashCodeBuilder(); + + boolean present_success = true && (isSetSuccess()); + builder.append(present_success); + if (present_success) + builder.append(success); + + boolean present_o1 = true && (isSetO1()); + builder.append(present_o1); + if (present_o1) + builder.append(o1); + + boolean present_o2 = true && (isSetO2()); + builder.append(present_o2); + if (present_o2) + builder.append(o2); + return builder.toHashCode(); } - public int compareTo(get_partition_column_statistics_result other) { + public int compareTo(get_table_statistics_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - get_partition_column_statistics_result typedOther = (get_partition_column_statistics_result)other; + get_table_statistics_req_result typedOther = (get_table_statistics_req_result)other; lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); if (lastComparison != 0) { @@ -92242,26 +93369,6 @@ public int compareTo(get_partition_column_statistics_result other) { return lastComparison; } } - lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO3()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); - if (lastComparison != 0) { - return lastComparison; - } - } - lastComparison = Boolean.valueOf(isSetO4()).compareTo(typedOther.isSetO4()); - if (lastComparison != 0) { - return lastComparison; - } - if (isSetO4()) { - lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, typedOther.o4); - if (lastComparison != 0) { - return lastComparison; - } - } return 0; } @@ -92279,7 +93386,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_partition_column_statistics_result("); + StringBuilder sb = new StringBuilder("get_table_statistics_req_result("); boolean first = true; sb.append("success:"); @@ -92305,22 +93412,6 @@ public String toString() { sb.append(this.o2); } first = false; - if (!first) sb.append(", "); - sb.append("o3:"); - if (this.o3 == null) { - sb.append("null"); - } else { - sb.append(this.o3); - } - first = false; - if (!first) sb.append(", "); - sb.append("o4:"); - if (this.o4 == null) { - sb.append("null"); - } else { - sb.append(this.o4); - } - first = false; sb.append(")"); return sb.toString(); } @@ -92349,15 +93440,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_partition_column_statistics_resultStandardSchemeFactory implements SchemeFactory { - public get_partition_column_statistics_resultStandardScheme getScheme() { - return new get_partition_column_statistics_resultStandardScheme(); + private static class get_table_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public get_table_statistics_req_resultStandardScheme getScheme() { + return new get_table_statistics_req_resultStandardScheme(); } } - private static class get_partition_column_statistics_resultStandardScheme extends StandardScheme { + private static class get_table_statistics_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -92369,7 +93460,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_colum switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new ColumnStatistics(); + struct.success = new TableStatsResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -92394,24 +93485,6 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_colum org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; - case 3: // O3 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o3 = new InvalidInputException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; - case 4: // O4 - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o4 = new InvalidObjectException(); - struct.o4.read(iprot); - struct.setO4IsSet(true); - } else { - org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); - } - break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -92421,7 +93494,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partition_colum struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -92440,32 +93513,22 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_partition_colu struct.o2.write(oprot); oprot.writeFieldEnd(); } - if (struct.o3 != null) { - oprot.writeFieldBegin(O3_FIELD_DESC); - struct.o3.write(oprot); - oprot.writeFieldEnd(); - } - if (struct.o4 != null) { - oprot.writeFieldBegin(O4_FIELD_DESC); - struct.o4.write(oprot); - oprot.writeFieldEnd(); - } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_partition_column_statistics_resultTupleSchemeFactory implements SchemeFactory { - public get_partition_column_statistics_resultTupleScheme getScheme() { - return new get_partition_column_statistics_resultTupleScheme(); + private static class get_table_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public get_table_statistics_req_resultTupleScheme getScheme() { + return new get_table_statistics_req_resultTupleScheme(); } } - private static class get_partition_column_statistics_resultTupleScheme extends TupleScheme { + private static class get_table_statistics_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -92477,13 +93540,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_colum if (struct.isSetO2()) { optionals.set(2); } - if (struct.isSetO3()) { - optionals.set(3); - } - if (struct.isSetO4()) { - optionals.set(4); - } - oprot.writeBitSet(optionals, 5); + oprot.writeBitSet(optionals, 3); if (struct.isSetSuccess()) { struct.success.write(oprot); } @@ -92493,20 +93550,14 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partition_colum if (struct.isSetO2()) { struct.o2.write(oprot); } - if (struct.isSetO3()) { - struct.o3.write(oprot); - } - if (struct.isSetO4()) { - struct.o4.write(oprot); - } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column_statistics_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(5); + BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new ColumnStatistics(); + struct.success = new TableStatsResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -92520,33 +93571,23 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partition_column struct.o2.read(iprot); struct.setO2IsSet(true); } - if (incoming.get(3)) { - struct.o3 = new InvalidInputException(); - struct.o3.read(iprot); - struct.setO3IsSet(true); - } - if (incoming.get(4)) { - struct.o4 = new InvalidObjectException(); - struct.o4.read(iprot); - struct.setO4IsSet(true); - } } } } - public static class get_table_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_statistics_req_args"); + public static class get_partitions_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final 
org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_statistics_req_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_table_statistics_req_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_table_statistics_req_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_statistics_req_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_statistics_req_argsTupleSchemeFactory()); } - private TableStatsRequest request; // required + private PartitionsStatsRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. */ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -92611,16 +93652,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableStatsRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_statistics_req_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_statistics_req_args.class, metaDataMap); } - public get_table_statistics_req_args() { + public get_partitions_statistics_req_args() { } - public get_table_statistics_req_args( - TableStatsRequest request) + public get_partitions_statistics_req_args( + PartitionsStatsRequest request) { this(); this.request = request; @@ -92629,14 +93670,14 @@ public get_table_statistics_req_args( /** * Performs a deep copy on other. 
*/ - public get_table_statistics_req_args(get_table_statistics_req_args other) { + public get_partitions_statistics_req_args(get_partitions_statistics_req_args other) { if (other.isSetRequest()) { - this.request = new TableStatsRequest(other.request); + this.request = new PartitionsStatsRequest(other.request); } } - public get_table_statistics_req_args deepCopy() { - return new get_table_statistics_req_args(this); + public get_partitions_statistics_req_args deepCopy() { + return new get_partitions_statistics_req_args(this); } @Override @@ -92644,11 +93685,11 @@ public void clear() { this.request = null; } - public TableStatsRequest getRequest() { + public PartitionsStatsRequest getRequest() { return this.request; } - public void setRequest(TableStatsRequest request) { + public void setRequest(PartitionsStatsRequest request) { this.request = request; } @@ -92673,7 +93714,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((TableStatsRequest)value); + setRequest((PartitionsStatsRequest)value); } break; @@ -92706,12 +93747,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_table_statistics_req_args) - return this.equals((get_table_statistics_req_args)that); + if (that instanceof get_partitions_statistics_req_args) + return this.equals((get_partitions_statistics_req_args)that); return false; } - public boolean equals(get_table_statistics_req_args that) { + public boolean equals(get_partitions_statistics_req_args that) { if (that == null) return false; @@ -92739,13 +93780,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(get_table_statistics_req_args other) { + public int compareTo(get_partitions_statistics_req_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - get_table_statistics_req_args typedOther = (get_table_statistics_req_args)other; + get_partitions_statistics_req_args typedOther = (get_partitions_statistics_req_args)other; lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); if (lastComparison != 0) { @@ -92774,7 +93815,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_table_statistics_req_args("); + StringBuilder sb = new StringBuilder("get_partitions_statistics_req_args("); boolean first = true; sb.append("request:"); @@ -92812,15 +93853,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_table_statistics_req_argsStandardSchemeFactory implements SchemeFactory { - public get_table_statistics_req_argsStandardScheme getScheme() { - return new get_table_statistics_req_argsStandardScheme(); + private static class get_partitions_statistics_req_argsStandardSchemeFactory implements SchemeFactory { + public get_partitions_statistics_req_argsStandardScheme getScheme() { + return new get_partitions_statistics_req_argsStandardScheme(); } } - private static class get_table_statistics_req_argsStandardScheme extends StandardScheme { + private static class get_partitions_statistics_req_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -92832,7 +93873,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistic switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new TableStatsRequest(); + struct.request = new PartitionsStatsRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -92848,7 +93889,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistic struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -92863,16 +93904,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_statisti } - private static class get_table_statistics_req_argsTupleSchemeFactory implements SchemeFactory { - public get_table_statistics_req_argsTupleScheme getScheme() { - return new get_table_statistics_req_argsTupleScheme(); + private static class get_partitions_statistics_req_argsTupleSchemeFactory implements SchemeFactory { + public get_partitions_statistics_req_argsTupleScheme getScheme() { + return new get_partitions_statistics_req_argsTupleScheme(); } } - private static class get_table_statistics_req_argsTupleScheme extends TupleScheme { + private static class get_partitions_statistics_req_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -92885,11 +93926,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_statistic } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, 
get_table_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new TableStatsRequest(); + struct.request = new PartitionsStatsRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -92898,8 +93939,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_statistics } - public static class get_table_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_table_statistics_req_result"); + public static class get_partitions_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_statistics_req_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -92907,11 +93948,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_statistics private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_table_statistics_req_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_table_statistics_req_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_partitions_statistics_req_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_partitions_statistics_req_resultTupleSchemeFactory()); } - private TableStatsResult success; // required + private PartitionsStatsResult success; // required private NoSuchObjectException o1; // required private MetaException o2; // required @@ -92984,20 +94025,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, TableStatsResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsStatsResult.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_table_statistics_req_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_statistics_req_result.class, 
metaDataMap); } - public get_table_statistics_req_result() { + public get_partitions_statistics_req_result() { } - public get_table_statistics_req_result( - TableStatsResult success, + public get_partitions_statistics_req_result( + PartitionsStatsResult success, NoSuchObjectException o1, MetaException o2) { @@ -93010,9 +94051,9 @@ public get_table_statistics_req_result( /** * Performs a deep copy on other. */ - public get_table_statistics_req_result(get_table_statistics_req_result other) { + public get_partitions_statistics_req_result(get_partitions_statistics_req_result other) { if (other.isSetSuccess()) { - this.success = new TableStatsResult(other.success); + this.success = new PartitionsStatsResult(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -93022,8 +94063,8 @@ public get_table_statistics_req_result(get_table_statistics_req_result other) { } } - public get_table_statistics_req_result deepCopy() { - return new get_table_statistics_req_result(this); + public get_partitions_statistics_req_result deepCopy() { + return new get_partitions_statistics_req_result(this); } @Override @@ -93033,11 +94074,11 @@ public void clear() { this.o2 = null; } - public TableStatsResult getSuccess() { + public PartitionsStatsResult getSuccess() { return this.success; } - public void setSuccess(TableStatsResult success) { + public void setSuccess(PartitionsStatsResult success) { this.success = success; } @@ -93108,7 +94149,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((TableStatsResult)value); + setSuccess((PartitionsStatsResult)value); } break; @@ -93167,12 +94208,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_table_statistics_req_result) - return this.equals((get_table_statistics_req_result)that); + if (that instanceof get_partitions_statistics_req_result) + return this.equals((get_partitions_statistics_req_result)that); return false; } - public boolean equals(get_table_statistics_req_result that) { + public boolean equals(get_partitions_statistics_req_result that) { if (that == null) return false; @@ -93228,13 +94269,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(get_table_statistics_req_result other) { + public int compareTo(get_partitions_statistics_req_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - get_table_statistics_req_result typedOther = (get_table_statistics_req_result)other; + get_partitions_statistics_req_result typedOther = (get_partitions_statistics_req_result)other; lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); if (lastComparison != 0) { @@ -93283,7 +94324,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_table_statistics_req_result("); + StringBuilder sb = new StringBuilder("get_partitions_statistics_req_result("); boolean first = true; sb.append("success:"); @@ -93337,15 +94378,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_table_statistics_req_resultStandardSchemeFactory implements SchemeFactory { - public get_table_statistics_req_resultStandardScheme getScheme() { - return new get_table_statistics_req_resultStandardScheme(); + private static class get_partitions_statistics_req_resultStandardSchemeFactory implements SchemeFactory { + public get_partitions_statistics_req_resultStandardScheme getScheme() { + return new get_partitions_statistics_req_resultStandardScheme(); } } - private static class get_table_statistics_req_resultStandardScheme extends StandardScheme { + private static class get_partitions_statistics_req_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -93357,7 +94398,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistic switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new TableStatsResult(); + struct.success = new PartitionsStatsResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -93391,7 +94432,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_table_statistic struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -93416,16 +94457,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_table_statisti } - private static class get_table_statistics_req_resultTupleSchemeFactory implements SchemeFactory { - public get_table_statistics_req_resultTupleScheme getScheme() { - return new get_table_statistics_req_resultTupleScheme(); + private static class get_partitions_statistics_req_resultTupleSchemeFactory implements SchemeFactory { + public get_partitions_statistics_req_resultTupleScheme getScheme() { + return new get_partitions_statistics_req_resultTupleScheme(); } } - private static class get_table_statistics_req_resultTupleScheme extends TupleScheme { + private static class get_partitions_statistics_req_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -93450,11 +94491,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_table_statistic } @Override - public void 
read(org.apache.thrift.protocol.TProtocol prot, get_table_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new TableStatsResult(); + struct.success = new PartitionsStatsResult(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -93473,15 +94514,15 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_table_statistics } - public static class get_partitions_statistics_req_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_statistics_req_args"); + public static class get_aggr_stats_for_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_aggr_stats_for_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_statistics_req_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_statistics_req_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_aggr_stats_for_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_aggr_stats_for_argsTupleSchemeFactory()); } private PartitionsStatsRequest request; // required @@ -93551,13 +94592,13 @@ public String getFieldName() { tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_statistics_req_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_aggr_stats_for_args.class, metaDataMap); } - public get_partitions_statistics_req_args() { + public get_aggr_stats_for_args() { } - public get_partitions_statistics_req_args( + public get_aggr_stats_for_args( PartitionsStatsRequest request) { this(); @@ -93567,14 +94608,14 @@ public get_partitions_statistics_req_args( /** * Performs a deep copy on other. 
*/ - public get_partitions_statistics_req_args(get_partitions_statistics_req_args other) { + public get_aggr_stats_for_args(get_aggr_stats_for_args other) { if (other.isSetRequest()) { this.request = new PartitionsStatsRequest(other.request); } } - public get_partitions_statistics_req_args deepCopy() { - return new get_partitions_statistics_req_args(this); + public get_aggr_stats_for_args deepCopy() { + return new get_aggr_stats_for_args(this); } @Override @@ -93644,12 +94685,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_statistics_req_args) - return this.equals((get_partitions_statistics_req_args)that); + if (that instanceof get_aggr_stats_for_args) + return this.equals((get_aggr_stats_for_args)that); return false; } - public boolean equals(get_partitions_statistics_req_args that) { + public boolean equals(get_aggr_stats_for_args that) { if (that == null) return false; @@ -93677,13 +94718,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(get_partitions_statistics_req_args other) { + public int compareTo(get_aggr_stats_for_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - get_partitions_statistics_req_args typedOther = (get_partitions_statistics_req_args)other; + get_aggr_stats_for_args typedOther = (get_aggr_stats_for_args)other; lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); if (lastComparison != 0) { @@ -93712,7 +94753,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_statistics_req_args("); + StringBuilder sb = new StringBuilder("get_aggr_stats_for_args("); boolean first = true; sb.append("request:"); @@ -93750,15 +94791,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_partitions_statistics_req_argsStandardSchemeFactory implements SchemeFactory { - public get_partitions_statistics_req_argsStandardScheme getScheme() { - return new get_partitions_statistics_req_argsStandardScheme(); + private static class get_aggr_stats_for_argsStandardSchemeFactory implements SchemeFactory { + public get_aggr_stats_for_argsStandardScheme getScheme() { + return new get_aggr_stats_for_argsStandardScheme(); } } - private static class get_partitions_statistics_req_argsStandardScheme extends StandardScheme { + private static class get_aggr_stats_for_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -93786,7 +94827,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_stat struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -93801,16 +94842,16 @@ public void 
write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_sta } - private static class get_partitions_statistics_req_argsTupleSchemeFactory implements SchemeFactory { - public get_partitions_statistics_req_argsTupleScheme getScheme() { - return new get_partitions_statistics_req_argsTupleScheme(); + private static class get_aggr_stats_for_argsTupleSchemeFactory implements SchemeFactory { + public get_aggr_stats_for_argsTupleScheme getScheme() { + return new get_aggr_stats_for_argsTupleScheme(); } } - private static class get_partitions_statistics_req_argsTupleScheme extends TupleScheme { + private static class get_aggr_stats_for_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -93823,7 +94864,7 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_stat } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { @@ -93836,8 +94877,8 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_stati } - public static class get_partitions_statistics_req_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_partitions_statistics_req_result"); + public static class get_aggr_stats_for_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_aggr_stats_for_result"); private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); @@ -93845,11 +94886,11 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_stati private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_partitions_statistics_req_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_partitions_statistics_req_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new get_aggr_stats_for_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new get_aggr_stats_for_resultTupleSchemeFactory()); } - private PartitionsStatsResult success; // required + private AggrStats success; // required private NoSuchObjectException o1; // required private MetaException o2; // required @@ -93922,20 +94963,20 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); 
tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsStatsResult.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AggrStats.class))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_partitions_statistics_req_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_aggr_stats_for_result.class, metaDataMap); } - public get_partitions_statistics_req_result() { + public get_aggr_stats_for_result() { } - public get_partitions_statistics_req_result( - PartitionsStatsResult success, + public get_aggr_stats_for_result( + AggrStats success, NoSuchObjectException o1, MetaException o2) { @@ -93948,9 +94989,9 @@ public get_partitions_statistics_req_result( /** * Performs a deep copy on other. */ - public get_partitions_statistics_req_result(get_partitions_statistics_req_result other) { + public get_aggr_stats_for_result(get_aggr_stats_for_result other) { if (other.isSetSuccess()) { - this.success = new PartitionsStatsResult(other.success); + this.success = new AggrStats(other.success); } if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); @@ -93960,8 +95001,8 @@ public get_partitions_statistics_req_result(get_partitions_statistics_req_result } } - public get_partitions_statistics_req_result deepCopy() { - return new get_partitions_statistics_req_result(this); + public get_aggr_stats_for_result deepCopy() { + return new get_aggr_stats_for_result(this); } @Override @@ -93971,11 +95012,11 @@ public void clear() { this.o2 = null; } - public PartitionsStatsResult getSuccess() { + public AggrStats getSuccess() { return this.success; } - public void setSuccess(PartitionsStatsResult success) { + public void setSuccess(AggrStats success) { this.success = success; } @@ -94046,7 +95087,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetSuccess(); } else { - setSuccess((PartitionsStatsResult)value); + setSuccess((AggrStats)value); } break; @@ -94105,12 +95146,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_partitions_statistics_req_result) - return this.equals((get_partitions_statistics_req_result)that); + if (that instanceof get_aggr_stats_for_result) + return this.equals((get_aggr_stats_for_result)that); return false; } - public boolean equals(get_partitions_statistics_req_result that) { + public boolean equals(get_aggr_stats_for_result that) { if (that == null) return false; @@ -94166,13 +95207,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(get_partitions_statistics_req_result other) { + public int compareTo(get_aggr_stats_for_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int 
lastComparison = 0; - get_partitions_statistics_req_result typedOther = (get_partitions_statistics_req_result)other; + get_aggr_stats_for_result typedOther = (get_aggr_stats_for_result)other; lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); if (lastComparison != 0) { @@ -94221,7 +95262,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_partitions_statistics_req_result("); + StringBuilder sb = new StringBuilder("get_aggr_stats_for_result("); boolean first = true; sb.append("success:"); @@ -94275,15 +95316,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_partitions_statistics_req_resultStandardSchemeFactory implements SchemeFactory { - public get_partitions_statistics_req_resultStandardScheme getScheme() { - return new get_partitions_statistics_req_resultStandardScheme(); + private static class get_aggr_stats_for_resultStandardSchemeFactory implements SchemeFactory { + public get_aggr_stats_for_resultStandardScheme getScheme() { + return new get_aggr_stats_for_resultStandardScheme(); } } - private static class get_partitions_statistics_req_resultStandardScheme extends StandardScheme { + private static class get_aggr_stats_for_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -94295,7 +95336,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_stat switch (schemeField.id) { case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new PartitionsStatsResult(); + struct.success = new AggrStats(); struct.success.read(iprot); struct.setSuccessIsSet(true); } else { @@ -94329,7 +95370,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_partitions_stat struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -94354,16 +95395,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_partitions_sta } - private static class get_partitions_statistics_req_resultTupleSchemeFactory implements SchemeFactory { - public get_partitions_statistics_req_resultTupleScheme getScheme() { - return new get_partitions_statistics_req_resultTupleScheme(); + private static class get_aggr_stats_for_resultTupleSchemeFactory implements SchemeFactory { + public get_aggr_stats_for_resultTupleScheme getScheme() { + return new get_aggr_stats_for_resultTupleScheme(); } } - private static class get_partitions_statistics_req_resultTupleScheme extends TupleScheme { + private static class get_aggr_stats_for_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { + public void 
write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -94388,11 +95429,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_partitions_stat } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_statistics_req_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(3); if (incoming.get(0)) { - struct.success = new PartitionsStatsResult(); + struct.success = new AggrStats(); struct.success.read(iprot); struct.setSuccessIsSet(true); } @@ -94411,18 +95452,18 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_partitions_stati } - public static class get_aggr_stats_for_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_aggr_stats_for_args"); + public static class set_aggr_stats_for_args implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("set_aggr_stats_for_args"); private static final org.apache.thrift.protocol.TField REQUEST_FIELD_DESC = new org.apache.thrift.protocol.TField("request", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_aggr_stats_for_argsStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_aggr_stats_for_argsTupleSchemeFactory()); + schemes.put(StandardScheme.class, new set_aggr_stats_for_argsStandardSchemeFactory()); + schemes.put(TupleScheme.class, new set_aggr_stats_for_argsTupleSchemeFactory()); } - private PartitionsStatsRequest request; // required + private SetPartitionsStatsRequest request; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { @@ -94487,16 +95528,16 @@ public String getFieldName() { static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.REQUEST, new org.apache.thrift.meta_data.FieldMetaData("request", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, PartitionsStatsRequest.class))); + new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, SetPartitionsStatsRequest.class))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_aggr_stats_for_args.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_aggr_stats_for_args.class, metaDataMap); } - public get_aggr_stats_for_args() { + public set_aggr_stats_for_args() { } - public get_aggr_stats_for_args( - PartitionsStatsRequest request) + public set_aggr_stats_for_args( + SetPartitionsStatsRequest request) { this(); this.request = request; @@ -94505,14 +95546,14 @@ public get_aggr_stats_for_args( /** * Performs a deep copy on other. */ - public get_aggr_stats_for_args(get_aggr_stats_for_args other) { + public set_aggr_stats_for_args(set_aggr_stats_for_args other) { if (other.isSetRequest()) { - this.request = new PartitionsStatsRequest(other.request); + this.request = new SetPartitionsStatsRequest(other.request); } } - public get_aggr_stats_for_args deepCopy() { - return new get_aggr_stats_for_args(this); + public set_aggr_stats_for_args deepCopy() { + return new set_aggr_stats_for_args(this); } @Override @@ -94520,11 +95561,11 @@ public void clear() { this.request = null; } - public PartitionsStatsRequest getRequest() { + public SetPartitionsStatsRequest getRequest() { return this.request; } - public void setRequest(PartitionsStatsRequest request) { + public void setRequest(SetPartitionsStatsRequest request) { this.request = request; } @@ -94549,7 +95590,7 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetRequest(); } else { - setRequest((PartitionsStatsRequest)value); + setRequest((SetPartitionsStatsRequest)value); } break; @@ -94582,12 +95623,12 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_aggr_stats_for_args) - return this.equals((get_aggr_stats_for_args)that); + if (that instanceof set_aggr_stats_for_args) + return this.equals((set_aggr_stats_for_args)that); return false; } - public boolean equals(get_aggr_stats_for_args that) { + public boolean equals(set_aggr_stats_for_args that) { if (that == null) return false; @@ -94615,13 +95656,13 @@ public int hashCode() { return builder.toHashCode(); } - public int compareTo(get_aggr_stats_for_args other) { + public int compareTo(set_aggr_stats_for_args other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - get_aggr_stats_for_args typedOther = (get_aggr_stats_for_args)other; + set_aggr_stats_for_args typedOther = (set_aggr_stats_for_args)other; lastComparison = Boolean.valueOf(isSetRequest()).compareTo(typedOther.isSetRequest()); if (lastComparison != 0) { @@ -94650,7 +95691,7 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. 
@Override public String toString() { - StringBuilder sb = new StringBuilder("get_aggr_stats_for_args("); + StringBuilder sb = new StringBuilder("set_aggr_stats_for_args("); boolean first = true; sb.append("request:"); @@ -94688,15 +95729,15 @@ private void readObject(java.io.ObjectInputStream in) throws java.io.IOException } } - private static class get_aggr_stats_for_argsStandardSchemeFactory implements SchemeFactory { - public get_aggr_stats_for_argsStandardScheme getScheme() { - return new get_aggr_stats_for_argsStandardScheme(); + private static class set_aggr_stats_for_argsStandardSchemeFactory implements SchemeFactory { + public set_aggr_stats_for_argsStandardScheme getScheme() { + return new set_aggr_stats_for_argsStandardScheme(); } } - private static class get_aggr_stats_for_argsStandardScheme extends StandardScheme { + private static class set_aggr_stats_for_argsStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, set_aggr_stats_for_args struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -94708,7 +95749,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_ switch (schemeField.id) { case 1: // REQUEST if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.request = new PartitionsStatsRequest(); + struct.request = new SetPartitionsStatsRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } else { @@ -94724,7 +95765,7 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, set_aggr_stats_for_args struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); @@ -94739,16 +95780,16 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_aggr_stats_for } - private static class get_aggr_stats_for_argsTupleSchemeFactory implements SchemeFactory { - public get_aggr_stats_for_argsTupleScheme getScheme() { - return new get_aggr_stats_for_argsTupleScheme(); + private static class set_aggr_stats_for_argsTupleSchemeFactory implements SchemeFactory { + public set_aggr_stats_for_argsTupleScheme getScheme() { + return new set_aggr_stats_for_argsTupleScheme(); } } - private static class get_aggr_stats_for_argsTupleScheme extends TupleScheme { + private static class set_aggr_stats_for_argsTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, set_aggr_stats_for_args struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetRequest()) { @@ -94761,11 +95802,11 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_ } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_args struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, set_aggr_stats_for_args struct) throws org.apache.thrift.TException { 
TTupleProtocol iprot = (TTupleProtocol) prot; BitSet incoming = iprot.readBitSet(1); if (incoming.get(0)) { - struct.request = new PartitionsStatsRequest(); + struct.request = new SetPartitionsStatsRequest(); struct.request.read(iprot); struct.setRequestIsSet(true); } @@ -94774,28 +95815,34 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_a } - public static class get_aggr_stats_for_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { - private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("get_aggr_stats_for_result"); + public static class set_aggr_stats_for_result implements org.apache.thrift.TBase, java.io.Serializable, Cloneable { + private static final org.apache.thrift.protocol.TStruct STRUCT_DESC = new org.apache.thrift.protocol.TStruct("set_aggr_stats_for_result"); - private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.STRUCT, (short)0); + private static final org.apache.thrift.protocol.TField SUCCESS_FIELD_DESC = new org.apache.thrift.protocol.TField("success", org.apache.thrift.protocol.TType.BOOL, (short)0); private static final org.apache.thrift.protocol.TField O1_FIELD_DESC = new org.apache.thrift.protocol.TField("o1", org.apache.thrift.protocol.TType.STRUCT, (short)1); private static final org.apache.thrift.protocol.TField O2_FIELD_DESC = new org.apache.thrift.protocol.TField("o2", org.apache.thrift.protocol.TType.STRUCT, (short)2); + private static final org.apache.thrift.protocol.TField O3_FIELD_DESC = new org.apache.thrift.protocol.TField("o3", org.apache.thrift.protocol.TType.STRUCT, (short)3); + private static final org.apache.thrift.protocol.TField O4_FIELD_DESC = new org.apache.thrift.protocol.TField("o4", org.apache.thrift.protocol.TType.STRUCT, (short)4); private static final Map, SchemeFactory> schemes = new HashMap, SchemeFactory>(); static { - schemes.put(StandardScheme.class, new get_aggr_stats_for_resultStandardSchemeFactory()); - schemes.put(TupleScheme.class, new get_aggr_stats_for_resultTupleSchemeFactory()); + schemes.put(StandardScheme.class, new set_aggr_stats_for_resultStandardSchemeFactory()); + schemes.put(TupleScheme.class, new set_aggr_stats_for_resultTupleSchemeFactory()); } - private AggrStats success; // required + private boolean success; // required private NoSuchObjectException o1; // required - private MetaException o2; // required + private InvalidObjectException o2; // required + private MetaException o3; // required + private InvalidInputException o4; // required /** The set of fields this struct contains, along with convenience methods for finding and manipulating them. 
*/ public enum _Fields implements org.apache.thrift.TFieldIdEnum { SUCCESS((short)0, "success"), O1((short)1, "o1"), - O2((short)2, "o2"); + O2((short)2, "o2"), + O3((short)3, "o3"), + O4((short)4, "o4"); private static final Map byName = new HashMap(); @@ -94816,6 +95863,10 @@ public static _Fields findByThriftId(int fieldId) { return O1; case 2: // O2 return O2; + case 3: // O3 + return O3; + case 4: // O4 + return O4; default: return null; } @@ -94856,80 +95907,98 @@ public String getFieldName() { } // isset id assignments + private static final int __SUCCESS_ISSET_ID = 0; + private byte __isset_bitfield = 0; public static final Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> metaDataMap; static { Map<_Fields, org.apache.thrift.meta_data.FieldMetaData> tmpMap = new EnumMap<_Fields, org.apache.thrift.meta_data.FieldMetaData>(_Fields.class); tmpMap.put(_Fields.SUCCESS, new org.apache.thrift.meta_data.FieldMetaData("success", org.apache.thrift.TFieldRequirementType.DEFAULT, - new org.apache.thrift.meta_data.StructMetaData(org.apache.thrift.protocol.TType.STRUCT, AggrStats.class))); + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.BOOL))); tmpMap.put(_Fields.O1, new org.apache.thrift.meta_data.FieldMetaData("o1", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); tmpMap.put(_Fields.O2, new org.apache.thrift.meta_data.FieldMetaData("o2", org.apache.thrift.TFieldRequirementType.DEFAULT, new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O3, new org.apache.thrift.meta_data.FieldMetaData("o3", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); + tmpMap.put(_Fields.O4, new org.apache.thrift.meta_data.FieldMetaData("o4", org.apache.thrift.TFieldRequirementType.DEFAULT, + new org.apache.thrift.meta_data.FieldValueMetaData(org.apache.thrift.protocol.TType.STRUCT))); metaDataMap = Collections.unmodifiableMap(tmpMap); - org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(get_aggr_stats_for_result.class, metaDataMap); + org.apache.thrift.meta_data.FieldMetaData.addStructMetaDataMap(set_aggr_stats_for_result.class, metaDataMap); } - public get_aggr_stats_for_result() { + public set_aggr_stats_for_result() { } - public get_aggr_stats_for_result( - AggrStats success, + public set_aggr_stats_for_result( + boolean success, NoSuchObjectException o1, - MetaException o2) + InvalidObjectException o2, + MetaException o3, + InvalidInputException o4) { this(); this.success = success; + setSuccessIsSet(true); this.o1 = o1; this.o2 = o2; + this.o3 = o3; + this.o4 = o4; } /** * Performs a deep copy on other. 
*/ - public get_aggr_stats_for_result(get_aggr_stats_for_result other) { - if (other.isSetSuccess()) { - this.success = new AggrStats(other.success); - } + public set_aggr_stats_for_result(set_aggr_stats_for_result other) { + __isset_bitfield = other.__isset_bitfield; + this.success = other.success; if (other.isSetO1()) { this.o1 = new NoSuchObjectException(other.o1); } if (other.isSetO2()) { - this.o2 = new MetaException(other.o2); + this.o2 = new InvalidObjectException(other.o2); + } + if (other.isSetO3()) { + this.o3 = new MetaException(other.o3); + } + if (other.isSetO4()) { + this.o4 = new InvalidInputException(other.o4); } } - public get_aggr_stats_for_result deepCopy() { - return new get_aggr_stats_for_result(this); + public set_aggr_stats_for_result deepCopy() { + return new set_aggr_stats_for_result(this); } @Override public void clear() { - this.success = null; + setSuccessIsSet(false); + this.success = false; this.o1 = null; this.o2 = null; + this.o3 = null; + this.o4 = null; } - public AggrStats getSuccess() { + public boolean isSuccess() { return this.success; } - public void setSuccess(AggrStats success) { + public void setSuccess(boolean success) { this.success = success; + setSuccessIsSet(true); } public void unsetSuccess() { - this.success = null; + __isset_bitfield = EncodingUtils.clearBit(__isset_bitfield, __SUCCESS_ISSET_ID); } /** Returns true if field success is set (has been assigned a value) and false otherwise */ public boolean isSetSuccess() { - return this.success != null; + return EncodingUtils.testBit(__isset_bitfield, __SUCCESS_ISSET_ID); } public void setSuccessIsSet(boolean value) { - if (!value) { - this.success = null; - } + __isset_bitfield = EncodingUtils.setBit(__isset_bitfield, __SUCCESS_ISSET_ID, value); } public NoSuchObjectException getO1() { @@ -94955,11 +96024,11 @@ public void setO1IsSet(boolean value) { } } - public MetaException getO2() { + public InvalidObjectException getO2() { return this.o2; } - public void setO2(MetaException o2) { + public void setO2(InvalidObjectException o2) { this.o2 = o2; } @@ -94978,13 +96047,59 @@ public void setO2IsSet(boolean value) { } } + public MetaException getO3() { + return this.o3; + } + + public void setO3(MetaException o3) { + this.o3 = o3; + } + + public void unsetO3() { + this.o3 = null; + } + + /** Returns true if field o3 is set (has been assigned a value) and false otherwise */ + public boolean isSetO3() { + return this.o3 != null; + } + + public void setO3IsSet(boolean value) { + if (!value) { + this.o3 = null; + } + } + + public InvalidInputException getO4() { + return this.o4; + } + + public void setO4(InvalidInputException o4) { + this.o4 = o4; + } + + public void unsetO4() { + this.o4 = null; + } + + /** Returns true if field o4 is set (has been assigned a value) and false otherwise */ + public boolean isSetO4() { + return this.o4 != null; + } + + public void setO4IsSet(boolean value) { + if (!value) { + this.o4 = null; + } + } + public void setFieldValue(_Fields field, Object value) { switch (field) { case SUCCESS: if (value == null) { unsetSuccess(); } else { - setSuccess((AggrStats)value); + setSuccess((Boolean)value); } break; @@ -95000,7 +96115,23 @@ public void setFieldValue(_Fields field, Object value) { if (value == null) { unsetO2(); } else { - setO2((MetaException)value); + setO2((InvalidObjectException)value); + } + break; + + case O3: + if (value == null) { + unsetO3(); + } else { + setO3((MetaException)value); + } + break; + + case O4: + if (value == null) { + unsetO4(); + } else { 
+ setO4((InvalidInputException)value); } break; @@ -95010,7 +96141,7 @@ public void setFieldValue(_Fields field, Object value) { public Object getFieldValue(_Fields field) { switch (field) { case SUCCESS: - return getSuccess(); + return Boolean.valueOf(isSuccess()); case O1: return getO1(); @@ -95018,6 +96149,12 @@ public Object getFieldValue(_Fields field) { case O2: return getO2(); + case O3: + return getO3(); + + case O4: + return getO4(); + } throw new IllegalStateException(); } @@ -95035,6 +96172,10 @@ public boolean isSet(_Fields field) { return isSetO1(); case O2: return isSetO2(); + case O3: + return isSetO3(); + case O4: + return isSetO4(); } throw new IllegalStateException(); } @@ -95043,21 +96184,21 @@ public boolean isSet(_Fields field) { public boolean equals(Object that) { if (that == null) return false; - if (that instanceof get_aggr_stats_for_result) - return this.equals((get_aggr_stats_for_result)that); + if (that instanceof set_aggr_stats_for_result) + return this.equals((set_aggr_stats_for_result)that); return false; } - public boolean equals(get_aggr_stats_for_result that) { + public boolean equals(set_aggr_stats_for_result that) { if (that == null) return false; - boolean this_present_success = true && this.isSetSuccess(); - boolean that_present_success = true && that.isSetSuccess(); + boolean this_present_success = true; + boolean that_present_success = true; if (this_present_success || that_present_success) { if (!(this_present_success && that_present_success)) return false; - if (!this.success.equals(that.success)) + if (this.success != that.success) return false; } @@ -95079,6 +96220,24 @@ public boolean equals(get_aggr_stats_for_result that) { return false; } + boolean this_present_o3 = true && this.isSetO3(); + boolean that_present_o3 = true && that.isSetO3(); + if (this_present_o3 || that_present_o3) { + if (!(this_present_o3 && that_present_o3)) + return false; + if (!this.o3.equals(that.o3)) + return false; + } + + boolean this_present_o4 = true && this.isSetO4(); + boolean that_present_o4 = true && that.isSetO4(); + if (this_present_o4 || that_present_o4) { + if (!(this_present_o4 && that_present_o4)) + return false; + if (!this.o4.equals(that.o4)) + return false; + } + return true; } @@ -95086,7 +96245,7 @@ public boolean equals(get_aggr_stats_for_result that) { public int hashCode() { HashCodeBuilder builder = new HashCodeBuilder(); - boolean present_success = true && (isSetSuccess()); + boolean present_success = true; builder.append(present_success); if (present_success) builder.append(success); @@ -95101,16 +96260,26 @@ public int hashCode() { if (present_o2) builder.append(o2); + boolean present_o3 = true && (isSetO3()); + builder.append(present_o3); + if (present_o3) + builder.append(o3); + + boolean present_o4 = true && (isSetO4()); + builder.append(present_o4); + if (present_o4) + builder.append(o4); + return builder.toHashCode(); } - public int compareTo(get_aggr_stats_for_result other) { + public int compareTo(set_aggr_stats_for_result other) { if (!getClass().equals(other.getClass())) { return getClass().getName().compareTo(other.getClass().getName()); } int lastComparison = 0; - get_aggr_stats_for_result typedOther = (get_aggr_stats_for_result)other; + set_aggr_stats_for_result typedOther = (set_aggr_stats_for_result)other; lastComparison = Boolean.valueOf(isSetSuccess()).compareTo(typedOther.isSetSuccess()); if (lastComparison != 0) { @@ -95142,6 +96311,26 @@ public int compareTo(get_aggr_stats_for_result other) { return lastComparison; } } + 
lastComparison = Boolean.valueOf(isSetO3()).compareTo(typedOther.isSetO3()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO3()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o3, typedOther.o3); + if (lastComparison != 0) { + return lastComparison; + } + } + lastComparison = Boolean.valueOf(isSetO4()).compareTo(typedOther.isSetO4()); + if (lastComparison != 0) { + return lastComparison; + } + if (isSetO4()) { + lastComparison = org.apache.thrift.TBaseHelper.compareTo(this.o4, typedOther.o4); + if (lastComparison != 0) { + return lastComparison; + } + } return 0; } @@ -95159,15 +96348,11 @@ public void write(org.apache.thrift.protocol.TProtocol oprot) throws org.apache. @Override public String toString() { - StringBuilder sb = new StringBuilder("get_aggr_stats_for_result("); + StringBuilder sb = new StringBuilder("set_aggr_stats_for_result("); boolean first = true; sb.append("success:"); - if (this.success == null) { - sb.append("null"); - } else { - sb.append(this.success); - } + sb.append(this.success); first = false; if (!first) sb.append(", "); sb.append("o1:"); @@ -95185,6 +96370,22 @@ public String toString() { sb.append(this.o2); } first = false; + if (!first) sb.append(", "); + sb.append("o3:"); + if (this.o3 == null) { + sb.append("null"); + } else { + sb.append(this.o3); + } + first = false; + if (!first) sb.append(", "); + sb.append("o4:"); + if (this.o4 == null) { + sb.append("null"); + } else { + sb.append(this.o4); + } + first = false; sb.append(")"); return sb.toString(); } @@ -95192,9 +96393,6 @@ public String toString() { public void validate() throws org.apache.thrift.TException { // check for required fields // check for sub-struct validity - if (success != null) { - success.validate(); - } } private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOException { @@ -95207,21 +96405,23 @@ private void writeObject(java.io.ObjectOutputStream out) throws java.io.IOExcept private void readObject(java.io.ObjectInputStream in) throws java.io.IOException, ClassNotFoundException { try { + // it doesn't seem like you should have to do this, but java serialization is wacky, and doesn't call the default constructor. 
+ __isset_bitfield = 0; read(new org.apache.thrift.protocol.TCompactProtocol(new org.apache.thrift.transport.TIOStreamTransport(in))); } catch (org.apache.thrift.TException te) { throw new java.io.IOException(te); } } - private static class get_aggr_stats_for_resultStandardSchemeFactory implements SchemeFactory { - public get_aggr_stats_for_resultStandardScheme getScheme() { - return new get_aggr_stats_for_resultStandardScheme(); + private static class set_aggr_stats_for_resultStandardSchemeFactory implements SchemeFactory { + public set_aggr_stats_for_resultStandardScheme getScheme() { + return new set_aggr_stats_for_resultStandardScheme(); } } - private static class get_aggr_stats_for_resultStandardScheme extends StandardScheme { + private static class set_aggr_stats_for_resultStandardScheme extends StandardScheme { - public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol iprot, set_aggr_stats_for_result struct) throws org.apache.thrift.TException { org.apache.thrift.protocol.TField schemeField; iprot.readStructBegin(); while (true) @@ -95232,9 +96432,8 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_ } switch (schemeField.id) { case 0: // SUCCESS - if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.success = new AggrStats(); - struct.success.read(iprot); + if (schemeField.type == org.apache.thrift.protocol.TType.BOOL) { + struct.success = iprot.readBool(); struct.setSuccessIsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); @@ -95251,13 +96450,31 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_ break; case 2: // O2 if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { - struct.o2 = new MetaException(); + struct.o2 = new InvalidObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } else { org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } break; + case 3: // O3 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; + case 4: // O4 + if (schemeField.type == org.apache.thrift.protocol.TType.STRUCT) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } else { + org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); + } + break; default: org.apache.thrift.protocol.TProtocolUtil.skip(iprot, schemeField.type); } @@ -95267,13 +96484,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_aggr_stats_for_ struct.validate(); } - public void write(org.apache.thrift.protocol.TProtocol oprot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol oprot, set_aggr_stats_for_result struct) throws org.apache.thrift.TException { struct.validate(); oprot.writeStructBegin(STRUCT_DESC); - if (struct.success != null) { + if (struct.isSetSuccess()) { oprot.writeFieldBegin(SUCCESS_FIELD_DESC); - struct.success.write(oprot); + oprot.writeBool(struct.success); oprot.writeFieldEnd(); } if (struct.o1 != null) { @@ -95286,22 +96503,32 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_aggr_stats_for struct.o2.write(oprot); 
oprot.writeFieldEnd(); } + if (struct.o3 != null) { + oprot.writeFieldBegin(O3_FIELD_DESC); + struct.o3.write(oprot); + oprot.writeFieldEnd(); + } + if (struct.o4 != null) { + oprot.writeFieldBegin(O4_FIELD_DESC); + struct.o4.write(oprot); + oprot.writeFieldEnd(); + } oprot.writeFieldStop(); oprot.writeStructEnd(); } } - private static class get_aggr_stats_for_resultTupleSchemeFactory implements SchemeFactory { - public get_aggr_stats_for_resultTupleScheme getScheme() { - return new get_aggr_stats_for_resultTupleScheme(); + private static class set_aggr_stats_for_resultTupleSchemeFactory implements SchemeFactory { + public set_aggr_stats_for_resultTupleScheme getScheme() { + return new set_aggr_stats_for_resultTupleScheme(); } } - private static class get_aggr_stats_for_resultTupleScheme extends TupleScheme { + private static class set_aggr_stats_for_resultTupleScheme extends TupleScheme { @Override - public void write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { + public void write(org.apache.thrift.protocol.TProtocol prot, set_aggr_stats_for_result struct) throws org.apache.thrift.TException { TTupleProtocol oprot = (TTupleProtocol) prot; BitSet optionals = new BitSet(); if (struct.isSetSuccess()) { @@ -95313,9 +96540,15 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_ if (struct.isSetO2()) { optionals.set(2); } - oprot.writeBitSet(optionals, 3); + if (struct.isSetO3()) { + optionals.set(3); + } + if (struct.isSetO4()) { + optionals.set(4); + } + oprot.writeBitSet(optionals, 5); if (struct.isSetSuccess()) { - struct.success.write(oprot); + oprot.writeBool(struct.success); } if (struct.isSetO1()) { struct.o1.write(oprot); @@ -95323,15 +96556,20 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_ if (struct.isSetO2()) { struct.o2.write(oprot); } + if (struct.isSetO3()) { + struct.o3.write(oprot); + } + if (struct.isSetO4()) { + struct.o4.write(oprot); + } } @Override - public void read(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_result struct) throws org.apache.thrift.TException { + public void read(org.apache.thrift.protocol.TProtocol prot, set_aggr_stats_for_result struct) throws org.apache.thrift.TException { TTupleProtocol iprot = (TTupleProtocol) prot; - BitSet incoming = iprot.readBitSet(3); + BitSet incoming = iprot.readBitSet(5); if (incoming.get(0)) { - struct.success = new AggrStats(); - struct.success.read(iprot); + struct.success = iprot.readBool(); struct.setSuccessIsSet(true); } if (incoming.get(1)) { @@ -95340,10 +96578,20 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_aggr_stats_for_r struct.setO1IsSet(true); } if (incoming.get(2)) { - struct.o2 = new MetaException(); + struct.o2 = new InvalidObjectException(); struct.o2.read(iprot); struct.setO2IsSet(true); } + if (incoming.get(3)) { + struct.o3 = new MetaException(); + struct.o3.read(iprot); + struct.setO3IsSet(true); + } + if (incoming.get(4)) { + struct.o4 = new InvalidInputException(); + struct.o4.read(iprot); + struct.setO4IsSet(true); + } } } @@ -102013,13 +103261,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_functions_resul case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list796 = iprot.readListBegin(); - struct.success = new ArrayList(_list796.size); - for (int _i797 = 0; _i797 < _list796.size; ++_i797) + org.apache.thrift.protocol.TList _list804 
= iprot.readListBegin(); + struct.success = new ArrayList(_list804.size); + for (int _i805 = 0; _i805 < _list804.size; ++_i805) { - String _elem798; // required - _elem798 = iprot.readString(); - struct.success.add(_elem798); + String _elem806; // required + _elem806 = iprot.readString(); + struct.success.add(_elem806); } iprot.readListEnd(); } @@ -102054,9 +103302,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_functions_resu oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter799 : struct.success) + for (String _iter807 : struct.success) { - oprot.writeString(_iter799); + oprot.writeString(_iter807); } oprot.writeListEnd(); } @@ -102095,9 +103343,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_functions_resul if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter800 : struct.success) + for (String _iter808 : struct.success) { - oprot.writeString(_iter800); + oprot.writeString(_iter808); } } } @@ -102112,13 +103360,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_functions_result BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list801 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list801.size); - for (int _i802 = 0; _i802 < _list801.size; ++_i802) + org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list809.size); + for (int _i810 = 0; _i810 < _list809.size; ++_i810) { - String _elem803; // required - _elem803 = iprot.readString(); - struct.success.add(_elem803); + String _elem811; // required + _elem811 = iprot.readString(); + struct.success.add(_elem811); } } struct.setSuccessIsSet(true); @@ -105461,13 +106709,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_role_names_resu case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list804 = iprot.readListBegin(); - struct.success = new ArrayList(_list804.size); - for (int _i805 = 0; _i805 < _list804.size; ++_i805) + org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); + struct.success = new ArrayList(_list812.size); + for (int _i813 = 0; _i813 < _list812.size; ++_i813) { - String _elem806; // required - _elem806 = iprot.readString(); - struct.success.add(_elem806); + String _elem814; // required + _elem814 = iprot.readString(); + struct.success.add(_elem814); } iprot.readListEnd(); } @@ -105502,9 +106750,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_role_names_res oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter807 : struct.success) + for (String _iter815 : struct.success) { - oprot.writeString(_iter807); + oprot.writeString(_iter815); } oprot.writeListEnd(); } @@ -105543,9 +106791,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_role_names_resu if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter808 : struct.success) + for (String _iter816 : struct.success) { - oprot.writeString(_iter808); + oprot.writeString(_iter816); } } } @@ 
-105560,13 +106808,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_role_names_resul BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list809 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list809.size); - for (int _i810 = 0; _i810 < _list809.size; ++_i810) + org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list817.size); + for (int _i818 = 0; _i818 < _list817.size; ++_i818) { - String _elem811; // required - _elem811 = iprot.readString(); - struct.success.add(_elem811); + String _elem819; // required + _elem819 = iprot.readString(); + struct.success.add(_elem819); } } struct.setSuccessIsSet(true); @@ -108857,14 +110105,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_roles_result s case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list812 = iprot.readListBegin(); - struct.success = new ArrayList(_list812.size); - for (int _i813 = 0; _i813 < _list812.size; ++_i813) + org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); + struct.success = new ArrayList(_list820.size); + for (int _i821 = 0; _i821 < _list820.size; ++_i821) { - Role _elem814; // required - _elem814 = new Role(); - _elem814.read(iprot); - struct.success.add(_elem814); + Role _elem822; // required + _elem822 = new Role(); + _elem822.read(iprot); + struct.success.add(_elem822); } iprot.readListEnd(); } @@ -108899,9 +110147,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_roles_result oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (Role _iter815 : struct.success) + for (Role _iter823 : struct.success) { - _iter815.write(oprot); + _iter823.write(oprot); } oprot.writeListEnd(); } @@ -108940,9 +110188,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_roles_result s if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (Role _iter816 : struct.success) + for (Role _iter824 : struct.success) { - _iter816.write(oprot); + _iter824.write(oprot); } } } @@ -108957,14 +110205,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_roles_result st BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list817 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list817.size); - for (int _i818 = 0; _i818 < _list817.size; ++_i818) + org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list825.size); + for (int _i826 = 0; _i826 < _list825.size; ++_i826) { - Role _elem819; // required - _elem819 = new Role(); - _elem819.read(iprot); - struct.success.add(_elem819); + Role _elem827; // required + _elem827 = new Role(); + _elem827.read(iprot); + struct.success.add(_elem827); } } struct.setSuccessIsSet(true); @@ -111972,13 +113220,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, get_privilege_set_a case 3: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - 
org.apache.thrift.protocol.TList _list820 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list820.size); - for (int _i821 = 0; _i821 < _list820.size; ++_i821) + org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list828.size); + for (int _i829 = 0; _i829 < _list828.size; ++_i829) { - String _elem822; // required - _elem822 = iprot.readString(); - struct.group_names.add(_elem822); + String _elem830; // required + _elem830 = iprot.readString(); + struct.group_names.add(_elem830); } iprot.readListEnd(); } @@ -112014,9 +113262,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, get_privilege_set_ oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter823 : struct.group_names) + for (String _iter831 : struct.group_names) { - oprot.writeString(_iter823); + oprot.writeString(_iter831); } oprot.writeListEnd(); } @@ -112059,9 +113307,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_a if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter824 : struct.group_names) + for (String _iter832 : struct.group_names) { - oprot.writeString(_iter824); + oprot.writeString(_iter832); } } } @@ -112082,13 +113330,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, get_privilege_set_ar } if (incoming.get(2)) { { - org.apache.thrift.protocol.TList _list825 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list825.size); - for (int _i826 = 0; _i826 < _list825.size; ++_i826) + org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list833.size); + for (int _i834 = 0; _i834 < _list833.size; ++_i834) { - String _elem827; // required - _elem827 = iprot.readString(); - struct.group_names.add(_elem827); + String _elem835; // required + _elem835 = iprot.readString(); + struct.group_names.add(_elem835); } } struct.setGroup_namesIsSet(true); @@ -113546,14 +114794,14 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, list_privileges_res case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list828 = iprot.readListBegin(); - struct.success = new ArrayList(_list828.size); - for (int _i829 = 0; _i829 < _list828.size; ++_i829) + org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); + struct.success = new ArrayList(_list836.size); + for (int _i837 = 0; _i837 < _list836.size; ++_i837) { - HiveObjectPrivilege _elem830; // required - _elem830 = new HiveObjectPrivilege(); - _elem830.read(iprot); - struct.success.add(_elem830); + HiveObjectPrivilege _elem838; // required + _elem838 = new HiveObjectPrivilege(); + _elem838.read(iprot); + struct.success.add(_elem838); } iprot.readListEnd(); } @@ -113588,9 +114836,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, list_privileges_re oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, struct.success.size())); - for (HiveObjectPrivilege _iter831 : struct.success) + for (HiveObjectPrivilege _iter839 : struct.success) { - _iter831.write(oprot); + _iter839.write(oprot); } 
oprot.writeListEnd(); } @@ -113629,9 +114877,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, list_privileges_res if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (HiveObjectPrivilege _iter832 : struct.success) + for (HiveObjectPrivilege _iter840 : struct.success) { - _iter832.write(oprot); + _iter840.write(oprot); } } } @@ -113646,14 +114894,14 @@ public void read(org.apache.thrift.protocol.TProtocol prot, list_privileges_resu BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list833 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); - struct.success = new ArrayList(_list833.size); - for (int _i834 = 0; _i834 < _list833.size; ++_i834) + org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRUCT, iprot.readI32()); + struct.success = new ArrayList(_list841.size); + for (int _i842 = 0; _i842 < _list841.size; ++_i842) { - HiveObjectPrivilege _elem835; // required - _elem835 = new HiveObjectPrivilege(); - _elem835.read(iprot); - struct.success.add(_elem835); + HiveObjectPrivilege _elem843; // required + _elem843 = new HiveObjectPrivilege(); + _elem843.read(iprot); + struct.success.add(_elem843); } } struct.setSuccessIsSet(true); @@ -116558,13 +117806,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_args struct case 2: // GROUP_NAMES if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list836 = iprot.readListBegin(); - struct.group_names = new ArrayList(_list836.size); - for (int _i837 = 0; _i837 < _list836.size; ++_i837) + org.apache.thrift.protocol.TList _list844 = iprot.readListBegin(); + struct.group_names = new ArrayList(_list844.size); + for (int _i845 = 0; _i845 < _list844.size; ++_i845) { - String _elem838; // required - _elem838 = iprot.readString(); - struct.group_names.add(_elem838); + String _elem846; // required + _elem846 = iprot.readString(); + struct.group_names.add(_elem846); } iprot.readListEnd(); } @@ -116595,9 +117843,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_args struc oprot.writeFieldBegin(GROUP_NAMES_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.group_names.size())); - for (String _iter839 : struct.group_names) + for (String _iter847 : struct.group_names) { - oprot.writeString(_iter839); + oprot.writeString(_iter847); } oprot.writeListEnd(); } @@ -116634,9 +117882,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct if (struct.isSetGroup_names()) { { oprot.writeI32(struct.group_names.size()); - for (String _iter840 : struct.group_names) + for (String _iter848 : struct.group_names) { - oprot.writeString(_iter840); + oprot.writeString(_iter848); } } } @@ -116652,13 +117900,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_args struct) } if (incoming.get(1)) { { - org.apache.thrift.protocol.TList _list841 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.group_names = new ArrayList(_list841.size); - for (int _i842 = 0; _i842 < _list841.size; ++_i842) + org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.group_names = new ArrayList(_list849.size); + for (int _i850 = 0; _i850 < 
_list849.size; ++_i850) { - String _elem843; // required - _elem843 = iprot.readString(); - struct.group_names.add(_elem843); + String _elem851; // required + _elem851 = iprot.readString(); + struct.group_names.add(_elem851); } } struct.setGroup_namesIsSet(true); @@ -117064,13 +118312,13 @@ public void read(org.apache.thrift.protocol.TProtocol iprot, set_ugi_result stru case 0: // SUCCESS if (schemeField.type == org.apache.thrift.protocol.TType.LIST) { { - org.apache.thrift.protocol.TList _list844 = iprot.readListBegin(); - struct.success = new ArrayList(_list844.size); - for (int _i845 = 0; _i845 < _list844.size; ++_i845) + org.apache.thrift.protocol.TList _list852 = iprot.readListBegin(); + struct.success = new ArrayList(_list852.size); + for (int _i853 = 0; _i853 < _list852.size; ++_i853) { - String _elem846; // required - _elem846 = iprot.readString(); - struct.success.add(_elem846); + String _elem854; // required + _elem854 = iprot.readString(); + struct.success.add(_elem854); } iprot.readListEnd(); } @@ -117105,9 +118353,9 @@ public void write(org.apache.thrift.protocol.TProtocol oprot, set_ugi_result str oprot.writeFieldBegin(SUCCESS_FIELD_DESC); { oprot.writeListBegin(new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, struct.success.size())); - for (String _iter847 : struct.success) + for (String _iter855 : struct.success) { - oprot.writeString(_iter847); + oprot.writeString(_iter855); } oprot.writeListEnd(); } @@ -117146,9 +118394,9 @@ public void write(org.apache.thrift.protocol.TProtocol prot, set_ugi_result stru if (struct.isSetSuccess()) { { oprot.writeI32(struct.success.size()); - for (String _iter848 : struct.success) + for (String _iter856 : struct.success) { - oprot.writeString(_iter848); + oprot.writeString(_iter856); } } } @@ -117163,13 +118411,13 @@ public void read(org.apache.thrift.protocol.TProtocol prot, set_ugi_result struc BitSet incoming = iprot.readBitSet(2); if (incoming.get(0)) { { - org.apache.thrift.protocol.TList _list849 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); - struct.success = new ArrayList(_list849.size); - for (int _i850 = 0; _i850 < _list849.size; ++_i850) + org.apache.thrift.protocol.TList _list857 = new org.apache.thrift.protocol.TList(org.apache.thrift.protocol.TType.STRING, iprot.readI32()); + struct.success = new ArrayList(_list857.size); + for (int _i858 = 0; _i858 < _list857.size; ++_i858) { - String _elem851; // required - _elem851 = iprot.readString(); - struct.success.add(_elem851); + String _elem859; // required + _elem859 = iprot.readString(); + struct.success.add(_elem859); } } struct.setSuccessIsSet(true); diff --git a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php index 653b60c..95fd1fa 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php +++ b/metastore/src/gen/thrift/gen-php/metastore/ThriftHiveMetastore.php @@ -90,6 +90,7 @@ interface ThriftHiveMetastoreIf extends \FacebookServiceIf { public function get_table_statistics_req(\metastore\TableStatsRequest $request); public function get_partitions_statistics_req(\metastore\PartitionsStatsRequest $request); public function get_aggr_stats_for(\metastore\PartitionsStatsRequest $request); + public function set_aggr_stats_for(\metastore\SetPartitionsStatsRequest $request); public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); public function 
delete_table_column_statistics($db_name, $tbl_name, $col_name); public function create_function(\metastore\Function $func); @@ -4498,6 +4499,69 @@ class ThriftHiveMetastoreClient extends \FacebookServiceClient implements \metas throw new \Exception("get_aggr_stats_for failed: unknown result"); } + public function set_aggr_stats_for(\metastore\SetPartitionsStatsRequest $request) + { + $this->send_set_aggr_stats_for($request); + return $this->recv_set_aggr_stats_for(); + } + + public function send_set_aggr_stats_for(\metastore\SetPartitionsStatsRequest $request) + { + $args = new \metastore\ThriftHiveMetastore_set_aggr_stats_for_args(); + $args->request = $request; + $bin_accel = ($this->output_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_write_binary'); + if ($bin_accel) + { + thrift_protocol_write_binary($this->output_, 'set_aggr_stats_for', TMessageType::CALL, $args, $this->seqid_, $this->output_->isStrictWrite()); + } + else + { + $this->output_->writeMessageBegin('set_aggr_stats_for', TMessageType::CALL, $this->seqid_); + $args->write($this->output_); + $this->output_->writeMessageEnd(); + $this->output_->getTransport()->flush(); + } + } + + public function recv_set_aggr_stats_for() + { + $bin_accel = ($this->input_ instanceof TProtocol::$TBINARYPROTOCOLACCELERATED) && function_exists('thrift_protocol_read_binary'); + if ($bin_accel) $result = thrift_protocol_read_binary($this->input_, '\metastore\ThriftHiveMetastore_set_aggr_stats_for_result', $this->input_->isStrictRead()); + else + { + $rseqid = 0; + $fname = null; + $mtype = 0; + + $this->input_->readMessageBegin($fname, $mtype, $rseqid); + if ($mtype == TMessageType::EXCEPTION) { + $x = new TApplicationException(); + $x->read($this->input_); + $this->input_->readMessageEnd(); + throw $x; + } + $result = new \metastore\ThriftHiveMetastore_set_aggr_stats_for_result(); + $result->read($this->input_); + $this->input_->readMessageEnd(); + } + if ($result->success !== null) { + return $result->success; + } + if ($result->o1 !== null) { + throw $result->o1; + } + if ($result->o2 !== null) { + throw $result->o2; + } + if ($result->o3 !== null) { + throw $result->o3; + } + if ($result->o4 !== null) { + throw $result->o4; + } + throw new \Exception("set_aggr_stats_for failed: unknown result"); + } + public function delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name) { $this->send_delete_partition_column_statistics($db_name, $tbl_name, $part_name, $col_name); @@ -7662,14 +7726,14 @@ class ThriftHiveMetastore_get_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size400 = 0; - $_etype403 = 0; - $xfer += $input->readListBegin($_etype403, $_size400); - for ($_i404 = 0; $_i404 < $_size400; ++$_i404) + $_size407 = 0; + $_etype410 = 0; + $xfer += $input->readListBegin($_etype410, $_size407); + for ($_i411 = 0; $_i411 < $_size407; ++$_i411) { - $elem405 = null; - $xfer += $input->readString($elem405); - $this->success []= $elem405; + $elem412 = null; + $xfer += $input->readString($elem412); + $this->success []= $elem412; } $xfer += $input->readListEnd(); } else { @@ -7705,9 +7769,9 @@ class ThriftHiveMetastore_get_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter406) + foreach ($this->success as $iter413) { - $xfer += $output->writeString($iter406); + $xfer += $output->writeString($iter413); } } $output->writeListEnd(); @@ -7832,14 +7896,14 @@ class 
ThriftHiveMetastore_get_all_databases_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size407 = 0; - $_etype410 = 0; - $xfer += $input->readListBegin($_etype410, $_size407); - for ($_i411 = 0; $_i411 < $_size407; ++$_i411) + $_size414 = 0; + $_etype417 = 0; + $xfer += $input->readListBegin($_etype417, $_size414); + for ($_i418 = 0; $_i418 < $_size414; ++$_i418) { - $elem412 = null; - $xfer += $input->readString($elem412); - $this->success []= $elem412; + $elem419 = null; + $xfer += $input->readString($elem419); + $this->success []= $elem419; } $xfer += $input->readListEnd(); } else { @@ -7875,9 +7939,9 @@ class ThriftHiveMetastore_get_all_databases_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter413) + foreach ($this->success as $iter420) { - $xfer += $output->writeString($iter413); + $xfer += $output->writeString($iter420); } } $output->writeListEnd(); @@ -8818,18 +8882,18 @@ class ThriftHiveMetastore_get_type_all_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size414 = 0; - $_ktype415 = 0; - $_vtype416 = 0; - $xfer += $input->readMapBegin($_ktype415, $_vtype416, $_size414); - for ($_i418 = 0; $_i418 < $_size414; ++$_i418) + $_size421 = 0; + $_ktype422 = 0; + $_vtype423 = 0; + $xfer += $input->readMapBegin($_ktype422, $_vtype423, $_size421); + for ($_i425 = 0; $_i425 < $_size421; ++$_i425) { - $key419 = ''; - $val420 = new \metastore\Type(); - $xfer += $input->readString($key419); - $val420 = new \metastore\Type(); - $xfer += $val420->read($input); - $this->success[$key419] = $val420; + $key426 = ''; + $val427 = new \metastore\Type(); + $xfer += $input->readString($key426); + $val427 = new \metastore\Type(); + $xfer += $val427->read($input); + $this->success[$key426] = $val427; } $xfer += $input->readMapEnd(); } else { @@ -8865,10 +8929,10 @@ class ThriftHiveMetastore_get_type_all_result { { $output->writeMapBegin(TType::STRING, TType::STRUCT, count($this->success)); { - foreach ($this->success as $kiter421 => $viter422) + foreach ($this->success as $kiter428 => $viter429) { - $xfer += $output->writeString($kiter421); - $xfer += $viter422->write($output); + $xfer += $output->writeString($kiter428); + $xfer += $viter429->write($output); } } $output->writeMapEnd(); @@ -9054,15 +9118,15 @@ class ThriftHiveMetastore_get_fields_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size423 = 0; - $_etype426 = 0; - $xfer += $input->readListBegin($_etype426, $_size423); - for ($_i427 = 0; $_i427 < $_size423; ++$_i427) + $_size430 = 0; + $_etype433 = 0; + $xfer += $input->readListBegin($_etype433, $_size430); + for ($_i434 = 0; $_i434 < $_size430; ++$_i434) { - $elem428 = null; - $elem428 = new \metastore\FieldSchema(); - $xfer += $elem428->read($input); - $this->success []= $elem428; + $elem435 = null; + $elem435 = new \metastore\FieldSchema(); + $xfer += $elem435->read($input); + $this->success []= $elem435; } $xfer += $input->readListEnd(); } else { @@ -9114,9 +9178,9 @@ class ThriftHiveMetastore_get_fields_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter429) + foreach ($this->success as $iter436) { - $xfer += $iter429->write($output); + $xfer += $iter436->write($output); } } $output->writeListEnd(); @@ -9312,15 +9376,15 @@ class ThriftHiveMetastore_get_schema_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size430 = 0; - $_etype433 = 0; - $xfer += 
$input->readListBegin($_etype433, $_size430); - for ($_i434 = 0; $_i434 < $_size430; ++$_i434) + $_size437 = 0; + $_etype440 = 0; + $xfer += $input->readListBegin($_etype440, $_size437); + for ($_i441 = 0; $_i441 < $_size437; ++$_i441) { - $elem435 = null; - $elem435 = new \metastore\FieldSchema(); - $xfer += $elem435->read($input); - $this->success []= $elem435; + $elem442 = null; + $elem442 = new \metastore\FieldSchema(); + $xfer += $elem442->read($input); + $this->success []= $elem442; } $xfer += $input->readListEnd(); } else { @@ -9372,9 +9436,9 @@ class ThriftHiveMetastore_get_schema_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter436) + foreach ($this->success as $iter443) { - $xfer += $iter436->write($output); + $xfer += $iter443->write($output); } } $output->writeListEnd(); @@ -10451,14 +10515,14 @@ class ThriftHiveMetastore_get_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size437 = 0; - $_etype440 = 0; - $xfer += $input->readListBegin($_etype440, $_size437); - for ($_i441 = 0; $_i441 < $_size437; ++$_i441) + $_size444 = 0; + $_etype447 = 0; + $xfer += $input->readListBegin($_etype447, $_size444); + for ($_i448 = 0; $_i448 < $_size444; ++$_i448) { - $elem442 = null; - $xfer += $input->readString($elem442); - $this->success []= $elem442; + $elem449 = null; + $xfer += $input->readString($elem449); + $this->success []= $elem449; } $xfer += $input->readListEnd(); } else { @@ -10494,9 +10558,9 @@ class ThriftHiveMetastore_get_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter443) + foreach ($this->success as $iter450) { - $xfer += $output->writeString($iter443); + $xfer += $output->writeString($iter450); } } $output->writeListEnd(); @@ -10643,14 +10707,14 @@ class ThriftHiveMetastore_get_all_tables_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size444 = 0; - $_etype447 = 0; - $xfer += $input->readListBegin($_etype447, $_size444); - for ($_i448 = 0; $_i448 < $_size444; ++$_i448) + $_size451 = 0; + $_etype454 = 0; + $xfer += $input->readListBegin($_etype454, $_size451); + for ($_i455 = 0; $_i455 < $_size451; ++$_i455) { - $elem449 = null; - $xfer += $input->readString($elem449); - $this->success []= $elem449; + $elem456 = null; + $xfer += $input->readString($elem456); + $this->success []= $elem456; } $xfer += $input->readListEnd(); } else { @@ -10686,9 +10750,9 @@ class ThriftHiveMetastore_get_all_tables_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter450) + foreach ($this->success as $iter457) { - $xfer += $output->writeString($iter450); + $xfer += $output->writeString($iter457); } } $output->writeListEnd(); @@ -10982,14 +11046,14 @@ class ThriftHiveMetastore_get_table_objects_by_name_args { case 2: if ($ftype == TType::LST) { $this->tbl_names = array(); - $_size451 = 0; - $_etype454 = 0; - $xfer += $input->readListBegin($_etype454, $_size451); - for ($_i455 = 0; $_i455 < $_size451; ++$_i455) + $_size458 = 0; + $_etype461 = 0; + $xfer += $input->readListBegin($_etype461, $_size458); + for ($_i462 = 0; $_i462 < $_size458; ++$_i462) { - $elem456 = null; - $xfer += $input->readString($elem456); - $this->tbl_names []= $elem456; + $elem463 = null; + $xfer += $input->readString($elem463); + $this->tbl_names []= $elem463; } $xfer += $input->readListEnd(); } else { @@ -11022,9 +11086,9 @@ class 
ThriftHiveMetastore_get_table_objects_by_name_args { { $output->writeListBegin(TType::STRING, count($this->tbl_names)); { - foreach ($this->tbl_names as $iter457) + foreach ($this->tbl_names as $iter464) { - $xfer += $output->writeString($iter457); + $xfer += $output->writeString($iter464); } } $output->writeListEnd(); @@ -11113,15 +11177,15 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size458 = 0; - $_etype461 = 0; - $xfer += $input->readListBegin($_etype461, $_size458); - for ($_i462 = 0; $_i462 < $_size458; ++$_i462) + $_size465 = 0; + $_etype468 = 0; + $xfer += $input->readListBegin($_etype468, $_size465); + for ($_i469 = 0; $_i469 < $_size465; ++$_i469) { - $elem463 = null; - $elem463 = new \metastore\Table(); - $xfer += $elem463->read($input); - $this->success []= $elem463; + $elem470 = null; + $elem470 = new \metastore\Table(); + $xfer += $elem470->read($input); + $this->success []= $elem470; } $xfer += $input->readListEnd(); } else { @@ -11173,9 +11237,9 @@ class ThriftHiveMetastore_get_table_objects_by_name_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter464) + foreach ($this->success as $iter471) { - $xfer += $iter464->write($output); + $xfer += $iter471->write($output); } } $output->writeListEnd(); @@ -11390,14 +11454,14 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size465 = 0; - $_etype468 = 0; - $xfer += $input->readListBegin($_etype468, $_size465); - for ($_i469 = 0; $_i469 < $_size465; ++$_i469) + $_size472 = 0; + $_etype475 = 0; + $xfer += $input->readListBegin($_etype475, $_size472); + for ($_i476 = 0; $_i476 < $_size472; ++$_i476) { - $elem470 = null; - $xfer += $input->readString($elem470); - $this->success []= $elem470; + $elem477 = null; + $xfer += $input->readString($elem477); + $this->success []= $elem477; } $xfer += $input->readListEnd(); } else { @@ -11449,9 +11513,9 @@ class ThriftHiveMetastore_get_table_names_by_filter_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter471) + foreach ($this->success as $iter478) { - $xfer += $output->writeString($iter471); + $xfer += $output->writeString($iter478); } } $output->writeListEnd(); @@ -12444,15 +12508,15 @@ class ThriftHiveMetastore_add_partitions_args { case 1: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size472 = 0; - $_etype475 = 0; - $xfer += $input->readListBegin($_etype475, $_size472); - for ($_i476 = 0; $_i476 < $_size472; ++$_i476) + $_size479 = 0; + $_etype482 = 0; + $xfer += $input->readListBegin($_etype482, $_size479); + for ($_i483 = 0; $_i483 < $_size479; ++$_i483) { - $elem477 = null; - $elem477 = new \metastore\Partition(); - $xfer += $elem477->read($input); - $this->new_parts []= $elem477; + $elem484 = null; + $elem484 = new \metastore\Partition(); + $xfer += $elem484->read($input); + $this->new_parts []= $elem484; } $xfer += $input->readListEnd(); } else { @@ -12480,9 +12544,9 @@ class ThriftHiveMetastore_add_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter478) + foreach ($this->new_parts as $iter485) { - $xfer += $iter478->write($output); + $xfer += $iter485->write($output); } } $output->writeListEnd(); @@ -12711,14 +12775,14 @@ class ThriftHiveMetastore_append_partition_args { case 3: if ($ftype == TType::LST) { 
$this->part_vals = array(); - $_size479 = 0; - $_etype482 = 0; - $xfer += $input->readListBegin($_etype482, $_size479); - for ($_i483 = 0; $_i483 < $_size479; ++$_i483) + $_size486 = 0; + $_etype489 = 0; + $xfer += $input->readListBegin($_etype489, $_size486); + for ($_i490 = 0; $_i490 < $_size486; ++$_i490) { - $elem484 = null; - $xfer += $input->readString($elem484); - $this->part_vals []= $elem484; + $elem491 = null; + $xfer += $input->readString($elem491); + $this->part_vals []= $elem491; } $xfer += $input->readListEnd(); } else { @@ -12756,9 +12820,9 @@ class ThriftHiveMetastore_append_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter485) + foreach ($this->part_vals as $iter492) { - $xfer += $output->writeString($iter485); + $xfer += $output->writeString($iter492); } } $output->writeListEnd(); @@ -13221,14 +13285,14 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size486 = 0; - $_etype489 = 0; - $xfer += $input->readListBegin($_etype489, $_size486); - for ($_i490 = 0; $_i490 < $_size486; ++$_i490) + $_size493 = 0; + $_etype496 = 0; + $xfer += $input->readListBegin($_etype496, $_size493); + for ($_i497 = 0; $_i497 < $_size493; ++$_i497) { - $elem491 = null; - $xfer += $input->readString($elem491); - $this->part_vals []= $elem491; + $elem498 = null; + $xfer += $input->readString($elem498); + $this->part_vals []= $elem498; } $xfer += $input->readListEnd(); } else { @@ -13274,9 +13338,9 @@ class ThriftHiveMetastore_append_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter492) + foreach ($this->part_vals as $iter499) { - $xfer += $output->writeString($iter492); + $xfer += $output->writeString($iter499); } } $output->writeListEnd(); @@ -14061,14 +14125,14 @@ class ThriftHiveMetastore_drop_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size493 = 0; - $_etype496 = 0; - $xfer += $input->readListBegin($_etype496, $_size493); - for ($_i497 = 0; $_i497 < $_size493; ++$_i497) + $_size500 = 0; + $_etype503 = 0; + $xfer += $input->readListBegin($_etype503, $_size500); + for ($_i504 = 0; $_i504 < $_size500; ++$_i504) { - $elem498 = null; - $xfer += $input->readString($elem498); - $this->part_vals []= $elem498; + $elem505 = null; + $xfer += $input->readString($elem505); + $this->part_vals []= $elem505; } $xfer += $input->readListEnd(); } else { @@ -14113,9 +14177,9 @@ class ThriftHiveMetastore_drop_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter499) + foreach ($this->part_vals as $iter506) { - $xfer += $output->writeString($iter499); + $xfer += $output->writeString($iter506); } } $output->writeListEnd(); @@ -14344,14 +14408,14 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size500 = 0; - $_etype503 = 0; - $xfer += $input->readListBegin($_etype503, $_size500); - for ($_i504 = 0; $_i504 < $_size500; ++$_i504) + $_size507 = 0; + $_etype510 = 0; + $xfer += $input->readListBegin($_etype510, $_size507); + for ($_i511 = 0; $_i511 < $_size507; ++$_i511) { - $elem505 = null; - $xfer += $input->readString($elem505); - $this->part_vals []= $elem505; + $elem512 = null; + $xfer += $input->readString($elem512); + $this->part_vals []= $elem512; } 
$xfer += $input->readListEnd(); } else { @@ -14404,9 +14468,9 @@ class ThriftHiveMetastore_drop_partition_with_environment_context_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter506) + foreach ($this->part_vals as $iter513) { - $xfer += $output->writeString($iter506); + $xfer += $output->writeString($iter513); } } $output->writeListEnd(); @@ -15345,14 +15409,14 @@ class ThriftHiveMetastore_get_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size507 = 0; - $_etype510 = 0; - $xfer += $input->readListBegin($_etype510, $_size507); - for ($_i511 = 0; $_i511 < $_size507; ++$_i511) + $_size514 = 0; + $_etype517 = 0; + $xfer += $input->readListBegin($_etype517, $_size514); + for ($_i518 = 0; $_i518 < $_size514; ++$_i518) { - $elem512 = null; - $xfer += $input->readString($elem512); - $this->part_vals []= $elem512; + $elem519 = null; + $xfer += $input->readString($elem519); + $this->part_vals []= $elem519; } $xfer += $input->readListEnd(); } else { @@ -15390,9 +15454,9 @@ class ThriftHiveMetastore_get_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter513) + foreach ($this->part_vals as $iter520) { - $xfer += $output->writeString($iter513); + $xfer += $output->writeString($iter520); } } $output->writeListEnd(); @@ -15610,17 +15674,17 @@ class ThriftHiveMetastore_exchange_partition_args { case 1: if ($ftype == TType::MAP) { $this->partitionSpecs = array(); - $_size514 = 0; - $_ktype515 = 0; - $_vtype516 = 0; - $xfer += $input->readMapBegin($_ktype515, $_vtype516, $_size514); - for ($_i518 = 0; $_i518 < $_size514; ++$_i518) + $_size521 = 0; + $_ktype522 = 0; + $_vtype523 = 0; + $xfer += $input->readMapBegin($_ktype522, $_vtype523, $_size521); + for ($_i525 = 0; $_i525 < $_size521; ++$_i525) { - $key519 = ''; - $val520 = ''; - $xfer += $input->readString($key519); - $xfer += $input->readString($val520); - $this->partitionSpecs[$key519] = $val520; + $key526 = ''; + $val527 = ''; + $xfer += $input->readString($key526); + $xfer += $input->readString($val527); + $this->partitionSpecs[$key526] = $val527; } $xfer += $input->readMapEnd(); } else { @@ -15676,10 +15740,10 @@ class ThriftHiveMetastore_exchange_partition_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->partitionSpecs)); { - foreach ($this->partitionSpecs as $kiter521 => $viter522) + foreach ($this->partitionSpecs as $kiter528 => $viter529) { - $xfer += $output->writeString($kiter521); - $xfer += $output->writeString($viter522); + $xfer += $output->writeString($kiter528); + $xfer += $output->writeString($viter529); } } $output->writeMapEnd(); @@ -15975,14 +16039,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size523 = 0; - $_etype526 = 0; - $xfer += $input->readListBegin($_etype526, $_size523); - for ($_i527 = 0; $_i527 < $_size523; ++$_i527) + $_size530 = 0; + $_etype533 = 0; + $xfer += $input->readListBegin($_etype533, $_size530); + for ($_i534 = 0; $_i534 < $_size530; ++$_i534) { - $elem528 = null; - $xfer += $input->readString($elem528); - $this->part_vals []= $elem528; + $elem535 = null; + $xfer += $input->readString($elem535); + $this->part_vals []= $elem535; } $xfer += $input->readListEnd(); } else { @@ -15999,14 +16063,14 @@ class ThriftHiveMetastore_get_partition_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size529 = 
0; - $_etype532 = 0; - $xfer += $input->readListBegin($_etype532, $_size529); - for ($_i533 = 0; $_i533 < $_size529; ++$_i533) + $_size536 = 0; + $_etype539 = 0; + $xfer += $input->readListBegin($_etype539, $_size536); + for ($_i540 = 0; $_i540 < $_size536; ++$_i540) { - $elem534 = null; - $xfer += $input->readString($elem534); - $this->group_names []= $elem534; + $elem541 = null; + $xfer += $input->readString($elem541); + $this->group_names []= $elem541; } $xfer += $input->readListEnd(); } else { @@ -16044,9 +16108,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter535) + foreach ($this->part_vals as $iter542) { - $xfer += $output->writeString($iter535); + $xfer += $output->writeString($iter542); } } $output->writeListEnd(); @@ -16066,9 +16130,9 @@ class ThriftHiveMetastore_get_partition_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter536) + foreach ($this->group_names as $iter543) { - $xfer += $output->writeString($iter536); + $xfer += $output->writeString($iter543); } } $output->writeListEnd(); @@ -16614,15 +16678,15 @@ class ThriftHiveMetastore_get_partitions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size537 = 0; - $_etype540 = 0; - $xfer += $input->readListBegin($_etype540, $_size537); - for ($_i541 = 0; $_i541 < $_size537; ++$_i541) + $_size544 = 0; + $_etype547 = 0; + $xfer += $input->readListBegin($_etype547, $_size544); + for ($_i548 = 0; $_i548 < $_size544; ++$_i548) { - $elem542 = null; - $elem542 = new \metastore\Partition(); - $xfer += $elem542->read($input); - $this->success []= $elem542; + $elem549 = null; + $elem549 = new \metastore\Partition(); + $xfer += $elem549->read($input); + $this->success []= $elem549; } $xfer += $input->readListEnd(); } else { @@ -16666,9 +16730,9 @@ class ThriftHiveMetastore_get_partitions_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter543) + foreach ($this->success as $iter550) { - $xfer += $iter543->write($output); + $xfer += $iter550->write($output); } } $output->writeListEnd(); @@ -16799,14 +16863,14 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { case 5: if ($ftype == TType::LST) { $this->group_names = array(); - $_size544 = 0; - $_etype547 = 0; - $xfer += $input->readListBegin($_etype547, $_size544); - for ($_i548 = 0; $_i548 < $_size544; ++$_i548) + $_size551 = 0; + $_etype554 = 0; + $xfer += $input->readListBegin($_etype554, $_size551); + for ($_i555 = 0; $_i555 < $_size551; ++$_i555) { - $elem549 = null; - $xfer += $input->readString($elem549); - $this->group_names []= $elem549; + $elem556 = null; + $xfer += $input->readString($elem556); + $this->group_names []= $elem556; } $xfer += $input->readListEnd(); } else { @@ -16854,9 +16918,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter550) + foreach ($this->group_names as $iter557) { - $xfer += $output->writeString($iter550); + $xfer += $output->writeString($iter557); } } $output->writeListEnd(); @@ -16936,15 +17000,15 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size551 = 0; - $_etype554 = 0; - $xfer += $input->readListBegin($_etype554, $_size551); - for ($_i555 = 0; $_i555 < $_size551; 
++$_i555) + $_size558 = 0; + $_etype561 = 0; + $xfer += $input->readListBegin($_etype561, $_size558); + for ($_i562 = 0; $_i562 < $_size558; ++$_i562) { - $elem556 = null; - $elem556 = new \metastore\Partition(); - $xfer += $elem556->read($input); - $this->success []= $elem556; + $elem563 = null; + $elem563 = new \metastore\Partition(); + $xfer += $elem563->read($input); + $this->success []= $elem563; } $xfer += $input->readListEnd(); } else { @@ -16988,9 +17052,9 @@ class ThriftHiveMetastore_get_partitions_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter557) + foreach ($this->success as $iter564) { - $xfer += $iter557->write($output); + $xfer += $iter564->write($output); } } $output->writeListEnd(); @@ -17182,14 +17246,14 @@ class ThriftHiveMetastore_get_partition_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size558 = 0; - $_etype561 = 0; - $xfer += $input->readListBegin($_etype561, $_size558); - for ($_i562 = 0; $_i562 < $_size558; ++$_i562) + $_size565 = 0; + $_etype568 = 0; + $xfer += $input->readListBegin($_etype568, $_size565); + for ($_i569 = 0; $_i569 < $_size565; ++$_i569) { - $elem563 = null; - $xfer += $input->readString($elem563); - $this->success []= $elem563; + $elem570 = null; + $xfer += $input->readString($elem570); + $this->success []= $elem570; } $xfer += $input->readListEnd(); } else { @@ -17225,9 +17289,9 @@ class ThriftHiveMetastore_get_partition_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter564) + foreach ($this->success as $iter571) { - $xfer += $output->writeString($iter564); + $xfer += $output->writeString($iter571); } } $output->writeListEnd(); @@ -17331,14 +17395,14 @@ class ThriftHiveMetastore_get_partitions_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size565 = 0; - $_etype568 = 0; - $xfer += $input->readListBegin($_etype568, $_size565); - for ($_i569 = 0; $_i569 < $_size565; ++$_i569) + $_size572 = 0; + $_etype575 = 0; + $xfer += $input->readListBegin($_etype575, $_size572); + for ($_i576 = 0; $_i576 < $_size572; ++$_i576) { - $elem570 = null; - $xfer += $input->readString($elem570); - $this->part_vals []= $elem570; + $elem577 = null; + $xfer += $input->readString($elem577); + $this->part_vals []= $elem577; } $xfer += $input->readListEnd(); } else { @@ -17383,9 +17447,9 @@ class ThriftHiveMetastore_get_partitions_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter571) + foreach ($this->part_vals as $iter578) { - $xfer += $output->writeString($iter571); + $xfer += $output->writeString($iter578); } } $output->writeListEnd(); @@ -17470,15 +17534,15 @@ class ThriftHiveMetastore_get_partitions_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size572 = 0; - $_etype575 = 0; - $xfer += $input->readListBegin($_etype575, $_size572); - for ($_i576 = 0; $_i576 < $_size572; ++$_i576) + $_size579 = 0; + $_etype582 = 0; + $xfer += $input->readListBegin($_etype582, $_size579); + for ($_i583 = 0; $_i583 < $_size579; ++$_i583) { - $elem577 = null; - $elem577 = new \metastore\Partition(); - $xfer += $elem577->read($input); - $this->success []= $elem577; + $elem584 = null; + $elem584 = new \metastore\Partition(); + $xfer += $elem584->read($input); + $this->success []= $elem584; } $xfer += $input->readListEnd(); } else { @@ -17522,9 +17586,9 @@ class 
ThriftHiveMetastore_get_partitions_ps_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter578) + foreach ($this->success as $iter585) { - $xfer += $iter578->write($output); + $xfer += $iter585->write($output); } } $output->writeListEnd(); @@ -17653,14 +17717,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size579 = 0; - $_etype582 = 0; - $xfer += $input->readListBegin($_etype582, $_size579); - for ($_i583 = 0; $_i583 < $_size579; ++$_i583) + $_size586 = 0; + $_etype589 = 0; + $xfer += $input->readListBegin($_etype589, $_size586); + for ($_i590 = 0; $_i590 < $_size586; ++$_i590) { - $elem584 = null; - $xfer += $input->readString($elem584); - $this->part_vals []= $elem584; + $elem591 = null; + $xfer += $input->readString($elem591); + $this->part_vals []= $elem591; } $xfer += $input->readListEnd(); } else { @@ -17684,14 +17748,14 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { case 6: if ($ftype == TType::LST) { $this->group_names = array(); - $_size585 = 0; - $_etype588 = 0; - $xfer += $input->readListBegin($_etype588, $_size585); - for ($_i589 = 0; $_i589 < $_size585; ++$_i589) + $_size592 = 0; + $_etype595 = 0; + $xfer += $input->readListBegin($_etype595, $_size592); + for ($_i596 = 0; $_i596 < $_size592; ++$_i596) { - $elem590 = null; - $xfer += $input->readString($elem590); - $this->group_names []= $elem590; + $elem597 = null; + $xfer += $input->readString($elem597); + $this->group_names []= $elem597; } $xfer += $input->readListEnd(); } else { @@ -17729,9 +17793,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter591) + foreach ($this->part_vals as $iter598) { - $xfer += $output->writeString($iter591); + $xfer += $output->writeString($iter598); } } $output->writeListEnd(); @@ -17756,9 +17820,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter592) + foreach ($this->group_names as $iter599) { - $xfer += $output->writeString($iter592); + $xfer += $output->writeString($iter599); } } $output->writeListEnd(); @@ -17838,15 +17902,15 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size593 = 0; - $_etype596 = 0; - $xfer += $input->readListBegin($_etype596, $_size593); - for ($_i597 = 0; $_i597 < $_size593; ++$_i597) + $_size600 = 0; + $_etype603 = 0; + $xfer += $input->readListBegin($_etype603, $_size600); + for ($_i604 = 0; $_i604 < $_size600; ++$_i604) { - $elem598 = null; - $elem598 = new \metastore\Partition(); - $xfer += $elem598->read($input); - $this->success []= $elem598; + $elem605 = null; + $elem605 = new \metastore\Partition(); + $xfer += $elem605->read($input); + $this->success []= $elem605; } $xfer += $input->readListEnd(); } else { @@ -17890,9 +17954,9 @@ class ThriftHiveMetastore_get_partitions_ps_with_auth_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter599) + foreach ($this->success as $iter606) { - $xfer += $iter599->write($output); + $xfer += $iter606->write($output); } } $output->writeListEnd(); @@ -18001,14 +18065,14 @@ class ThriftHiveMetastore_get_partition_names_ps_args { case 3: if ($ftype == TType::LST) { $this->part_vals = 
array(); - $_size600 = 0; - $_etype603 = 0; - $xfer += $input->readListBegin($_etype603, $_size600); - for ($_i604 = 0; $_i604 < $_size600; ++$_i604) + $_size607 = 0; + $_etype610 = 0; + $xfer += $input->readListBegin($_etype610, $_size607); + for ($_i611 = 0; $_i611 < $_size607; ++$_i611) { - $elem605 = null; - $xfer += $input->readString($elem605); - $this->part_vals []= $elem605; + $elem612 = null; + $xfer += $input->readString($elem612); + $this->part_vals []= $elem612; } $xfer += $input->readListEnd(); } else { @@ -18053,9 +18117,9 @@ class ThriftHiveMetastore_get_partition_names_ps_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter606) + foreach ($this->part_vals as $iter613) { - $xfer += $output->writeString($iter606); + $xfer += $output->writeString($iter613); } } $output->writeListEnd(); @@ -18139,14 +18203,14 @@ class ThriftHiveMetastore_get_partition_names_ps_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size607 = 0; - $_etype610 = 0; - $xfer += $input->readListBegin($_etype610, $_size607); - for ($_i611 = 0; $_i611 < $_size607; ++$_i611) + $_size614 = 0; + $_etype617 = 0; + $xfer += $input->readListBegin($_etype617, $_size614); + for ($_i618 = 0; $_i618 < $_size614; ++$_i618) { - $elem612 = null; - $xfer += $input->readString($elem612); - $this->success []= $elem612; + $elem619 = null; + $xfer += $input->readString($elem619); + $this->success []= $elem619; } $xfer += $input->readListEnd(); } else { @@ -18190,9 +18254,9 @@ class ThriftHiveMetastore_get_partition_names_ps_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter613) + foreach ($this->success as $iter620) { - $xfer += $output->writeString($iter613); + $xfer += $output->writeString($iter620); } } $output->writeListEnd(); @@ -18414,15 +18478,15 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size614 = 0; - $_etype617 = 0; - $xfer += $input->readListBegin($_etype617, $_size614); - for ($_i618 = 0; $_i618 < $_size614; ++$_i618) + $_size621 = 0; + $_etype624 = 0; + $xfer += $input->readListBegin($_etype624, $_size621); + for ($_i625 = 0; $_i625 < $_size621; ++$_i625) { - $elem619 = null; - $elem619 = new \metastore\Partition(); - $xfer += $elem619->read($input); - $this->success []= $elem619; + $elem626 = null; + $elem626 = new \metastore\Partition(); + $xfer += $elem626->read($input); + $this->success []= $elem626; } $xfer += $input->readListEnd(); } else { @@ -18466,9 +18530,9 @@ class ThriftHiveMetastore_get_partitions_by_filter_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter620) + foreach ($this->success as $iter627) { - $xfer += $iter620->write($output); + $xfer += $iter627->write($output); } } $output->writeListEnd(); @@ -18767,14 +18831,14 @@ class ThriftHiveMetastore_get_partitions_by_names_args { case 3: if ($ftype == TType::LST) { $this->names = array(); - $_size621 = 0; - $_etype624 = 0; - $xfer += $input->readListBegin($_etype624, $_size621); - for ($_i625 = 0; $_i625 < $_size621; ++$_i625) + $_size628 = 0; + $_etype631 = 0; + $xfer += $input->readListBegin($_etype631, $_size628); + for ($_i632 = 0; $_i632 < $_size628; ++$_i632) { - $elem626 = null; - $xfer += $input->readString($elem626); - $this->names []= $elem626; + $elem633 = null; + $xfer += $input->readString($elem633); + $this->names []= $elem633; } $xfer += 
$input->readListEnd(); } else { @@ -18812,9 +18876,9 @@ class ThriftHiveMetastore_get_partitions_by_names_args { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter627) + foreach ($this->names as $iter634) { - $xfer += $output->writeString($iter627); + $xfer += $output->writeString($iter634); } } $output->writeListEnd(); @@ -18894,15 +18958,15 @@ class ThriftHiveMetastore_get_partitions_by_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size628 = 0; - $_etype631 = 0; - $xfer += $input->readListBegin($_etype631, $_size628); - for ($_i632 = 0; $_i632 < $_size628; ++$_i632) + $_size635 = 0; + $_etype638 = 0; + $xfer += $input->readListBegin($_etype638, $_size635); + for ($_i639 = 0; $_i639 < $_size635; ++$_i639) { - $elem633 = null; - $elem633 = new \metastore\Partition(); - $xfer += $elem633->read($input); - $this->success []= $elem633; + $elem640 = null; + $elem640 = new \metastore\Partition(); + $xfer += $elem640->read($input); + $this->success []= $elem640; } $xfer += $input->readListEnd(); } else { @@ -18946,9 +19010,9 @@ class ThriftHiveMetastore_get_partitions_by_names_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter634) + foreach ($this->success as $iter641) { - $xfer += $iter634->write($output); + $xfer += $iter641->write($output); } } $output->writeListEnd(); @@ -19263,15 +19327,15 @@ class ThriftHiveMetastore_alter_partitions_args { case 3: if ($ftype == TType::LST) { $this->new_parts = array(); - $_size635 = 0; - $_etype638 = 0; - $xfer += $input->readListBegin($_etype638, $_size635); - for ($_i639 = 0; $_i639 < $_size635; ++$_i639) + $_size642 = 0; + $_etype645 = 0; + $xfer += $input->readListBegin($_etype645, $_size642); + for ($_i646 = 0; $_i646 < $_size642; ++$_i646) { - $elem640 = null; - $elem640 = new \metastore\Partition(); - $xfer += $elem640->read($input); - $this->new_parts []= $elem640; + $elem647 = null; + $elem647 = new \metastore\Partition(); + $xfer += $elem647->read($input); + $this->new_parts []= $elem647; } $xfer += $input->readListEnd(); } else { @@ -19309,9 +19373,9 @@ class ThriftHiveMetastore_alter_partitions_args { { $output->writeListBegin(TType::STRUCT, count($this->new_parts)); { - foreach ($this->new_parts as $iter641) + foreach ($this->new_parts as $iter648) { - $xfer += $iter641->write($output); + $xfer += $iter648->write($output); } } $output->writeListEnd(); @@ -19745,14 +19809,14 @@ class ThriftHiveMetastore_rename_partition_args { case 3: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size642 = 0; - $_etype645 = 0; - $xfer += $input->readListBegin($_etype645, $_size642); - for ($_i646 = 0; $_i646 < $_size642; ++$_i646) + $_size649 = 0; + $_etype652 = 0; + $xfer += $input->readListBegin($_etype652, $_size649); + for ($_i653 = 0; $_i653 < $_size649; ++$_i653) { - $elem647 = null; - $xfer += $input->readString($elem647); - $this->part_vals []= $elem647; + $elem654 = null; + $xfer += $input->readString($elem654); + $this->part_vals []= $elem654; } $xfer += $input->readListEnd(); } else { @@ -19798,9 +19862,9 @@ class ThriftHiveMetastore_rename_partition_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter648) + foreach ($this->part_vals as $iter655) { - $xfer += $output->writeString($iter648); + $xfer += $output->writeString($iter655); } } $output->writeListEnd(); @@ -19973,14 +20037,14 @@ class 
ThriftHiveMetastore_partition_name_has_valid_characters_args { case 1: if ($ftype == TType::LST) { $this->part_vals = array(); - $_size649 = 0; - $_etype652 = 0; - $xfer += $input->readListBegin($_etype652, $_size649); - for ($_i653 = 0; $_i653 < $_size649; ++$_i653) + $_size656 = 0; + $_etype659 = 0; + $xfer += $input->readListBegin($_etype659, $_size656); + for ($_i660 = 0; $_i660 < $_size656; ++$_i660) { - $elem654 = null; - $xfer += $input->readString($elem654); - $this->part_vals []= $elem654; + $elem661 = null; + $xfer += $input->readString($elem661); + $this->part_vals []= $elem661; } $xfer += $input->readListEnd(); } else { @@ -20015,9 +20079,9 @@ class ThriftHiveMetastore_partition_name_has_valid_characters_args { { $output->writeListBegin(TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $iter655) + foreach ($this->part_vals as $iter662) { - $xfer += $output->writeString($iter655); + $xfer += $output->writeString($iter662); } } $output->writeListEnd(); @@ -20444,14 +20508,14 @@ class ThriftHiveMetastore_partition_name_to_vals_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size656 = 0; - $_etype659 = 0; - $xfer += $input->readListBegin($_etype659, $_size656); - for ($_i660 = 0; $_i660 < $_size656; ++$_i660) + $_size663 = 0; + $_etype666 = 0; + $xfer += $input->readListBegin($_etype666, $_size663); + for ($_i667 = 0; $_i667 < $_size663; ++$_i667) { - $elem661 = null; - $xfer += $input->readString($elem661); - $this->success []= $elem661; + $elem668 = null; + $xfer += $input->readString($elem668); + $this->success []= $elem668; } $xfer += $input->readListEnd(); } else { @@ -20487,9 +20551,9 @@ class ThriftHiveMetastore_partition_name_to_vals_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter662) + foreach ($this->success as $iter669) { - $xfer += $output->writeString($iter662); + $xfer += $output->writeString($iter669); } } $output->writeListEnd(); @@ -20640,17 +20704,17 @@ class ThriftHiveMetastore_partition_name_to_spec_result { case 0: if ($ftype == TType::MAP) { $this->success = array(); - $_size663 = 0; - $_ktype664 = 0; - $_vtype665 = 0; - $xfer += $input->readMapBegin($_ktype664, $_vtype665, $_size663); - for ($_i667 = 0; $_i667 < $_size663; ++$_i667) + $_size670 = 0; + $_ktype671 = 0; + $_vtype672 = 0; + $xfer += $input->readMapBegin($_ktype671, $_vtype672, $_size670); + for ($_i674 = 0; $_i674 < $_size670; ++$_i674) { - $key668 = ''; - $val669 = ''; - $xfer += $input->readString($key668); - $xfer += $input->readString($val669); - $this->success[$key668] = $val669; + $key675 = ''; + $val676 = ''; + $xfer += $input->readString($key675); + $xfer += $input->readString($val676); + $this->success[$key675] = $val676; } $xfer += $input->readMapEnd(); } else { @@ -20686,10 +20750,10 @@ class ThriftHiveMetastore_partition_name_to_spec_result { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->success)); { - foreach ($this->success as $kiter670 => $viter671) + foreach ($this->success as $kiter677 => $viter678) { - $xfer += $output->writeString($kiter670); - $xfer += $output->writeString($viter671); + $xfer += $output->writeString($kiter677); + $xfer += $output->writeString($viter678); } } $output->writeMapEnd(); @@ -20797,17 +20861,17 @@ class ThriftHiveMetastore_markPartitionForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size672 = 0; - $_ktype673 = 0; - $_vtype674 = 0; - $xfer += $input->readMapBegin($_ktype673, 
$_vtype674, $_size672); - for ($_i676 = 0; $_i676 < $_size672; ++$_i676) + $_size679 = 0; + $_ktype680 = 0; + $_vtype681 = 0; + $xfer += $input->readMapBegin($_ktype680, $_vtype681, $_size679); + for ($_i683 = 0; $_i683 < $_size679; ++$_i683) { - $key677 = ''; - $val678 = ''; - $xfer += $input->readString($key677); - $xfer += $input->readString($val678); - $this->part_vals[$key677] = $val678; + $key684 = ''; + $val685 = ''; + $xfer += $input->readString($key684); + $xfer += $input->readString($val685); + $this->part_vals[$key684] = $val685; } $xfer += $input->readMapEnd(); } else { @@ -20852,10 +20916,10 @@ class ThriftHiveMetastore_markPartitionForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter679 => $viter680) + foreach ($this->part_vals as $kiter686 => $viter687) { - $xfer += $output->writeString($kiter679); - $xfer += $output->writeString($viter680); + $xfer += $output->writeString($kiter686); + $xfer += $output->writeString($viter687); } } $output->writeMapEnd(); @@ -21147,17 +21211,17 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { case 3: if ($ftype == TType::MAP) { $this->part_vals = array(); - $_size681 = 0; - $_ktype682 = 0; - $_vtype683 = 0; - $xfer += $input->readMapBegin($_ktype682, $_vtype683, $_size681); - for ($_i685 = 0; $_i685 < $_size681; ++$_i685) + $_size688 = 0; + $_ktype689 = 0; + $_vtype690 = 0; + $xfer += $input->readMapBegin($_ktype689, $_vtype690, $_size688); + for ($_i692 = 0; $_i692 < $_size688; ++$_i692) { - $key686 = ''; - $val687 = ''; - $xfer += $input->readString($key686); - $xfer += $input->readString($val687); - $this->part_vals[$key686] = $val687; + $key693 = ''; + $val694 = ''; + $xfer += $input->readString($key693); + $xfer += $input->readString($val694); + $this->part_vals[$key693] = $val694; } $xfer += $input->readMapEnd(); } else { @@ -21202,10 +21266,10 @@ class ThriftHiveMetastore_isPartitionMarkedForEvent_args { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->part_vals)); { - foreach ($this->part_vals as $kiter688 => $viter689) + foreach ($this->part_vals as $kiter695 => $viter696) { - $xfer += $output->writeString($kiter688); - $xfer += $output->writeString($viter689); + $xfer += $output->writeString($kiter695); + $xfer += $output->writeString($viter696); } } $output->writeMapEnd(); @@ -22565,15 +22629,15 @@ class ThriftHiveMetastore_get_indexes_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size690 = 0; - $_etype693 = 0; - $xfer += $input->readListBegin($_etype693, $_size690); - for ($_i694 = 0; $_i694 < $_size690; ++$_i694) + $_size697 = 0; + $_etype700 = 0; + $xfer += $input->readListBegin($_etype700, $_size697); + for ($_i701 = 0; $_i701 < $_size697; ++$_i701) { - $elem695 = null; - $elem695 = new \metastore\Index(); - $xfer += $elem695->read($input); - $this->success []= $elem695; + $elem702 = null; + $elem702 = new \metastore\Index(); + $xfer += $elem702->read($input); + $this->success []= $elem702; } $xfer += $input->readListEnd(); } else { @@ -22617,9 +22681,9 @@ class ThriftHiveMetastore_get_indexes_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter696) + foreach ($this->success as $iter703) { - $xfer += $iter696->write($output); + $xfer += $iter703->write($output); } } $output->writeListEnd(); @@ -22811,14 +22875,14 @@ class ThriftHiveMetastore_get_index_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); 
- $_size697 = 0; - $_etype700 = 0; - $xfer += $input->readListBegin($_etype700, $_size697); - for ($_i701 = 0; $_i701 < $_size697; ++$_i701) + $_size704 = 0; + $_etype707 = 0; + $xfer += $input->readListBegin($_etype707, $_size704); + for ($_i708 = 0; $_i708 < $_size704; ++$_i708) { - $elem702 = null; - $xfer += $input->readString($elem702); - $this->success []= $elem702; + $elem709 = null; + $xfer += $input->readString($elem709); + $this->success []= $elem709; } $xfer += $input->readListEnd(); } else { @@ -22854,9 +22918,9 @@ class ThriftHiveMetastore_get_index_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter703) + foreach ($this->success as $iter710) { - $xfer += $output->writeString($iter703); + $xfer += $output->writeString($iter710); } } $output->writeListEnd(); @@ -24517,53 +24581,30 @@ class ThriftHiveMetastore_get_aggr_stats_for_result { } -class ThriftHiveMetastore_delete_partition_column_statistics_args { +class ThriftHiveMetastore_set_aggr_stats_for_args { static $_TSPEC; - public $db_name = null; - public $tbl_name = null; - public $part_name = null; - public $col_name = null; + public $request = null; public function __construct($vals=null) { if (!isset(self::$_TSPEC)) { self::$_TSPEC = array( 1 => array( - 'var' => 'db_name', - 'type' => TType::STRING, - ), - 2 => array( - 'var' => 'tbl_name', - 'type' => TType::STRING, - ), - 3 => array( - 'var' => 'part_name', - 'type' => TType::STRING, - ), - 4 => array( - 'var' => 'col_name', - 'type' => TType::STRING, + 'var' => 'request', + 'type' => TType::STRUCT, + 'class' => '\metastore\SetPartitionsStatsRequest', ), ); } if (is_array($vals)) { - if (isset($vals['db_name'])) { - $this->db_name = $vals['db_name']; - } - if (isset($vals['tbl_name'])) { - $this->tbl_name = $vals['tbl_name']; - } - if (isset($vals['part_name'])) { - $this->part_name = $vals['part_name']; - } - if (isset($vals['col_name'])) { - $this->col_name = $vals['col_name']; + if (isset($vals['request'])) { + $this->request = $vals['request']; } } } public function getName() { - return 'ThriftHiveMetastore_delete_partition_column_statistics_args'; + return 'ThriftHiveMetastore_set_aggr_stats_for_args'; } public function read($input) @@ -24582,13 +24623,273 @@ class ThriftHiveMetastore_delete_partition_column_statistics_args { switch ($fid) { case 1: - if ($ftype == TType::STRING) { - $xfer += $input->readString($this->db_name); + if ($ftype == TType::STRUCT) { + $this->request = new \metastore\SetPartitionsStatsRequest(); + $xfer += $this->request->read($input); } else { $xfer += $input->skip($ftype); } break; - case 2: + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_set_aggr_stats_for_args'); + if ($this->request !== null) { + if (!is_object($this->request)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('request', TType::STRUCT, 1); + $xfer += $this->request->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_set_aggr_stats_for_result { + static $_TSPEC; + + public $success = null; + public $o1 = null; + public $o2 = null; + public $o3 = null; + public $o4 = null; + + 
public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 0 => array( + 'var' => 'success', + 'type' => TType::BOOL, + ), + 1 => array( + 'var' => 'o1', + 'type' => TType::STRUCT, + 'class' => '\metastore\NoSuchObjectException', + ), + 2 => array( + 'var' => 'o2', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidObjectException', + ), + 3 => array( + 'var' => 'o3', + 'type' => TType::STRUCT, + 'class' => '\metastore\MetaException', + ), + 4 => array( + 'var' => 'o4', + 'type' => TType::STRUCT, + 'class' => '\metastore\InvalidInputException', + ), + ); + } + if (is_array($vals)) { + if (isset($vals['success'])) { + $this->success = $vals['success']; + } + if (isset($vals['o1'])) { + $this->o1 = $vals['o1']; + } + if (isset($vals['o2'])) { + $this->o2 = $vals['o2']; + } + if (isset($vals['o3'])) { + $this->o3 = $vals['o3']; + } + if (isset($vals['o4'])) { + $this->o4 = $vals['o4']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_set_aggr_stats_for_result'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 0: + if ($ftype == TType::BOOL) { + $xfer += $input->readBool($this->success); + } else { + $xfer += $input->skip($ftype); + } + break; + case 1: + if ($ftype == TType::STRUCT) { + $this->o1 = new \metastore\NoSuchObjectException(); + $xfer += $this->o1->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: + if ($ftype == TType::STRUCT) { + $this->o2 = new \metastore\InvalidObjectException(); + $xfer += $this->o2->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 3: + if ($ftype == TType::STRUCT) { + $this->o3 = new \metastore\MetaException(); + $xfer += $this->o3->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + case 4: + if ($ftype == TType::STRUCT) { + $this->o4 = new \metastore\InvalidInputException(); + $xfer += $this->o4->read($input); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('ThriftHiveMetastore_set_aggr_stats_for_result'); + if ($this->success !== null) { + $xfer += $output->writeFieldBegin('success', TType::BOOL, 0); + $xfer += $output->writeBool($this->success); + $xfer += $output->writeFieldEnd(); + } + if ($this->o1 !== null) { + $xfer += $output->writeFieldBegin('o1', TType::STRUCT, 1); + $xfer += $this->o1->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o2 !== null) { + $xfer += $output->writeFieldBegin('o2', TType::STRUCT, 2); + $xfer += $this->o2->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o3 !== null) { + $xfer += $output->writeFieldBegin('o3', TType::STRUCT, 3); + $xfer += $this->o3->write($output); + $xfer += $output->writeFieldEnd(); + } + if ($this->o4 !== null) { + $xfer += $output->writeFieldBegin('o4', TType::STRUCT, 4); + $xfer += $this->o4->write($output); + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + +class ThriftHiveMetastore_delete_partition_column_statistics_args { + static 
$_TSPEC; + + public $db_name = null; + public $tbl_name = null; + public $part_name = null; + public $col_name = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'db_name', + 'type' => TType::STRING, + ), + 2 => array( + 'var' => 'tbl_name', + 'type' => TType::STRING, + ), + 3 => array( + 'var' => 'part_name', + 'type' => TType::STRING, + ), + 4 => array( + 'var' => 'col_name', + 'type' => TType::STRING, + ), + ); + } + if (is_array($vals)) { + if (isset($vals['db_name'])) { + $this->db_name = $vals['db_name']; + } + if (isset($vals['tbl_name'])) { + $this->tbl_name = $vals['tbl_name']; + } + if (isset($vals['part_name'])) { + $this->part_name = $vals['part_name']; + } + if (isset($vals['col_name'])) { + $this->col_name = $vals['col_name']; + } + } + } + + public function getName() { + return 'ThriftHiveMetastore_delete_partition_column_statistics_args'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::STRING) { + $xfer += $input->readString($this->db_name); + } else { + $xfer += $input->skip($ftype); + } + break; + case 2: if ($ftype == TType::STRING) { $xfer += $input->readString($this->tbl_name); } else { @@ -25847,14 +26148,14 @@ class ThriftHiveMetastore_get_functions_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size704 = 0; - $_etype707 = 0; - $xfer += $input->readListBegin($_etype707, $_size704); - for ($_i708 = 0; $_i708 < $_size704; ++$_i708) + $_size711 = 0; + $_etype714 = 0; + $xfer += $input->readListBegin($_etype714, $_size711); + for ($_i715 = 0; $_i715 < $_size711; ++$_i715) { - $elem709 = null; - $xfer += $input->readString($elem709); - $this->success []= $elem709; + $elem716 = null; + $xfer += $input->readString($elem716); + $this->success []= $elem716; } $xfer += $input->readListEnd(); } else { @@ -25890,9 +26191,9 @@ class ThriftHiveMetastore_get_functions_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter710) + foreach ($this->success as $iter717) { - $xfer += $output->writeString($iter710); + $xfer += $output->writeString($iter717); } } $output->writeListEnd(); @@ -26567,14 +26868,14 @@ class ThriftHiveMetastore_get_role_names_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size711 = 0; - $_etype714 = 0; - $xfer += $input->readListBegin($_etype714, $_size711); - for ($_i715 = 0; $_i715 < $_size711; ++$_i715) + $_size718 = 0; + $_etype721 = 0; + $xfer += $input->readListBegin($_etype721, $_size718); + for ($_i722 = 0; $_i722 < $_size718; ++$_i722) { - $elem716 = null; - $xfer += $input->readString($elem716); - $this->success []= $elem716; + $elem723 = null; + $xfer += $input->readString($elem723); + $this->success []= $elem723; } $xfer += $input->readListEnd(); } else { @@ -26610,9 +26911,9 @@ class ThriftHiveMetastore_get_role_names_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter717) + foreach ($this->success as $iter724) { - $xfer += $output->writeString($iter717); + $xfer += $output->writeString($iter724); } } $output->writeListEnd(); @@ -27252,15 +27553,15 @@ class ThriftHiveMetastore_list_roles_result { case 0: if ($ftype == TType::LST) { 
$this->success = array(); - $_size718 = 0; - $_etype721 = 0; - $xfer += $input->readListBegin($_etype721, $_size718); - for ($_i722 = 0; $_i722 < $_size718; ++$_i722) + $_size725 = 0; + $_etype728 = 0; + $xfer += $input->readListBegin($_etype728, $_size725); + for ($_i729 = 0; $_i729 < $_size725; ++$_i729) { - $elem723 = null; - $elem723 = new \metastore\Role(); - $xfer += $elem723->read($input); - $this->success []= $elem723; + $elem730 = null; + $elem730 = new \metastore\Role(); + $xfer += $elem730->read($input); + $this->success []= $elem730; } $xfer += $input->readListEnd(); } else { @@ -27296,9 +27597,9 @@ class ThriftHiveMetastore_list_roles_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter724) + foreach ($this->success as $iter731) { - $xfer += $iter724->write($output); + $xfer += $iter731->write($output); } } $output->writeListEnd(); @@ -27924,14 +28225,14 @@ class ThriftHiveMetastore_get_privilege_set_args { case 3: if ($ftype == TType::LST) { $this->group_names = array(); - $_size725 = 0; - $_etype728 = 0; - $xfer += $input->readListBegin($_etype728, $_size725); - for ($_i729 = 0; $_i729 < $_size725; ++$_i729) + $_size732 = 0; + $_etype735 = 0; + $xfer += $input->readListBegin($_etype735, $_size732); + for ($_i736 = 0; $_i736 < $_size732; ++$_i736) { - $elem730 = null; - $xfer += $input->readString($elem730); - $this->group_names []= $elem730; + $elem737 = null; + $xfer += $input->readString($elem737); + $this->group_names []= $elem737; } $xfer += $input->readListEnd(); } else { @@ -27972,9 +28273,9 @@ class ThriftHiveMetastore_get_privilege_set_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter731) + foreach ($this->group_names as $iter738) { - $xfer += $output->writeString($iter731); + $xfer += $output->writeString($iter738); } } $output->writeListEnd(); @@ -28261,15 +28562,15 @@ class ThriftHiveMetastore_list_privileges_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size732 = 0; - $_etype735 = 0; - $xfer += $input->readListBegin($_etype735, $_size732); - for ($_i736 = 0; $_i736 < $_size732; ++$_i736) + $_size739 = 0; + $_etype742 = 0; + $xfer += $input->readListBegin($_etype742, $_size739); + for ($_i743 = 0; $_i743 < $_size739; ++$_i743) { - $elem737 = null; - $elem737 = new \metastore\HiveObjectPrivilege(); - $xfer += $elem737->read($input); - $this->success []= $elem737; + $elem744 = null; + $elem744 = new \metastore\HiveObjectPrivilege(); + $xfer += $elem744->read($input); + $this->success []= $elem744; } $xfer += $input->readListEnd(); } else { @@ -28305,9 +28606,9 @@ class ThriftHiveMetastore_list_privileges_result { { $output->writeListBegin(TType::STRUCT, count($this->success)); { - foreach ($this->success as $iter738) + foreach ($this->success as $iter745) { - $xfer += $iter738->write($output); + $xfer += $iter745->write($output); } } $output->writeListEnd(); @@ -28906,14 +29207,14 @@ class ThriftHiveMetastore_set_ugi_args { case 2: if ($ftype == TType::LST) { $this->group_names = array(); - $_size739 = 0; - $_etype742 = 0; - $xfer += $input->readListBegin($_etype742, $_size739); - for ($_i743 = 0; $_i743 < $_size739; ++$_i743) + $_size746 = 0; + $_etype749 = 0; + $xfer += $input->readListBegin($_etype749, $_size746); + for ($_i750 = 0; $_i750 < $_size746; ++$_i750) { - $elem744 = null; - $xfer += $input->readString($elem744); - $this->group_names []= $elem744; + $elem751 = null; + $xfer += 
$input->readString($elem751); + $this->group_names []= $elem751; } $xfer += $input->readListEnd(); } else { @@ -28946,9 +29247,9 @@ class ThriftHiveMetastore_set_ugi_args { { $output->writeListBegin(TType::STRING, count($this->group_names)); { - foreach ($this->group_names as $iter745) + foreach ($this->group_names as $iter752) { - $xfer += $output->writeString($iter745); + $xfer += $output->writeString($iter752); } } $output->writeListEnd(); @@ -29018,14 +29319,14 @@ class ThriftHiveMetastore_set_ugi_result { case 0: if ($ftype == TType::LST) { $this->success = array(); - $_size746 = 0; - $_etype749 = 0; - $xfer += $input->readListBegin($_etype749, $_size746); - for ($_i750 = 0; $_i750 < $_size746; ++$_i750) + $_size753 = 0; + $_etype756 = 0; + $xfer += $input->readListBegin($_etype756, $_size753); + for ($_i757 = 0; $_i757 < $_size753; ++$_i757) { - $elem751 = null; - $xfer += $input->readString($elem751); - $this->success []= $elem751; + $elem758 = null; + $xfer += $input->readString($elem758); + $this->success []= $elem758; } $xfer += $input->readListEnd(); } else { @@ -29061,9 +29362,9 @@ class ThriftHiveMetastore_set_ugi_result { { $output->writeListBegin(TType::STRING, count($this->success)); { - foreach ($this->success as $iter752) + foreach ($this->success as $iter759) { - $xfer += $output->writeString($iter752); + $xfer += $output->writeString($iter759); } } $output->writeListEnd(); diff --git a/metastore/src/gen/thrift/gen-php/metastore/Types.php b/metastore/src/gen/thrift/gen-php/metastore/Types.php index 6cdffd5..0f0c31b 100644 --- a/metastore/src/gen/thrift/gen-php/metastore/Types.php +++ b/metastore/src/gen/thrift/gen-php/metastore/Types.php @@ -6155,6 +6155,106 @@ class AggrStats { } +class SetPartitionsStatsRequest { + static $_TSPEC; + + public $colStats = null; + + public function __construct($vals=null) { + if (!isset(self::$_TSPEC)) { + self::$_TSPEC = array( + 1 => array( + 'var' => 'colStats', + 'type' => TType::LST, + 'etype' => TType::STRUCT, + 'elem' => array( + 'type' => TType::STRUCT, + 'class' => '\metastore\ColumnStatistics', + ), + ), + ); + } + if (is_array($vals)) { + if (isset($vals['colStats'])) { + $this->colStats = $vals['colStats']; + } + } + } + + public function getName() { + return 'SetPartitionsStatsRequest'; + } + + public function read($input) + { + $xfer = 0; + $fname = null; + $ftype = 0; + $fid = 0; + $xfer += $input->readStructBegin($fname); + while (true) + { + $xfer += $input->readFieldBegin($fname, $ftype, $fid); + if ($ftype == TType::STOP) { + break; + } + switch ($fid) + { + case 1: + if ($ftype == TType::LST) { + $this->colStats = array(); + $_size223 = 0; + $_etype226 = 0; + $xfer += $input->readListBegin($_etype226, $_size223); + for ($_i227 = 0; $_i227 < $_size223; ++$_i227) + { + $elem228 = null; + $elem228 = new \metastore\ColumnStatistics(); + $xfer += $elem228->read($input); + $this->colStats []= $elem228; + } + $xfer += $input->readListEnd(); + } else { + $xfer += $input->skip($ftype); + } + break; + default: + $xfer += $input->skip($ftype); + break; + } + $xfer += $input->readFieldEnd(); + } + $xfer += $input->readStructEnd(); + return $xfer; + } + + public function write($output) { + $xfer = 0; + $xfer += $output->writeStructBegin('SetPartitionsStatsRequest'); + if ($this->colStats !== null) { + if (!is_array($this->colStats)) { + throw new TProtocolException('Bad type in structure.', TProtocolException::INVALID_DATA); + } + $xfer += $output->writeFieldBegin('colStats', TType::LST, 1); + { + 
$output->writeListBegin(TType::STRUCT, count($this->colStats)); + { + foreach ($this->colStats as $iter229) + { + $xfer += $iter229->write($output); + } + } + $output->writeListEnd(); + } + $xfer += $output->writeFieldEnd(); + } + $xfer += $output->writeFieldStop(); + $xfer += $output->writeStructEnd(); + return $xfer; + } + +} + class Schema { static $_TSPEC; @@ -6219,15 +6319,15 @@ class Schema { case 1: if ($ftype == TType::LST) { $this->fieldSchemas = array(); - $_size223 = 0; - $_etype226 = 0; - $xfer += $input->readListBegin($_etype226, $_size223); - for ($_i227 = 0; $_i227 < $_size223; ++$_i227) + $_size230 = 0; + $_etype233 = 0; + $xfer += $input->readListBegin($_etype233, $_size230); + for ($_i234 = 0; $_i234 < $_size230; ++$_i234) { - $elem228 = null; - $elem228 = new \metastore\FieldSchema(); - $xfer += $elem228->read($input); - $this->fieldSchemas []= $elem228; + $elem235 = null; + $elem235 = new \metastore\FieldSchema(); + $xfer += $elem235->read($input); + $this->fieldSchemas []= $elem235; } $xfer += $input->readListEnd(); } else { @@ -6237,17 +6337,17 @@ class Schema { case 2: if ($ftype == TType::MAP) { $this->properties = array(); - $_size229 = 0; - $_ktype230 = 0; - $_vtype231 = 0; - $xfer += $input->readMapBegin($_ktype230, $_vtype231, $_size229); - for ($_i233 = 0; $_i233 < $_size229; ++$_i233) + $_size236 = 0; + $_ktype237 = 0; + $_vtype238 = 0; + $xfer += $input->readMapBegin($_ktype237, $_vtype238, $_size236); + for ($_i240 = 0; $_i240 < $_size236; ++$_i240) { - $key234 = ''; - $val235 = ''; - $xfer += $input->readString($key234); - $xfer += $input->readString($val235); - $this->properties[$key234] = $val235; + $key241 = ''; + $val242 = ''; + $xfer += $input->readString($key241); + $xfer += $input->readString($val242); + $this->properties[$key241] = $val242; } $xfer += $input->readMapEnd(); } else { @@ -6275,9 +6375,9 @@ class Schema { { $output->writeListBegin(TType::STRUCT, count($this->fieldSchemas)); { - foreach ($this->fieldSchemas as $iter236) + foreach ($this->fieldSchemas as $iter243) { - $xfer += $iter236->write($output); + $xfer += $iter243->write($output); } } $output->writeListEnd(); @@ -6292,10 +6392,10 @@ class Schema { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach ($this->properties as $kiter237 => $viter238) + foreach ($this->properties as $kiter244 => $viter245) { - $xfer += $output->writeString($kiter237); - $xfer += $output->writeString($viter238); + $xfer += $output->writeString($kiter244); + $xfer += $output->writeString($viter245); } } $output->writeMapEnd(); @@ -6360,17 +6460,17 @@ class EnvironmentContext { case 1: if ($ftype == TType::MAP) { $this->properties = array(); - $_size239 = 0; - $_ktype240 = 0; - $_vtype241 = 0; - $xfer += $input->readMapBegin($_ktype240, $_vtype241, $_size239); - for ($_i243 = 0; $_i243 < $_size239; ++$_i243) + $_size246 = 0; + $_ktype247 = 0; + $_vtype248 = 0; + $xfer += $input->readMapBegin($_ktype247, $_vtype248, $_size246); + for ($_i250 = 0; $_i250 < $_size246; ++$_i250) { - $key244 = ''; - $val245 = ''; - $xfer += $input->readString($key244); - $xfer += $input->readString($val245); - $this->properties[$key244] = $val245; + $key251 = ''; + $val252 = ''; + $xfer += $input->readString($key251); + $xfer += $input->readString($val252); + $this->properties[$key251] = $val252; } $xfer += $input->readMapEnd(); } else { @@ -6398,10 +6498,10 @@ class EnvironmentContext { { $output->writeMapBegin(TType::STRING, TType::STRING, count($this->properties)); { - foreach 
($this->properties as $kiter246 => $viter247) + foreach ($this->properties as $kiter253 => $viter254) { - $xfer += $output->writeString($kiter246); - $xfer += $output->writeString($viter247); + $xfer += $output->writeString($kiter253); + $xfer += $output->writeString($viter254); } } $output->writeMapEnd(); @@ -6471,15 +6571,15 @@ class PartitionsByExprResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size248 = 0; - $_etype251 = 0; - $xfer += $input->readListBegin($_etype251, $_size248); - for ($_i252 = 0; $_i252 < $_size248; ++$_i252) + $_size255 = 0; + $_etype258 = 0; + $xfer += $input->readListBegin($_etype258, $_size255); + for ($_i259 = 0; $_i259 < $_size255; ++$_i259) { - $elem253 = null; - $elem253 = new \metastore\Partition(); - $xfer += $elem253->read($input); - $this->partitions []= $elem253; + $elem260 = null; + $elem260 = new \metastore\Partition(); + $xfer += $elem260->read($input); + $this->partitions []= $elem260; } $xfer += $input->readListEnd(); } else { @@ -6514,9 +6614,9 @@ class PartitionsByExprResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter254) + foreach ($this->partitions as $iter261) { - $xfer += $iter254->write($output); + $xfer += $iter261->write($output); } } $output->writeListEnd(); @@ -6735,15 +6835,15 @@ class TableStatsResult { case 1: if ($ftype == TType::LST) { $this->tableStats = array(); - $_size255 = 0; - $_etype258 = 0; - $xfer += $input->readListBegin($_etype258, $_size255); - for ($_i259 = 0; $_i259 < $_size255; ++$_i259) + $_size262 = 0; + $_etype265 = 0; + $xfer += $input->readListBegin($_etype265, $_size262); + for ($_i266 = 0; $_i266 < $_size262; ++$_i266) { - $elem260 = null; - $elem260 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem260->read($input); - $this->tableStats []= $elem260; + $elem267 = null; + $elem267 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem267->read($input); + $this->tableStats []= $elem267; } $xfer += $input->readListEnd(); } else { @@ -6771,9 +6871,9 @@ class TableStatsResult { { $output->writeListBegin(TType::STRUCT, count($this->tableStats)); { - foreach ($this->tableStats as $iter261) + foreach ($this->tableStats as $iter268) { - $xfer += $iter261->write($output); + $xfer += $iter268->write($output); } } $output->writeListEnd(); @@ -6843,28 +6943,28 @@ class PartitionsStatsResult { case 1: if ($ftype == TType::MAP) { $this->partStats = array(); - $_size262 = 0; - $_ktype263 = 0; - $_vtype264 = 0; - $xfer += $input->readMapBegin($_ktype263, $_vtype264, $_size262); - for ($_i266 = 0; $_i266 < $_size262; ++$_i266) + $_size269 = 0; + $_ktype270 = 0; + $_vtype271 = 0; + $xfer += $input->readMapBegin($_ktype270, $_vtype271, $_size269); + for ($_i273 = 0; $_i273 < $_size269; ++$_i273) { - $key267 = ''; - $val268 = array(); - $xfer += $input->readString($key267); - $val268 = array(); - $_size269 = 0; - $_etype272 = 0; - $xfer += $input->readListBegin($_etype272, $_size269); - for ($_i273 = 0; $_i273 < $_size269; ++$_i273) + $key274 = ''; + $val275 = array(); + $xfer += $input->readString($key274); + $val275 = array(); + $_size276 = 0; + $_etype279 = 0; + $xfer += $input->readListBegin($_etype279, $_size276); + for ($_i280 = 0; $_i280 < $_size276; ++$_i280) { - $elem274 = null; - $elem274 = new \metastore\ColumnStatisticsObj(); - $xfer += $elem274->read($input); - $val268 []= $elem274; + $elem281 = null; + $elem281 = new \metastore\ColumnStatisticsObj(); + $xfer += $elem281->read($input); + $val275 []= $elem281; } 
$xfer += $input->readListEnd(); - $this->partStats[$key267] = $val268; + $this->partStats[$key274] = $val275; } $xfer += $input->readMapEnd(); } else { @@ -6892,15 +6992,15 @@ class PartitionsStatsResult { { $output->writeMapBegin(TType::STRING, TType::LST, count($this->partStats)); { - foreach ($this->partStats as $kiter275 => $viter276) + foreach ($this->partStats as $kiter282 => $viter283) { - $xfer += $output->writeString($kiter275); + $xfer += $output->writeString($kiter282); { - $output->writeListBegin(TType::STRUCT, count($viter276)); + $output->writeListBegin(TType::STRUCT, count($viter283)); { - foreach ($viter276 as $iter277) + foreach ($viter283 as $iter284) { - $xfer += $iter277->write($output); + $xfer += $iter284->write($output); } } $output->writeListEnd(); @@ -6995,14 +7095,14 @@ class TableStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size278 = 0; - $_etype281 = 0; - $xfer += $input->readListBegin($_etype281, $_size278); - for ($_i282 = 0; $_i282 < $_size278; ++$_i282) + $_size285 = 0; + $_etype288 = 0; + $xfer += $input->readListBegin($_etype288, $_size285); + for ($_i289 = 0; $_i289 < $_size285; ++$_i289) { - $elem283 = null; - $xfer += $input->readString($elem283); - $this->colNames []= $elem283; + $elem290 = null; + $xfer += $input->readString($elem290); + $this->colNames []= $elem290; } $xfer += $input->readListEnd(); } else { @@ -7040,9 +7140,9 @@ class TableStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter284) + foreach ($this->colNames as $iter291) { - $xfer += $output->writeString($iter284); + $xfer += $output->writeString($iter291); } } $output->writeListEnd(); @@ -7145,14 +7245,14 @@ class PartitionsStatsRequest { case 3: if ($ftype == TType::LST) { $this->colNames = array(); - $_size285 = 0; - $_etype288 = 0; - $xfer += $input->readListBegin($_etype288, $_size285); - for ($_i289 = 0; $_i289 < $_size285; ++$_i289) + $_size292 = 0; + $_etype295 = 0; + $xfer += $input->readListBegin($_etype295, $_size292); + for ($_i296 = 0; $_i296 < $_size292; ++$_i296) { - $elem290 = null; - $xfer += $input->readString($elem290); - $this->colNames []= $elem290; + $elem297 = null; + $xfer += $input->readString($elem297); + $this->colNames []= $elem297; } $xfer += $input->readListEnd(); } else { @@ -7162,14 +7262,14 @@ class PartitionsStatsRequest { case 4: if ($ftype == TType::LST) { $this->partNames = array(); - $_size291 = 0; - $_etype294 = 0; - $xfer += $input->readListBegin($_etype294, $_size291); - for ($_i295 = 0; $_i295 < $_size291; ++$_i295) + $_size298 = 0; + $_etype301 = 0; + $xfer += $input->readListBegin($_etype301, $_size298); + for ($_i302 = 0; $_i302 < $_size298; ++$_i302) { - $elem296 = null; - $xfer += $input->readString($elem296); - $this->partNames []= $elem296; + $elem303 = null; + $xfer += $input->readString($elem303); + $this->partNames []= $elem303; } $xfer += $input->readListEnd(); } else { @@ -7207,9 +7307,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->colNames)); { - foreach ($this->colNames as $iter297) + foreach ($this->colNames as $iter304) { - $xfer += $output->writeString($iter297); + $xfer += $output->writeString($iter304); } } $output->writeListEnd(); @@ -7224,9 +7324,9 @@ class PartitionsStatsRequest { { $output->writeListBegin(TType::STRING, count($this->partNames)); { - foreach ($this->partNames as $iter298) + foreach ($this->partNames as $iter305) { - $xfer += $output->writeString($iter298); + 
$xfer += $output->writeString($iter305); } } $output->writeListEnd(); @@ -7288,15 +7388,15 @@ class AddPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size299 = 0; - $_etype302 = 0; - $xfer += $input->readListBegin($_etype302, $_size299); - for ($_i303 = 0; $_i303 < $_size299; ++$_i303) + $_size306 = 0; + $_etype309 = 0; + $xfer += $input->readListBegin($_etype309, $_size306); + for ($_i310 = 0; $_i310 < $_size306; ++$_i310) { - $elem304 = null; - $elem304 = new \metastore\Partition(); - $xfer += $elem304->read($input); - $this->partitions []= $elem304; + $elem311 = null; + $elem311 = new \metastore\Partition(); + $xfer += $elem311->read($input); + $this->partitions []= $elem311; } $xfer += $input->readListEnd(); } else { @@ -7324,9 +7424,9 @@ class AddPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter305) + foreach ($this->partitions as $iter312) { - $xfer += $iter305->write($output); + $xfer += $iter312->write($output); } } $output->writeListEnd(); @@ -7434,15 +7534,15 @@ class AddPartitionsRequest { case 3: if ($ftype == TType::LST) { $this->parts = array(); - $_size306 = 0; - $_etype309 = 0; - $xfer += $input->readListBegin($_etype309, $_size306); - for ($_i310 = 0; $_i310 < $_size306; ++$_i310) + $_size313 = 0; + $_etype316 = 0; + $xfer += $input->readListBegin($_etype316, $_size313); + for ($_i317 = 0; $_i317 < $_size313; ++$_i317) { - $elem311 = null; - $elem311 = new \metastore\Partition(); - $xfer += $elem311->read($input); - $this->parts []= $elem311; + $elem318 = null; + $elem318 = new \metastore\Partition(); + $xfer += $elem318->read($input); + $this->parts []= $elem318; } $xfer += $input->readListEnd(); } else { @@ -7494,9 +7594,9 @@ class AddPartitionsRequest { { $output->writeListBegin(TType::STRUCT, count($this->parts)); { - foreach ($this->parts as $iter312) + foreach ($this->parts as $iter319) { - $xfer += $iter312->write($output); + $xfer += $iter319->write($output); } } $output->writeListEnd(); @@ -7568,15 +7668,15 @@ class DropPartitionsResult { case 1: if ($ftype == TType::LST) { $this->partitions = array(); - $_size313 = 0; - $_etype316 = 0; - $xfer += $input->readListBegin($_etype316, $_size313); - for ($_i317 = 0; $_i317 < $_size313; ++$_i317) + $_size320 = 0; + $_etype323 = 0; + $xfer += $input->readListBegin($_etype323, $_size320); + for ($_i324 = 0; $_i324 < $_size320; ++$_i324) { - $elem318 = null; - $elem318 = new \metastore\Partition(); - $xfer += $elem318->read($input); - $this->partitions []= $elem318; + $elem325 = null; + $elem325 = new \metastore\Partition(); + $xfer += $elem325->read($input); + $this->partitions []= $elem325; } $xfer += $input->readListEnd(); } else { @@ -7604,9 +7704,9 @@ class DropPartitionsResult { { $output->writeListBegin(TType::STRUCT, count($this->partitions)); { - foreach ($this->partitions as $iter319) + foreach ($this->partitions as $iter326) { - $xfer += $iter319->write($output); + $xfer += $iter326->write($output); } } $output->writeListEnd(); @@ -7772,14 +7872,14 @@ class RequestPartsSpec { case 1: if ($ftype == TType::LST) { $this->names = array(); - $_size320 = 0; - $_etype323 = 0; - $xfer += $input->readListBegin($_etype323, $_size320); - for ($_i324 = 0; $_i324 < $_size320; ++$_i324) + $_size327 = 0; + $_etype330 = 0; + $xfer += $input->readListBegin($_etype330, $_size327); + for ($_i331 = 0; $_i331 < $_size327; ++$_i331) { - $elem325 = null; - $xfer += $input->readString($elem325); - $this->names []= 
$elem325; + $elem332 = null; + $xfer += $input->readString($elem332); + $this->names []= $elem332; } $xfer += $input->readListEnd(); } else { @@ -7789,15 +7889,15 @@ class RequestPartsSpec { case 2: if ($ftype == TType::LST) { $this->exprs = array(); - $_size326 = 0; - $_etype329 = 0; - $xfer += $input->readListBegin($_etype329, $_size326); - for ($_i330 = 0; $_i330 < $_size326; ++$_i330) + $_size333 = 0; + $_etype336 = 0; + $xfer += $input->readListBegin($_etype336, $_size333); + for ($_i337 = 0; $_i337 < $_size333; ++$_i337) { - $elem331 = null; - $elem331 = new \metastore\DropPartitionsExpr(); - $xfer += $elem331->read($input); - $this->exprs []= $elem331; + $elem338 = null; + $elem338 = new \metastore\DropPartitionsExpr(); + $xfer += $elem338->read($input); + $this->exprs []= $elem338; } $xfer += $input->readListEnd(); } else { @@ -7825,9 +7925,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRING, count($this->names)); { - foreach ($this->names as $iter332) + foreach ($this->names as $iter339) { - $xfer += $output->writeString($iter332); + $xfer += $output->writeString($iter339); } } $output->writeListEnd(); @@ -7842,9 +7942,9 @@ class RequestPartsSpec { { $output->writeListBegin(TType::STRUCT, count($this->exprs)); { - foreach ($this->exprs as $iter333) + foreach ($this->exprs as $iter340) { - $xfer += $iter333->write($output); + $xfer += $iter340->write($output); } } $output->writeListEnd(); @@ -8325,15 +8425,15 @@ class Function { case 8: if ($ftype == TType::LST) { $this->resourceUris = array(); - $_size334 = 0; - $_etype337 = 0; - $xfer += $input->readListBegin($_etype337, $_size334); - for ($_i338 = 0; $_i338 < $_size334; ++$_i338) + $_size341 = 0; + $_etype344 = 0; + $xfer += $input->readListBegin($_etype344, $_size341); + for ($_i345 = 0; $_i345 < $_size341; ++$_i345) { - $elem339 = null; - $elem339 = new \metastore\ResourceUri(); - $xfer += $elem339->read($input); - $this->resourceUris []= $elem339; + $elem346 = null; + $elem346 = new \metastore\ResourceUri(); + $xfer += $elem346->read($input); + $this->resourceUris []= $elem346; } $xfer += $input->readListEnd(); } else { @@ -8396,9 +8496,9 @@ class Function { { $output->writeListBegin(TType::STRUCT, count($this->resourceUris)); { - foreach ($this->resourceUris as $iter340) + foreach ($this->resourceUris as $iter347) { - $xfer += $iter340->write($output); + $xfer += $iter347->write($output); } } $output->writeListEnd(); @@ -8607,15 +8707,15 @@ class GetOpenTxnsInfoResponse { case 2: if ($ftype == TType::LST) { $this->open_txns = array(); - $_size341 = 0; - $_etype344 = 0; - $xfer += $input->readListBegin($_etype344, $_size341); - for ($_i345 = 0; $_i345 < $_size341; ++$_i345) + $_size348 = 0; + $_etype351 = 0; + $xfer += $input->readListBegin($_etype351, $_size348); + for ($_i352 = 0; $_i352 < $_size348; ++$_i352) { - $elem346 = null; - $elem346 = new \metastore\TxnInfo(); - $xfer += $elem346->read($input); - $this->open_txns []= $elem346; + $elem353 = null; + $elem353 = new \metastore\TxnInfo(); + $xfer += $elem353->read($input); + $this->open_txns []= $elem353; } $xfer += $input->readListEnd(); } else { @@ -8648,9 +8748,9 @@ class GetOpenTxnsInfoResponse { { $output->writeListBegin(TType::STRUCT, count($this->open_txns)); { - foreach ($this->open_txns as $iter347) + foreach ($this->open_txns as $iter354) { - $xfer += $iter347->write($output); + $xfer += $iter354->write($output); } } $output->writeListEnd(); @@ -8726,17 +8826,17 @@ class GetOpenTxnsResponse { case 2: if ($ftype == TType::SET) { 
$this->open_txns = array(); - $_size348 = 0; - $_etype351 = 0; - $xfer += $input->readSetBegin($_etype351, $_size348); - for ($_i352 = 0; $_i352 < $_size348; ++$_i352) + $_size355 = 0; + $_etype358 = 0; + $xfer += $input->readSetBegin($_etype358, $_size355); + for ($_i359 = 0; $_i359 < $_size355; ++$_i359) { - $elem353 = null; - $xfer += $input->readI64($elem353); - if (is_scalar($elem353)) { - $this->open_txns[$elem353] = true; + $elem360 = null; + $xfer += $input->readI64($elem360); + if (is_scalar($elem360)) { + $this->open_txns[$elem360] = true; } else { - $this->open_txns []= $elem353; + $this->open_txns []= $elem360; } } $xfer += $input->readSetEnd(); @@ -8770,12 +8870,12 @@ class GetOpenTxnsResponse { { $output->writeSetBegin(TType::I64, count($this->open_txns)); { - foreach ($this->open_txns as $iter354 => $iter355) + foreach ($this->open_txns as $iter361 => $iter362) { - if (is_scalar($iter355)) { - $xfer += $output->writeI64($iter354); + if (is_scalar($iter362)) { + $xfer += $output->writeI64($iter361); } else { - $xfer += $output->writeI64($iter355); + $xfer += $output->writeI64($iter362); } } } @@ -8949,14 +9049,14 @@ class OpenTxnsResponse { case 1: if ($ftype == TType::LST) { $this->txn_ids = array(); - $_size356 = 0; - $_etype359 = 0; - $xfer += $input->readListBegin($_etype359, $_size356); - for ($_i360 = 0; $_i360 < $_size356; ++$_i360) + $_size363 = 0; + $_etype366 = 0; + $xfer += $input->readListBegin($_etype366, $_size363); + for ($_i367 = 0; $_i367 < $_size363; ++$_i367) { - $elem361 = null; - $xfer += $input->readI64($elem361); - $this->txn_ids []= $elem361; + $elem368 = null; + $xfer += $input->readI64($elem368); + $this->txn_ids []= $elem368; } $xfer += $input->readListEnd(); } else { @@ -8984,9 +9084,9 @@ class OpenTxnsResponse { { $output->writeListBegin(TType::I64, count($this->txn_ids)); { - foreach ($this->txn_ids as $iter362) + foreach ($this->txn_ids as $iter369) { - $xfer += $output->writeI64($iter362); + $xfer += $output->writeI64($iter369); } } $output->writeListEnd(); @@ -9368,15 +9468,15 @@ class LockRequest { case 1: if ($ftype == TType::LST) { $this->component = array(); - $_size363 = 0; - $_etype366 = 0; - $xfer += $input->readListBegin($_etype366, $_size363); - for ($_i367 = 0; $_i367 < $_size363; ++$_i367) + $_size370 = 0; + $_etype373 = 0; + $xfer += $input->readListBegin($_etype373, $_size370); + for ($_i374 = 0; $_i374 < $_size370; ++$_i374) { - $elem368 = null; - $elem368 = new \metastore\LockComponent(); - $xfer += $elem368->read($input); - $this->component []= $elem368; + $elem375 = null; + $elem375 = new \metastore\LockComponent(); + $xfer += $elem375->read($input); + $this->component []= $elem375; } $xfer += $input->readListEnd(); } else { @@ -9425,9 +9525,9 @@ class LockRequest { { $output->writeListBegin(TType::STRUCT, count($this->component)); { - foreach ($this->component as $iter369) + foreach ($this->component as $iter376) { - $xfer += $iter369->write($output); + $xfer += $iter376->write($output); } } $output->writeListEnd(); @@ -10062,15 +10162,15 @@ class ShowLocksResponse { case 1: if ($ftype == TType::LST) { $this->locks = array(); - $_size370 = 0; - $_etype373 = 0; - $xfer += $input->readListBegin($_etype373, $_size370); - for ($_i374 = 0; $_i374 < $_size370; ++$_i374) + $_size377 = 0; + $_etype380 = 0; + $xfer += $input->readListBegin($_etype380, $_size377); + for ($_i381 = 0; $_i381 < $_size377; ++$_i381) { - $elem375 = null; - $elem375 = new \metastore\ShowLocksResponseElement(); - $xfer += $elem375->read($input); - 
$this->locks []= $elem375; + $elem382 = null; + $elem382 = new \metastore\ShowLocksResponseElement(); + $xfer += $elem382->read($input); + $this->locks []= $elem382; } $xfer += $input->readListEnd(); } else { @@ -10098,9 +10198,9 @@ class ShowLocksResponse { { $output->writeListBegin(TType::STRUCT, count($this->locks)); { - foreach ($this->locks as $iter376) + foreach ($this->locks as $iter383) { - $xfer += $iter376->write($output); + $xfer += $iter383->write($output); } } $output->writeListEnd(); @@ -10357,17 +10457,17 @@ class HeartbeatTxnRangeResponse { case 1: if ($ftype == TType::SET) { $this->aborted = array(); - $_size377 = 0; - $_etype380 = 0; - $xfer += $input->readSetBegin($_etype380, $_size377); - for ($_i381 = 0; $_i381 < $_size377; ++$_i381) + $_size384 = 0; + $_etype387 = 0; + $xfer += $input->readSetBegin($_etype387, $_size384); + for ($_i388 = 0; $_i388 < $_size384; ++$_i388) { - $elem382 = null; - $xfer += $input->readI64($elem382); - if (is_scalar($elem382)) { - $this->aborted[$elem382] = true; + $elem389 = null; + $xfer += $input->readI64($elem389); + if (is_scalar($elem389)) { + $this->aborted[$elem389] = true; } else { - $this->aborted []= $elem382; + $this->aborted []= $elem389; } } $xfer += $input->readSetEnd(); @@ -10378,17 +10478,17 @@ class HeartbeatTxnRangeResponse { case 2: if ($ftype == TType::SET) { $this->nosuch = array(); - $_size383 = 0; - $_etype386 = 0; - $xfer += $input->readSetBegin($_etype386, $_size383); - for ($_i387 = 0; $_i387 < $_size383; ++$_i387) + $_size390 = 0; + $_etype393 = 0; + $xfer += $input->readSetBegin($_etype393, $_size390); + for ($_i394 = 0; $_i394 < $_size390; ++$_i394) { - $elem388 = null; - $xfer += $input->readI64($elem388); - if (is_scalar($elem388)) { - $this->nosuch[$elem388] = true; + $elem395 = null; + $xfer += $input->readI64($elem395); + if (is_scalar($elem395)) { + $this->nosuch[$elem395] = true; } else { - $this->nosuch []= $elem388; + $this->nosuch []= $elem395; } } $xfer += $input->readSetEnd(); @@ -10417,12 +10517,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->aborted)); { - foreach ($this->aborted as $iter389 => $iter390) + foreach ($this->aborted as $iter396 => $iter397) { - if (is_scalar($iter390)) { - $xfer += $output->writeI64($iter389); + if (is_scalar($iter397)) { + $xfer += $output->writeI64($iter396); } else { - $xfer += $output->writeI64($iter390); + $xfer += $output->writeI64($iter397); } } } @@ -10438,12 +10538,12 @@ class HeartbeatTxnRangeResponse { { $output->writeSetBegin(TType::I64, count($this->nosuch)); { - foreach ($this->nosuch as $iter391 => $iter392) + foreach ($this->nosuch as $iter398 => $iter399) { - if (is_scalar($iter392)) { - $xfer += $output->writeI64($iter391); + if (is_scalar($iter399)) { + $xfer += $output->writeI64($iter398); } else { - $xfer += $output->writeI64($iter392); + $xfer += $output->writeI64($iter399); } } } @@ -10920,15 +11020,15 @@ class ShowCompactResponse { case 1: if ($ftype == TType::LST) { $this->compacts = array(); - $_size393 = 0; - $_etype396 = 0; - $xfer += $input->readListBegin($_etype396, $_size393); - for ($_i397 = 0; $_i397 < $_size393; ++$_i397) + $_size400 = 0; + $_etype403 = 0; + $xfer += $input->readListBegin($_etype403, $_size400); + for ($_i404 = 0; $_i404 < $_size400; ++$_i404) { - $elem398 = null; - $elem398 = new \metastore\ShowCompactResponseElement(); - $xfer += $elem398->read($input); - $this->compacts []= $elem398; + $elem405 = null; + $elem405 = new \metastore\ShowCompactResponseElement(); + $xfer += 
$elem405->read($input); + $this->compacts []= $elem405; } $xfer += $input->readListEnd(); } else { @@ -10956,9 +11056,9 @@ class ShowCompactResponse { { $output->writeListBegin(TType::STRUCT, count($this->compacts)); { - foreach ($this->compacts as $iter399) + foreach ($this->compacts as $iter406) { - $xfer += $iter399->write($output); + $xfer += $iter406->write($output); } } $output->writeListEnd(); diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote index e430c77..c93b74e 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore-remote @@ -97,6 +97,7 @@ if len(sys.argv) <= 1 or sys.argv[1] == '--help': print ' TableStatsResult get_table_statistics_req(TableStatsRequest request)' print ' PartitionsStatsResult get_partitions_statistics_req(PartitionsStatsRequest request)' print ' AggrStats get_aggr_stats_for(PartitionsStatsRequest request)' + print ' bool set_aggr_stats_for(SetPartitionsStatsRequest request)' print ' bool delete_partition_column_statistics(string db_name, string tbl_name, string part_name, string col_name)' print ' bool delete_table_column_statistics(string db_name, string tbl_name, string col_name)' print ' void create_function(Function func)' @@ -630,6 +631,12 @@ elif cmd == 'get_aggr_stats_for': sys.exit(1) pp.pprint(client.get_aggr_stats_for(eval(args[0]),)) +elif cmd == 'set_aggr_stats_for': + if len(args) != 1: + print 'set_aggr_stats_for requires 1 args' + sys.exit(1) + pp.pprint(client.set_aggr_stats_for(eval(args[0]),)) + elif cmd == 'delete_partition_column_statistics': if len(args) != 4: print 'delete_partition_column_statistics requires 4 args' diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py index 450018b..7c4c6a0 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ThriftHiveMetastore.py @@ -655,6 +655,13 @@ def get_aggr_stats_for(self, request): """ pass + def set_aggr_stats_for(self, request): + """ + Parameters: + - request + """ + pass + def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): """ Parameters: @@ -3708,6 +3715,44 @@ def recv_get_aggr_stats_for(self, ): raise result.o2 raise TApplicationException(TApplicationException.MISSING_RESULT, "get_aggr_stats_for failed: unknown result"); + def set_aggr_stats_for(self, request): + """ + Parameters: + - request + """ + self.send_set_aggr_stats_for(request) + return self.recv_set_aggr_stats_for() + + def send_set_aggr_stats_for(self, request): + self._oprot.writeMessageBegin('set_aggr_stats_for', TMessageType.CALL, self._seqid) + args = set_aggr_stats_for_args() + args.request = request + args.write(self._oprot) + self._oprot.writeMessageEnd() + self._oprot.trans.flush() + + def recv_set_aggr_stats_for(self, ): + (fname, mtype, rseqid) = self._iprot.readMessageBegin() + if mtype == TMessageType.EXCEPTION: + x = TApplicationException() + x.read(self._iprot) + self._iprot.readMessageEnd() + raise x + result = set_aggr_stats_for_result() + result.read(self._iprot) + self._iprot.readMessageEnd() + if result.success is not None: + return result.success + if result.o1 is not None: + raise result.o1 + if result.o2 is not None: + raise result.o2 + if result.o3 is not None: + raise 
result.o3 + if result.o4 is not None: + raise result.o4 + raise TApplicationException(TApplicationException.MISSING_RESULT, "set_aggr_stats_for failed: unknown result"); + def delete_partition_column_statistics(self, db_name, tbl_name, part_name, col_name): """ Parameters: @@ -5041,6 +5086,7 @@ def __init__(self, handler): self._processMap["get_table_statistics_req"] = Processor.process_get_table_statistics_req self._processMap["get_partitions_statistics_req"] = Processor.process_get_partitions_statistics_req self._processMap["get_aggr_stats_for"] = Processor.process_get_aggr_stats_for + self._processMap["set_aggr_stats_for"] = Processor.process_set_aggr_stats_for self._processMap["delete_partition_column_statistics"] = Processor.process_delete_partition_column_statistics self._processMap["delete_table_column_statistics"] = Processor.process_delete_table_column_statistics self._processMap["create_function"] = Processor.process_create_function @@ -6329,6 +6375,26 @@ def process_get_aggr_stats_for(self, seqid, iprot, oprot): oprot.writeMessageEnd() oprot.trans.flush() + def process_set_aggr_stats_for(self, seqid, iprot, oprot): + args = set_aggr_stats_for_args() + args.read(iprot) + iprot.readMessageEnd() + result = set_aggr_stats_for_result() + try: + result.success = self._handler.set_aggr_stats_for(args.request) + except NoSuchObjectException as o1: + result.o1 = o1 + except InvalidObjectException as o2: + result.o2 = o2 + except MetaException as o3: + result.o3 = o3 + except InvalidInputException as o4: + result.o4 = o4 + oprot.writeMessageBegin("set_aggr_stats_for", TMessageType.REPLY, seqid) + result.write(oprot) + oprot.writeMessageEnd() + oprot.trans.flush() + def process_delete_partition_column_statistics(self, seqid, iprot, oprot): args = delete_partition_column_statistics_args() args.read(iprot) @@ -7699,10 +7765,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype400, _size397) = iprot.readListBegin() - for _i401 in xrange(_size397): - _elem402 = iprot.readString(); - self.success.append(_elem402) + (_etype407, _size404) = iprot.readListBegin() + for _i408 in xrange(_size404): + _elem409 = iprot.readString(); + self.success.append(_elem409) iprot.readListEnd() else: iprot.skip(ftype) @@ -7725,8 +7791,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter403 in self.success: - oprot.writeString(iter403) + for iter410 in self.success: + oprot.writeString(iter410) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -7821,10 +7887,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype407, _size404) = iprot.readListBegin() - for _i408 in xrange(_size404): - _elem409 = iprot.readString(); - self.success.append(_elem409) + (_etype414, _size411) = iprot.readListBegin() + for _i415 in xrange(_size411): + _elem416 = iprot.readString(); + self.success.append(_elem416) iprot.readListEnd() else: iprot.skip(ftype) @@ -7847,8 +7913,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter410 in self.success: - oprot.writeString(iter410) + for iter417 in self.success: + oprot.writeString(iter417) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -8558,12 +8624,12 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype412, 
_vtype413, _size411 ) = iprot.readMapBegin() - for _i415 in xrange(_size411): - _key416 = iprot.readString(); - _val417 = Type() - _val417.read(iprot) - self.success[_key416] = _val417 + (_ktype419, _vtype420, _size418 ) = iprot.readMapBegin() + for _i422 in xrange(_size418): + _key423 = iprot.readString(); + _val424 = Type() + _val424.read(iprot) + self.success[_key423] = _val424 iprot.readMapEnd() else: iprot.skip(ftype) @@ -8586,9 +8652,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success)) - for kiter418,viter419 in self.success.items(): - oprot.writeString(kiter418) - viter419.write(oprot) + for kiter425,viter426 in self.success.items(): + oprot.writeString(kiter425) + viter426.write(oprot) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -8719,11 +8785,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype423, _size420) = iprot.readListBegin() - for _i424 in xrange(_size420): - _elem425 = FieldSchema() - _elem425.read(iprot) - self.success.append(_elem425) + (_etype430, _size427) = iprot.readListBegin() + for _i431 in xrange(_size427): + _elem432 = FieldSchema() + _elem432.read(iprot) + self.success.append(_elem432) iprot.readListEnd() else: iprot.skip(ftype) @@ -8758,8 +8824,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter426 in self.success: - iter426.write(oprot) + for iter433 in self.success: + iter433.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -8898,11 +8964,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype430, _size427) = iprot.readListBegin() - for _i431 in xrange(_size427): - _elem432 = FieldSchema() - _elem432.read(iprot) - self.success.append(_elem432) + (_etype437, _size434) = iprot.readListBegin() + for _i438 in xrange(_size434): + _elem439 = FieldSchema() + _elem439.read(iprot) + self.success.append(_elem439) iprot.readListEnd() else: iprot.skip(ftype) @@ -8937,8 +9003,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter433 in self.success: - iter433.write(oprot) + for iter440 in self.success: + iter440.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9735,10 +9801,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype437, _size434) = iprot.readListBegin() - for _i438 in xrange(_size434): - _elem439 = iprot.readString(); - self.success.append(_elem439) + (_etype444, _size441) = iprot.readListBegin() + for _i445 in xrange(_size441): + _elem446 = iprot.readString(); + self.success.append(_elem446) iprot.readListEnd() else: iprot.skip(ftype) @@ -9761,8 +9827,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter440 in self.success: - oprot.writeString(iter440) + for iter447 in self.success: + oprot.writeString(iter447) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -9875,10 +9941,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype444, _size441) = iprot.readListBegin() - for _i445 in xrange(_size441): - _elem446 = iprot.readString(); 
- self.success.append(_elem446) + (_etype451, _size448) = iprot.readListBegin() + for _i452 in xrange(_size448): + _elem453 = iprot.readString(); + self.success.append(_elem453) iprot.readListEnd() else: iprot.skip(ftype) @@ -9901,8 +9967,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter447 in self.success: - oprot.writeString(iter447) + for iter454 in self.success: + oprot.writeString(iter454) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10119,10 +10185,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.tbl_names = [] - (_etype451, _size448) = iprot.readListBegin() - for _i452 in xrange(_size448): - _elem453 = iprot.readString(); - self.tbl_names.append(_elem453) + (_etype458, _size455) = iprot.readListBegin() + for _i459 in xrange(_size455): + _elem460 = iprot.readString(); + self.tbl_names.append(_elem460) iprot.readListEnd() else: iprot.skip(ftype) @@ -10143,8 +10209,8 @@ def write(self, oprot): if self.tbl_names is not None: oprot.writeFieldBegin('tbl_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.tbl_names)) - for iter454 in self.tbl_names: - oprot.writeString(iter454) + for iter461 in self.tbl_names: + oprot.writeString(iter461) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -10199,11 +10265,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype458, _size455) = iprot.readListBegin() - for _i459 in xrange(_size455): - _elem460 = Table() - _elem460.read(iprot) - self.success.append(_elem460) + (_etype465, _size462) = iprot.readListBegin() + for _i466 in xrange(_size462): + _elem467 = Table() + _elem467.read(iprot) + self.success.append(_elem467) iprot.readListEnd() else: iprot.skip(ftype) @@ -10238,8 +10304,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter461 in self.success: - iter461.write(oprot) + for iter468 in self.success: + iter468.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -10390,10 +10456,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype465, _size462) = iprot.readListBegin() - for _i466 in xrange(_size462): - _elem467 = iprot.readString(); - self.success.append(_elem467) + (_etype472, _size469) = iprot.readListBegin() + for _i473 in xrange(_size469): + _elem474 = iprot.readString(); + self.success.append(_elem474) iprot.readListEnd() else: iprot.skip(ftype) @@ -10428,8 +10494,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter468 in self.success: - oprot.writeString(iter468) + for iter475 in self.success: + oprot.writeString(iter475) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -11152,11 +11218,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.new_parts = [] - (_etype472, _size469) = iprot.readListBegin() - for _i473 in xrange(_size469): - _elem474 = Partition() - _elem474.read(iprot) - self.new_parts.append(_elem474) + (_etype479, _size476) = iprot.readListBegin() + for _i480 in xrange(_size476): + _elem481 = Partition() + _elem481.read(iprot) + self.new_parts.append(_elem481) iprot.readListEnd() else: iprot.skip(ftype) @@ -11173,8 +11239,8 @@ def write(self, oprot): 
if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter475 in self.new_parts: - iter475.write(oprot) + for iter482 in self.new_parts: + iter482.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11335,10 +11401,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype479, _size476) = iprot.readListBegin() - for _i480 in xrange(_size476): - _elem481 = iprot.readString(); - self.part_vals.append(_elem481) + (_etype486, _size483) = iprot.readListBegin() + for _i487 in xrange(_size483): + _elem488 = iprot.readString(); + self.part_vals.append(_elem488) iprot.readListEnd() else: iprot.skip(ftype) @@ -11363,8 +11429,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter482 in self.part_vals: - oprot.writeString(iter482) + for iter489 in self.part_vals: + oprot.writeString(iter489) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -11689,10 +11755,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype486, _size483) = iprot.readListBegin() - for _i487 in xrange(_size483): - _elem488 = iprot.readString(); - self.part_vals.append(_elem488) + (_etype493, _size490) = iprot.readListBegin() + for _i494 in xrange(_size490): + _elem495 = iprot.readString(); + self.part_vals.append(_elem495) iprot.readListEnd() else: iprot.skip(ftype) @@ -11723,8 +11789,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter489 in self.part_vals: - oprot.writeString(iter489) + for iter496 in self.part_vals: + oprot.writeString(iter496) oprot.writeListEnd() oprot.writeFieldEnd() if self.environment_context is not None: @@ -12272,10 +12338,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype493, _size490) = iprot.readListBegin() - for _i494 in xrange(_size490): - _elem495 = iprot.readString(); - self.part_vals.append(_elem495) + (_etype500, _size497) = iprot.readListBegin() + for _i501 in xrange(_size497): + _elem502 = iprot.readString(); + self.part_vals.append(_elem502) iprot.readListEnd() else: iprot.skip(ftype) @@ -12305,8 +12371,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter496 in self.part_vals: - oprot.writeString(iter496) + for iter503 in self.part_vals: + oprot.writeString(iter503) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -12464,10 +12530,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype500, _size497) = iprot.readListBegin() - for _i501 in xrange(_size497): - _elem502 = iprot.readString(); - self.part_vals.append(_elem502) + (_etype507, _size504) = iprot.readListBegin() + for _i508 in xrange(_size504): + _elem509 = iprot.readString(); + self.part_vals.append(_elem509) iprot.readListEnd() else: iprot.skip(ftype) @@ -12503,8 +12569,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter503 in self.part_vals: - oprot.writeString(iter503) + for iter510 in self.part_vals: + 
oprot.writeString(iter510) oprot.writeListEnd() oprot.writeFieldEnd() if self.deleteData is not None: @@ -13182,10 +13248,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype507, _size504) = iprot.readListBegin() - for _i508 in xrange(_size504): - _elem509 = iprot.readString(); - self.part_vals.append(_elem509) + (_etype514, _size511) = iprot.readListBegin() + for _i515 in xrange(_size511): + _elem516 = iprot.readString(); + self.part_vals.append(_elem516) iprot.readListEnd() else: iprot.skip(ftype) @@ -13210,8 +13276,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter510 in self.part_vals: - oprot.writeString(iter510) + for iter517 in self.part_vals: + oprot.writeString(iter517) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -13356,11 +13422,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partitionSpecs = {} - (_ktype512, _vtype513, _size511 ) = iprot.readMapBegin() - for _i515 in xrange(_size511): - _key516 = iprot.readString(); - _val517 = iprot.readString(); - self.partitionSpecs[_key516] = _val517 + (_ktype519, _vtype520, _size518 ) = iprot.readMapBegin() + for _i522 in xrange(_size518): + _key523 = iprot.readString(); + _val524 = iprot.readString(); + self.partitionSpecs[_key523] = _val524 iprot.readMapEnd() else: iprot.skip(ftype) @@ -13397,9 +13463,9 @@ def write(self, oprot): if self.partitionSpecs is not None: oprot.writeFieldBegin('partitionSpecs', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.partitionSpecs)) - for kiter518,viter519 in self.partitionSpecs.items(): - oprot.writeString(kiter518) - oprot.writeString(viter519) + for kiter525,viter526 in self.partitionSpecs.items(): + oprot.writeString(kiter525) + oprot.writeString(viter526) oprot.writeMapEnd() oprot.writeFieldEnd() if self.source_db is not None: @@ -13596,10 +13662,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype523, _size520) = iprot.readListBegin() - for _i524 in xrange(_size520): - _elem525 = iprot.readString(); - self.part_vals.append(_elem525) + (_etype530, _size527) = iprot.readListBegin() + for _i531 in xrange(_size527): + _elem532 = iprot.readString(); + self.part_vals.append(_elem532) iprot.readListEnd() else: iprot.skip(ftype) @@ -13611,10 +13677,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype529, _size526) = iprot.readListBegin() - for _i530 in xrange(_size526): - _elem531 = iprot.readString(); - self.group_names.append(_elem531) + (_etype536, _size533) = iprot.readListBegin() + for _i537 in xrange(_size533): + _elem538 = iprot.readString(); + self.group_names.append(_elem538) iprot.readListEnd() else: iprot.skip(ftype) @@ -13639,8 +13705,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter532 in self.part_vals: - oprot.writeString(iter532) + for iter539 in self.part_vals: + oprot.writeString(iter539) oprot.writeListEnd() oprot.writeFieldEnd() if self.user_name is not None: @@ -13650,8 +13716,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter533 in self.group_names: - oprot.writeString(iter533) + for iter540 in 
self.group_names: + oprot.writeString(iter540) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14043,11 +14109,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype537, _size534) = iprot.readListBegin() - for _i538 in xrange(_size534): - _elem539 = Partition() - _elem539.read(iprot) - self.success.append(_elem539) + (_etype544, _size541) = iprot.readListBegin() + for _i545 in xrange(_size541): + _elem546 = Partition() + _elem546.read(iprot) + self.success.append(_elem546) iprot.readListEnd() else: iprot.skip(ftype) @@ -14076,8 +14142,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter540 in self.success: - iter540.write(oprot) + for iter547 in self.success: + iter547.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14164,10 +14230,10 @@ def read(self, iprot): elif fid == 5: if ftype == TType.LIST: self.group_names = [] - (_etype544, _size541) = iprot.readListBegin() - for _i545 in xrange(_size541): - _elem546 = iprot.readString(); - self.group_names.append(_elem546) + (_etype551, _size548) = iprot.readListBegin() + for _i552 in xrange(_size548): + _elem553 = iprot.readString(); + self.group_names.append(_elem553) iprot.readListEnd() else: iprot.skip(ftype) @@ -14200,8 +14266,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 5) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter547 in self.group_names: - oprot.writeString(iter547) + for iter554 in self.group_names: + oprot.writeString(iter554) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14253,11 +14319,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype551, _size548) = iprot.readListBegin() - for _i552 in xrange(_size548): - _elem553 = Partition() - _elem553.read(iprot) - self.success.append(_elem553) + (_etype558, _size555) = iprot.readListBegin() + for _i559 in xrange(_size555): + _elem560 = Partition() + _elem560.read(iprot) + self.success.append(_elem560) iprot.readListEnd() else: iprot.skip(ftype) @@ -14286,8 +14352,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter554 in self.success: - iter554.write(oprot) + for iter561 in self.success: + iter561.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14428,10 +14494,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype558, _size555) = iprot.readListBegin() - for _i559 in xrange(_size555): - _elem560 = iprot.readString(); - self.success.append(_elem560) + (_etype565, _size562) = iprot.readListBegin() + for _i566 in xrange(_size562): + _elem567 = iprot.readString(); + self.success.append(_elem567) iprot.readListEnd() else: iprot.skip(ftype) @@ -14454,8 +14520,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter561 in self.success: - oprot.writeString(iter561) + for iter568 in self.success: + oprot.writeString(iter568) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -14525,10 +14591,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype565, _size562) = 
iprot.readListBegin() - for _i566 in xrange(_size562): - _elem567 = iprot.readString(); - self.part_vals.append(_elem567) + (_etype572, _size569) = iprot.readListBegin() + for _i573 in xrange(_size569): + _elem574 = iprot.readString(); + self.part_vals.append(_elem574) iprot.readListEnd() else: iprot.skip(ftype) @@ -14558,8 +14624,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter568 in self.part_vals: - oprot.writeString(iter568) + for iter575 in self.part_vals: + oprot.writeString(iter575) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -14615,11 +14681,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype572, _size569) = iprot.readListBegin() - for _i573 in xrange(_size569): - _elem574 = Partition() - _elem574.read(iprot) - self.success.append(_elem574) + (_etype579, _size576) = iprot.readListBegin() + for _i580 in xrange(_size576): + _elem581 = Partition() + _elem581.read(iprot) + self.success.append(_elem581) iprot.readListEnd() else: iprot.skip(ftype) @@ -14648,8 +14714,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter575 in self.success: - iter575.write(oprot) + for iter582 in self.success: + iter582.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14729,10 +14795,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype579, _size576) = iprot.readListBegin() - for _i580 in xrange(_size576): - _elem581 = iprot.readString(); - self.part_vals.append(_elem581) + (_etype586, _size583) = iprot.readListBegin() + for _i587 in xrange(_size583): + _elem588 = iprot.readString(); + self.part_vals.append(_elem588) iprot.readListEnd() else: iprot.skip(ftype) @@ -14749,10 +14815,10 @@ def read(self, iprot): elif fid == 6: if ftype == TType.LIST: self.group_names = [] - (_etype585, _size582) = iprot.readListBegin() - for _i586 in xrange(_size582): - _elem587 = iprot.readString(); - self.group_names.append(_elem587) + (_etype592, _size589) = iprot.readListBegin() + for _i593 in xrange(_size589): + _elem594 = iprot.readString(); + self.group_names.append(_elem594) iprot.readListEnd() else: iprot.skip(ftype) @@ -14777,8 +14843,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter588 in self.part_vals: - oprot.writeString(iter588) + for iter595 in self.part_vals: + oprot.writeString(iter595) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -14792,8 +14858,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 6) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter589 in self.group_names: - oprot.writeString(iter589) + for iter596 in self.group_names: + oprot.writeString(iter596) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -14845,11 +14911,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype593, _size590) = iprot.readListBegin() - for _i594 in xrange(_size590): - _elem595 = Partition() - _elem595.read(iprot) - self.success.append(_elem595) + (_etype600, _size597) = iprot.readListBegin() + for _i601 in xrange(_size597): + _elem602 = 
Partition() + _elem602.read(iprot) + self.success.append(_elem602) iprot.readListEnd() else: iprot.skip(ftype) @@ -14878,8 +14944,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter596 in self.success: - iter596.write(oprot) + for iter603 in self.success: + iter603.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -14953,10 +15019,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype600, _size597) = iprot.readListBegin() - for _i601 in xrange(_size597): - _elem602 = iprot.readString(); - self.part_vals.append(_elem602) + (_etype607, _size604) = iprot.readListBegin() + for _i608 in xrange(_size604): + _elem609 = iprot.readString(); + self.part_vals.append(_elem609) iprot.readListEnd() else: iprot.skip(ftype) @@ -14986,8 +15052,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter603 in self.part_vals: - oprot.writeString(iter603) + for iter610 in self.part_vals: + oprot.writeString(iter610) oprot.writeListEnd() oprot.writeFieldEnd() if self.max_parts is not None: @@ -15043,10 +15109,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype607, _size604) = iprot.readListBegin() - for _i608 in xrange(_size604): - _elem609 = iprot.readString(); - self.success.append(_elem609) + (_etype614, _size611) = iprot.readListBegin() + for _i615 in xrange(_size611): + _elem616 = iprot.readString(); + self.success.append(_elem616) iprot.readListEnd() else: iprot.skip(ftype) @@ -15075,8 +15141,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter610 in self.success: - oprot.writeString(iter610) + for iter617 in self.success: + oprot.writeString(iter617) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15232,11 +15298,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype614, _size611) = iprot.readListBegin() - for _i615 in xrange(_size611): - _elem616 = Partition() - _elem616.read(iprot) - self.success.append(_elem616) + (_etype621, _size618) = iprot.readListBegin() + for _i622 in xrange(_size618): + _elem623 = Partition() + _elem623.read(iprot) + self.success.append(_elem623) iprot.readListEnd() else: iprot.skip(ftype) @@ -15265,8 +15331,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter617 in self.success: - iter617.write(oprot) + for iter624 in self.success: + iter624.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15484,10 +15550,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.names = [] - (_etype621, _size618) = iprot.readListBegin() - for _i622 in xrange(_size618): - _elem623 = iprot.readString(); - self.names.append(_elem623) + (_etype628, _size625) = iprot.readListBegin() + for _i629 in xrange(_size625): + _elem630 = iprot.readString(); + self.names.append(_elem630) iprot.readListEnd() else: iprot.skip(ftype) @@ -15512,8 +15578,8 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter624 in 
self.names: - oprot.writeString(iter624) + for iter631 in self.names: + oprot.writeString(iter631) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -15565,11 +15631,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype628, _size625) = iprot.readListBegin() - for _i629 in xrange(_size625): - _elem630 = Partition() - _elem630.read(iprot) - self.success.append(_elem630) + (_etype635, _size632) = iprot.readListBegin() + for _i636 in xrange(_size632): + _elem637 = Partition() + _elem637.read(iprot) + self.success.append(_elem637) iprot.readListEnd() else: iprot.skip(ftype) @@ -15598,8 +15664,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter631 in self.success: - iter631.write(oprot) + for iter638 in self.success: + iter638.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -15829,11 +15895,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.new_parts = [] - (_etype635, _size632) = iprot.readListBegin() - for _i636 in xrange(_size632): - _elem637 = Partition() - _elem637.read(iprot) - self.new_parts.append(_elem637) + (_etype642, _size639) = iprot.readListBegin() + for _i643 in xrange(_size639): + _elem644 = Partition() + _elem644.read(iprot) + self.new_parts.append(_elem644) iprot.readListEnd() else: iprot.skip(ftype) @@ -15858,8 +15924,8 @@ def write(self, oprot): if self.new_parts is not None: oprot.writeFieldBegin('new_parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.new_parts)) - for iter638 in self.new_parts: - iter638.write(oprot) + for iter645 in self.new_parts: + iter645.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -16171,10 +16237,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.part_vals = [] - (_etype642, _size639) = iprot.readListBegin() - for _i643 in xrange(_size639): - _elem644 = iprot.readString(); - self.part_vals.append(_elem644) + (_etype649, _size646) = iprot.readListBegin() + for _i650 in xrange(_size646): + _elem651 = iprot.readString(); + self.part_vals.append(_elem651) iprot.readListEnd() else: iprot.skip(ftype) @@ -16205,8 +16271,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter645 in self.part_vals: - oprot.writeString(iter645) + for iter652 in self.part_vals: + oprot.writeString(iter652) oprot.writeListEnd() oprot.writeFieldEnd() if self.new_part is not None: @@ -16334,10 +16400,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.part_vals = [] - (_etype649, _size646) = iprot.readListBegin() - for _i650 in xrange(_size646): - _elem651 = iprot.readString(); - self.part_vals.append(_elem651) + (_etype656, _size653) = iprot.readListBegin() + for _i657 in xrange(_size653): + _elem658 = iprot.readString(); + self.part_vals.append(_elem658) iprot.readListEnd() else: iprot.skip(ftype) @@ -16359,8 +16425,8 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.part_vals)) - for iter652 in self.part_vals: - oprot.writeString(iter652) + for iter659 in self.part_vals: + oprot.writeString(iter659) oprot.writeListEnd() oprot.writeFieldEnd() if self.throw_exception is not None: @@ -16689,10 +16755,10 @@ def read(self, iprot): if fid == 
0: if ftype == TType.LIST: self.success = [] - (_etype656, _size653) = iprot.readListBegin() - for _i657 in xrange(_size653): - _elem658 = iprot.readString(); - self.success.append(_elem658) + (_etype663, _size660) = iprot.readListBegin() + for _i664 in xrange(_size660): + _elem665 = iprot.readString(); + self.success.append(_elem665) iprot.readListEnd() else: iprot.skip(ftype) @@ -16715,8 +16781,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter659 in self.success: - oprot.writeString(iter659) + for iter666 in self.success: + oprot.writeString(iter666) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16829,11 +16895,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.MAP: self.success = {} - (_ktype661, _vtype662, _size660 ) = iprot.readMapBegin() - for _i664 in xrange(_size660): - _key665 = iprot.readString(); - _val666 = iprot.readString(); - self.success[_key665] = _val666 + (_ktype668, _vtype669, _size667 ) = iprot.readMapBegin() + for _i671 in xrange(_size667): + _key672 = iprot.readString(); + _val673 = iprot.readString(); + self.success[_key672] = _val673 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16856,9 +16922,9 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.MAP, 0) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.success)) - for kiter667,viter668 in self.success.items(): - oprot.writeString(kiter667) - oprot.writeString(viter668) + for kiter674,viter675 in self.success.items(): + oprot.writeString(kiter674) + oprot.writeString(viter675) oprot.writeMapEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -16928,11 +16994,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype670, _vtype671, _size669 ) = iprot.readMapBegin() - for _i673 in xrange(_size669): - _key674 = iprot.readString(); - _val675 = iprot.readString(); - self.part_vals[_key674] = _val675 + (_ktype677, _vtype678, _size676 ) = iprot.readMapBegin() + for _i680 in xrange(_size676): + _key681 = iprot.readString(); + _val682 = iprot.readString(); + self.part_vals[_key681] = _val682 iprot.readMapEnd() else: iprot.skip(ftype) @@ -16962,9 +17028,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter676,viter677 in self.part_vals.items(): - oprot.writeString(kiter676) - oprot.writeString(viter677) + for kiter683,viter684 in self.part_vals.items(): + oprot.writeString(kiter683) + oprot.writeString(viter684) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -17160,11 +17226,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.MAP: self.part_vals = {} - (_ktype679, _vtype680, _size678 ) = iprot.readMapBegin() - for _i682 in xrange(_size678): - _key683 = iprot.readString(); - _val684 = iprot.readString(); - self.part_vals[_key683] = _val684 + (_ktype686, _vtype687, _size685 ) = iprot.readMapBegin() + for _i689 in xrange(_size685): + _key690 = iprot.readString(); + _val691 = iprot.readString(); + self.part_vals[_key690] = _val691 iprot.readMapEnd() else: iprot.skip(ftype) @@ -17194,9 +17260,9 @@ def write(self, oprot): if self.part_vals is not None: oprot.writeFieldBegin('part_vals', TType.MAP, 3) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.part_vals)) - for kiter685,viter686 in 
self.part_vals.items(): - oprot.writeString(kiter685) - oprot.writeString(viter686) + for kiter692,viter693 in self.part_vals.items(): + oprot.writeString(kiter692) + oprot.writeString(viter693) oprot.writeMapEnd() oprot.writeFieldEnd() if self.eventType is not None: @@ -18168,11 +18234,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype690, _size687) = iprot.readListBegin() - for _i691 in xrange(_size687): - _elem692 = Index() - _elem692.read(iprot) - self.success.append(_elem692) + (_etype697, _size694) = iprot.readListBegin() + for _i698 in xrange(_size694): + _elem699 = Index() + _elem699.read(iprot) + self.success.append(_elem699) iprot.readListEnd() else: iprot.skip(ftype) @@ -18201,8 +18267,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter693 in self.success: - iter693.write(oprot) + for iter700 in self.success: + iter700.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -18343,10 +18409,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype697, _size694) = iprot.readListBegin() - for _i698 in xrange(_size694): - _elem699 = iprot.readString(); - self.success.append(_elem699) + (_etype704, _size701) = iprot.readListBegin() + for _i705 in xrange(_size701): + _elem706 = iprot.readString(); + self.success.append(_elem706) iprot.readListEnd() else: iprot.skip(ftype) @@ -18369,8 +18435,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter700 in self.success: - oprot.writeString(iter700) + for iter707 in self.success: + oprot.writeString(iter707) oprot.writeListEnd() oprot.writeFieldEnd() if self.o2 is not None: @@ -19584,6 +19650,178 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class set_aggr_stats_for_args: + """ + Attributes: + - request + """ + + thrift_spec = ( + None, # 0 + (1, TType.STRUCT, 'request', (SetPartitionsStatsRequest, SetPartitionsStatsRequest.thrift_spec), None, ), # 1 + ) + + def __init__(self, request=None,): + self.request = request + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.STRUCT: + self.request = SetPartitionsStatsRequest() + self.request.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('set_aggr_stats_for_args') + if self.request is not None: + oprot.writeFieldBegin('request', TType.STRUCT, 1) + self.request.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return 
'%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + +class set_aggr_stats_for_result: + """ + Attributes: + - success + - o1 + - o2 + - o3 + - o4 + """ + + thrift_spec = ( + (0, TType.BOOL, 'success', None, None, ), # 0 + (1, TType.STRUCT, 'o1', (NoSuchObjectException, NoSuchObjectException.thrift_spec), None, ), # 1 + (2, TType.STRUCT, 'o2', (InvalidObjectException, InvalidObjectException.thrift_spec), None, ), # 2 + (3, TType.STRUCT, 'o3', (MetaException, MetaException.thrift_spec), None, ), # 3 + (4, TType.STRUCT, 'o4', (InvalidInputException, InvalidInputException.thrift_spec), None, ), # 4 + ) + + def __init__(self, success=None, o1=None, o2=None, o3=None, o4=None,): + self.success = success + self.o1 = o1 + self.o2 = o2 + self.o3 = o3 + self.o4 = o4 + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 0: + if ftype == TType.BOOL: + self.success = iprot.readBool(); + else: + iprot.skip(ftype) + elif fid == 1: + if ftype == TType.STRUCT: + self.o1 = NoSuchObjectException() + self.o1.read(iprot) + else: + iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.o2 = InvalidObjectException() + self.o2.read(iprot) + else: + iprot.skip(ftype) + elif fid == 3: + if ftype == TType.STRUCT: + self.o3 = MetaException() + self.o3.read(iprot) + else: + iprot.skip(ftype) + elif fid == 4: + if ftype == TType.STRUCT: + self.o4 = InvalidInputException() + self.o4.read(iprot) + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('set_aggr_stats_for_result') + if self.success is not None: + oprot.writeFieldBegin('success', TType.BOOL, 0) + oprot.writeBool(self.success) + oprot.writeFieldEnd() + if self.o1 is not None: + oprot.writeFieldBegin('o1', TType.STRUCT, 1) + self.o1.write(oprot) + oprot.writeFieldEnd() + if self.o2 is not None: + oprot.writeFieldBegin('o2', TType.STRUCT, 2) + self.o2.write(oprot) + oprot.writeFieldEnd() + if self.o3 is not None: + oprot.writeFieldBegin('o3', TType.STRUCT, 3) + self.o3.write(oprot) + oprot.writeFieldEnd() + if self.o4 is not None: + oprot.writeFieldBegin('o4', TType.STRUCT, 4) + self.o4.write(oprot) + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class delete_partition_column_statistics_args: """ Attributes: @@ -20552,10 +20790,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success 
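# --- illustrative client sketch (not part of the generated patch) ------------
# A minimal, hedged example of driving the new set_aggr_stats_for RPC through
# the Python bindings above. It assumes the generated ThriftHiveMetastore.Client
# wraps set_aggr_stats_for_args/_result in the usual way (mirroring the Ruby
# client further down), that a metastore thrift server listens on
# localhost:9083, and that the pre-existing get_table_column_statistics call is
# available to supply a well-formed ColumnStatistics payload; the 'default',
# 'web_logs' and 'ip' names are made up for illustration only.
from thrift.transport import TSocket, TTransport
from thrift.protocol import TBinaryProtocol

from hive_metastore import ThriftHiveMetastore
from hive_metastore.ttypes import (SetPartitionsStatsRequest,
                                   NoSuchObjectException, InvalidObjectException,
                                   MetaException, InvalidInputException)

transport = TTransport.TBufferedTransport(TSocket.TSocket('localhost', 9083))
client = ThriftHiveMetastore.Client(TBinaryProtocol.TBinaryProtocol(transport))
transport.open()
try:
    # Reuse existing table-level statistics so the request is type-correct;
    # a real caller would build its own aggregated statistics instead.
    stats = client.get_table_column_statistics('default', 'web_logs', 'ip')
    ok = client.set_aggr_stats_for(SetPartitionsStatsRequest(colStats=[stats]))
    print('set_aggr_stats_for returned %s' % ok)
except (NoSuchObjectException, InvalidObjectException,
        MetaException, InvalidInputException) as e:
    # The o1..o4 slots of set_aggr_stats_for_result surface as these exceptions.
    print('metastore rejected the statistics: %r' % e)
finally:
    transport.close()
# ------------------------------------------------------------------------------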
= [] - (_etype704, _size701) = iprot.readListBegin() - for _i705 in xrange(_size701): - _elem706 = iprot.readString(); - self.success.append(_elem706) + (_etype711, _size708) = iprot.readListBegin() + for _i712 in xrange(_size708): + _elem713 = iprot.readString(); + self.success.append(_elem713) iprot.readListEnd() else: iprot.skip(ftype) @@ -20578,8 +20816,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter707 in self.success: - oprot.writeString(iter707) + for iter714 in self.success: + oprot.writeString(iter714) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21097,10 +21335,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype711, _size708) = iprot.readListBegin() - for _i712 in xrange(_size708): - _elem713 = iprot.readString(); - self.success.append(_elem713) + (_etype718, _size715) = iprot.readListBegin() + for _i719 in xrange(_size715): + _elem720 = iprot.readString(); + self.success.append(_elem720) iprot.readListEnd() else: iprot.skip(ftype) @@ -21123,8 +21361,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter714 in self.success: - oprot.writeString(iter714) + for iter721 in self.success: + oprot.writeString(iter721) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -21597,11 +21835,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype718, _size715) = iprot.readListBegin() - for _i719 in xrange(_size715): - _elem720 = Role() - _elem720.read(iprot) - self.success.append(_elem720) + (_etype725, _size722) = iprot.readListBegin() + for _i726 in xrange(_size722): + _elem727 = Role() + _elem727.read(iprot) + self.success.append(_elem727) iprot.readListEnd() else: iprot.skip(ftype) @@ -21624,8 +21862,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter721 in self.success: - iter721.write(oprot) + for iter728 in self.success: + iter728.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22095,10 +22333,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.group_names = [] - (_etype725, _size722) = iprot.readListBegin() - for _i726 in xrange(_size722): - _elem727 = iprot.readString(); - self.group_names.append(_elem727) + (_etype732, _size729) = iprot.readListBegin() + for _i733 in xrange(_size729): + _elem734 = iprot.readString(); + self.group_names.append(_elem734) iprot.readListEnd() else: iprot.skip(ftype) @@ -22123,8 +22361,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter728 in self.group_names: - oprot.writeString(iter728) + for iter735 in self.group_names: + oprot.writeString(iter735) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22331,11 +22569,11 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype732, _size729) = iprot.readListBegin() - for _i733 in xrange(_size729): - _elem734 = HiveObjectPrivilege() - _elem734.read(iprot) - self.success.append(_elem734) + (_etype739, _size736) = iprot.readListBegin() + for _i740 in xrange(_size736): + _elem741 = 
HiveObjectPrivilege() + _elem741.read(iprot) + self.success.append(_elem741) iprot.readListEnd() else: iprot.skip(ftype) @@ -22358,8 +22596,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRUCT, len(self.success)) - for iter735 in self.success: - iter735.write(oprot) + for iter742 in self.success: + iter742.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: @@ -22818,10 +23056,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.group_names = [] - (_etype739, _size736) = iprot.readListBegin() - for _i740 in xrange(_size736): - _elem741 = iprot.readString(); - self.group_names.append(_elem741) + (_etype746, _size743) = iprot.readListBegin() + for _i747 in xrange(_size743): + _elem748 = iprot.readString(); + self.group_names.append(_elem748) iprot.readListEnd() else: iprot.skip(ftype) @@ -22842,8 +23080,8 @@ def write(self, oprot): if self.group_names is not None: oprot.writeFieldBegin('group_names', TType.LIST, 2) oprot.writeListBegin(TType.STRING, len(self.group_names)) - for iter742 in self.group_names: - oprot.writeString(iter742) + for iter749 in self.group_names: + oprot.writeString(iter749) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -22892,10 +23130,10 @@ def read(self, iprot): if fid == 0: if ftype == TType.LIST: self.success = [] - (_etype746, _size743) = iprot.readListBegin() - for _i747 in xrange(_size743): - _elem748 = iprot.readString(); - self.success.append(_elem748) + (_etype753, _size750) = iprot.readListBegin() + for _i754 in xrange(_size750): + _elem755 = iprot.readString(); + self.success.append(_elem755) iprot.readListEnd() else: iprot.skip(ftype) @@ -22918,8 +23156,8 @@ def write(self, oprot): if self.success is not None: oprot.writeFieldBegin('success', TType.LIST, 0) oprot.writeListBegin(TType.STRING, len(self.success)) - for iter749 in self.success: - oprot.writeString(iter749) + for iter756 in self.success: + oprot.writeString(iter756) oprot.writeListEnd() oprot.writeFieldEnd() if self.o1 is not None: diff --git a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py index e13243b..d7e002b 100644 --- a/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py +++ b/metastore/src/gen/thrift/gen-py/hive_metastore/ttypes.py @@ -4233,6 +4233,77 @@ def __eq__(self, other): def __ne__(self, other): return not (self == other) +class SetPartitionsStatsRequest: + """ + Attributes: + - colStats + """ + + thrift_spec = ( + None, # 0 + (1, TType.LIST, 'colStats', (TType.STRUCT,(ColumnStatistics, ColumnStatistics.thrift_spec)), None, ), # 1 + ) + + def __init__(self, colStats=None,): + self.colStats = colStats + + def read(self, iprot): + if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None: + fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec)) + return + iprot.readStructBegin() + while True: + (fname, ftype, fid) = iprot.readFieldBegin() + if ftype == TType.STOP: + break + if fid == 1: + if ftype == TType.LIST: + self.colStats = [] + (_etype226, _size223) = iprot.readListBegin() + for _i227 in xrange(_size223): + _elem228 = ColumnStatistics() + _elem228.read(iprot) + self.colStats.append(_elem228) + iprot.readListEnd() + else: + iprot.skip(ftype) + else: + iprot.skip(ftype) + iprot.readFieldEnd() + 
iprot.readStructEnd() + + def write(self, oprot): + if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None: + oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec))) + return + oprot.writeStructBegin('SetPartitionsStatsRequest') + if self.colStats is not None: + oprot.writeFieldBegin('colStats', TType.LIST, 1) + oprot.writeListBegin(TType.STRUCT, len(self.colStats)) + for iter229 in self.colStats: + iter229.write(oprot) + oprot.writeListEnd() + oprot.writeFieldEnd() + oprot.writeFieldStop() + oprot.writeStructEnd() + + def validate(self): + if self.colStats is None: + raise TProtocol.TProtocolException(message='Required field colStats is unset!') + return + + + def __repr__(self): + L = ['%s=%r' % (key, value) + for key, value in self.__dict__.iteritems()] + return '%s(%s)' % (self.__class__.__name__, ', '.join(L)) + + def __eq__(self, other): + return isinstance(other, self.__class__) and self.__dict__ == other.__dict__ + + def __ne__(self, other): + return not (self == other) + class Schema: """ Attributes: @@ -4262,22 +4333,22 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.fieldSchemas = [] - (_etype226, _size223) = iprot.readListBegin() - for _i227 in xrange(_size223): - _elem228 = FieldSchema() - _elem228.read(iprot) - self.fieldSchemas.append(_elem228) + (_etype233, _size230) = iprot.readListBegin() + for _i234 in xrange(_size230): + _elem235 = FieldSchema() + _elem235.read(iprot) + self.fieldSchemas.append(_elem235) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.MAP: self.properties = {} - (_ktype230, _vtype231, _size229 ) = iprot.readMapBegin() - for _i233 in xrange(_size229): - _key234 = iprot.readString(); - _val235 = iprot.readString(); - self.properties[_key234] = _val235 + (_ktype237, _vtype238, _size236 ) = iprot.readMapBegin() + for _i240 in xrange(_size236): + _key241 = iprot.readString(); + _val242 = iprot.readString(); + self.properties[_key241] = _val242 iprot.readMapEnd() else: iprot.skip(ftype) @@ -4294,16 +4365,16 @@ def write(self, oprot): if self.fieldSchemas is not None: oprot.writeFieldBegin('fieldSchemas', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.fieldSchemas)) - for iter236 in self.fieldSchemas: - iter236.write(oprot) + for iter243 in self.fieldSchemas: + iter243.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 2) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter237,viter238 in self.properties.items(): - oprot.writeString(kiter237) - oprot.writeString(viter238) + for kiter244,viter245 in self.properties.items(): + oprot.writeString(kiter244) + oprot.writeString(viter245) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4350,11 +4421,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.properties = {} - (_ktype240, _vtype241, _size239 ) = iprot.readMapBegin() - for _i243 in xrange(_size239): - _key244 = iprot.readString(); - _val245 = iprot.readString(); - self.properties[_key244] = _val245 + (_ktype247, _vtype248, _size246 ) = iprot.readMapBegin() + for _i250 in xrange(_size246): + _key251 = iprot.readString(); + _val252 = iprot.readString(); + self.properties[_key251] = _val252 iprot.readMapEnd() else: iprot.skip(ftype) @@ -4371,9 +4442,9 @@ def write(self, oprot): if self.properties is not None: oprot.writeFieldBegin('properties', TType.MAP, 
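# --- illustrative construction sketch (not part of the generated patch) -------
# A small, hedged example of building the SetPartitionsStatsRequest struct
# defined above from scratch. The ColumnStatisticsDesc/Obj/Data and
# LongColumnStatsData field names are assumed to follow the pre-existing
# statistics structs in this module; database, table, partition and column
# names are invented for illustration.
from hive_metastore.ttypes import (SetPartitionsStatsRequest, ColumnStatistics,
                                   ColumnStatisticsDesc, ColumnStatisticsObj,
                                   ColumnStatisticsData, LongColumnStatsData)

desc = ColumnStatisticsDesc(isTblLevel=False, dbName='default',
                            tableName='web_logs', partName='ds=2014-05-01')
status_stats = ColumnStatisticsObj(
    colName='status_code', colType='int',
    statsData=ColumnStatisticsData(
        longStats=LongColumnStatsData(lowValue=200, highValue=504,
                                      numNulls=0, numDVs=9)))
request = SetPartitionsStatsRequest(
    colStats=[ColumnStatistics(statsDesc=desc, statsObj=[status_stats])])
request.validate()  # raises TProtocolException when the required colStats is unset
# -------------------------------------------------------------------------------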
1) oprot.writeMapBegin(TType.STRING, TType.STRING, len(self.properties)) - for kiter246,viter247 in self.properties.items(): - oprot.writeString(kiter246) - oprot.writeString(viter247) + for kiter253,viter254 in self.properties.items(): + oprot.writeString(kiter253) + oprot.writeString(viter254) oprot.writeMapEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4423,11 +4494,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype251, _size248) = iprot.readListBegin() - for _i252 in xrange(_size248): - _elem253 = Partition() - _elem253.read(iprot) - self.partitions.append(_elem253) + (_etype258, _size255) = iprot.readListBegin() + for _i259 in xrange(_size255): + _elem260 = Partition() + _elem260.read(iprot) + self.partitions.append(_elem260) iprot.readListEnd() else: iprot.skip(ftype) @@ -4449,8 +4520,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter254 in self.partitions: - iter254.write(oprot) + for iter261 in self.partitions: + iter261.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.hasUnknownPartitions is not None: @@ -4619,11 +4690,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.tableStats = [] - (_etype258, _size255) = iprot.readListBegin() - for _i259 in xrange(_size255): - _elem260 = ColumnStatisticsObj() - _elem260.read(iprot) - self.tableStats.append(_elem260) + (_etype265, _size262) = iprot.readListBegin() + for _i266 in xrange(_size262): + _elem267 = ColumnStatisticsObj() + _elem267.read(iprot) + self.tableStats.append(_elem267) iprot.readListEnd() else: iprot.skip(ftype) @@ -4640,8 +4711,8 @@ def write(self, oprot): if self.tableStats is not None: oprot.writeFieldBegin('tableStats', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.tableStats)) - for iter261 in self.tableStats: - iter261.write(oprot) + for iter268 in self.tableStats: + iter268.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4690,17 +4761,17 @@ def read(self, iprot): if fid == 1: if ftype == TType.MAP: self.partStats = {} - (_ktype263, _vtype264, _size262 ) = iprot.readMapBegin() - for _i266 in xrange(_size262): - _key267 = iprot.readString(); - _val268 = [] - (_etype272, _size269) = iprot.readListBegin() - for _i273 in xrange(_size269): - _elem274 = ColumnStatisticsObj() - _elem274.read(iprot) - _val268.append(_elem274) + (_ktype270, _vtype271, _size269 ) = iprot.readMapBegin() + for _i273 in xrange(_size269): + _key274 = iprot.readString(); + _val275 = [] + (_etype279, _size276) = iprot.readListBegin() + for _i280 in xrange(_size276): + _elem281 = ColumnStatisticsObj() + _elem281.read(iprot) + _val275.append(_elem281) iprot.readListEnd() - self.partStats[_key267] = _val268 + self.partStats[_key274] = _val275 iprot.readMapEnd() else: iprot.skip(ftype) @@ -4717,11 +4788,11 @@ def write(self, oprot): if self.partStats is not None: oprot.writeFieldBegin('partStats', TType.MAP, 1) oprot.writeMapBegin(TType.STRING, TType.LIST, len(self.partStats)) - for kiter275,viter276 in self.partStats.items(): - oprot.writeString(kiter275) - oprot.writeListBegin(TType.STRUCT, len(viter276)) - for iter277 in viter276: - iter277.write(oprot) + for kiter282,viter283 in self.partStats.items(): + oprot.writeString(kiter282) + oprot.writeListBegin(TType.STRUCT, len(viter283)) + for iter284 in viter283: + iter284.write(oprot) oprot.writeListEnd() oprot.writeMapEnd() 
oprot.writeFieldEnd() @@ -4787,10 +4858,10 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype281, _size278) = iprot.readListBegin() - for _i282 in xrange(_size278): - _elem283 = iprot.readString(); - self.colNames.append(_elem283) + (_etype288, _size285) = iprot.readListBegin() + for _i289 in xrange(_size285): + _elem290 = iprot.readString(); + self.colNames.append(_elem290) iprot.readListEnd() else: iprot.skip(ftype) @@ -4815,8 +4886,8 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter284 in self.colNames: - oprot.writeString(iter284) + for iter291 in self.colNames: + oprot.writeString(iter291) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4888,20 +4959,20 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.colNames = [] - (_etype288, _size285) = iprot.readListBegin() - for _i289 in xrange(_size285): - _elem290 = iprot.readString(); - self.colNames.append(_elem290) + (_etype295, _size292) = iprot.readListBegin() + for _i296 in xrange(_size292): + _elem297 = iprot.readString(); + self.colNames.append(_elem297) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 4: if ftype == TType.LIST: self.partNames = [] - (_etype294, _size291) = iprot.readListBegin() - for _i295 in xrange(_size291): - _elem296 = iprot.readString(); - self.partNames.append(_elem296) + (_etype301, _size298) = iprot.readListBegin() + for _i302 in xrange(_size298): + _elem303 = iprot.readString(); + self.partNames.append(_elem303) iprot.readListEnd() else: iprot.skip(ftype) @@ -4926,15 +4997,15 @@ def write(self, oprot): if self.colNames is not None: oprot.writeFieldBegin('colNames', TType.LIST, 3) oprot.writeListBegin(TType.STRING, len(self.colNames)) - for iter297 in self.colNames: - oprot.writeString(iter297) + for iter304 in self.colNames: + oprot.writeString(iter304) oprot.writeListEnd() oprot.writeFieldEnd() if self.partNames is not None: oprot.writeFieldBegin('partNames', TType.LIST, 4) oprot.writeListBegin(TType.STRING, len(self.partNames)) - for iter298 in self.partNames: - oprot.writeString(iter298) + for iter305 in self.partNames: + oprot.writeString(iter305) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -4989,11 +5060,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype302, _size299) = iprot.readListBegin() - for _i303 in xrange(_size299): - _elem304 = Partition() - _elem304.read(iprot) - self.partitions.append(_elem304) + (_etype309, _size306) = iprot.readListBegin() + for _i310 in xrange(_size306): + _elem311 = Partition() + _elem311.read(iprot) + self.partitions.append(_elem311) iprot.readListEnd() else: iprot.skip(ftype) @@ -5010,8 +5081,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter305 in self.partitions: - iter305.write(oprot) + for iter312 in self.partitions: + iter312.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5080,11 +5151,11 @@ def read(self, iprot): elif fid == 3: if ftype == TType.LIST: self.parts = [] - (_etype309, _size306) = iprot.readListBegin() - for _i310 in xrange(_size306): - _elem311 = Partition() - _elem311.read(iprot) - self.parts.append(_elem311) + (_etype316, _size313) = iprot.readListBegin() + for _i317 in xrange(_size313): + _elem318 = 
Partition() + _elem318.read(iprot) + self.parts.append(_elem318) iprot.readListEnd() else: iprot.skip(ftype) @@ -5119,8 +5190,8 @@ def write(self, oprot): if self.parts is not None: oprot.writeFieldBegin('parts', TType.LIST, 3) oprot.writeListBegin(TType.STRUCT, len(self.parts)) - for iter312 in self.parts: - iter312.write(oprot) + for iter319 in self.parts: + iter319.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.ifNotExists is not None: @@ -5183,11 +5254,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.partitions = [] - (_etype316, _size313) = iprot.readListBegin() - for _i317 in xrange(_size313): - _elem318 = Partition() - _elem318.read(iprot) - self.partitions.append(_elem318) + (_etype323, _size320) = iprot.readListBegin() + for _i324 in xrange(_size320): + _elem325 = Partition() + _elem325.read(iprot) + self.partitions.append(_elem325) iprot.readListEnd() else: iprot.skip(ftype) @@ -5204,8 +5275,8 @@ def write(self, oprot): if self.partitions is not None: oprot.writeFieldBegin('partitions', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.partitions)) - for iter319 in self.partitions: - iter319.write(oprot) + for iter326 in self.partitions: + iter326.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5329,21 +5400,21 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.names = [] - (_etype323, _size320) = iprot.readListBegin() - for _i324 in xrange(_size320): - _elem325 = iprot.readString(); - self.names.append(_elem325) + (_etype330, _size327) = iprot.readListBegin() + for _i331 in xrange(_size327): + _elem332 = iprot.readString(); + self.names.append(_elem332) iprot.readListEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.LIST: self.exprs = [] - (_etype329, _size326) = iprot.readListBegin() - for _i330 in xrange(_size326): - _elem331 = DropPartitionsExpr() - _elem331.read(iprot) - self.exprs.append(_elem331) + (_etype336, _size333) = iprot.readListBegin() + for _i337 in xrange(_size333): + _elem338 = DropPartitionsExpr() + _elem338.read(iprot) + self.exprs.append(_elem338) iprot.readListEnd() else: iprot.skip(ftype) @@ -5360,15 +5431,15 @@ def write(self, oprot): if self.names is not None: oprot.writeFieldBegin('names', TType.LIST, 1) oprot.writeListBegin(TType.STRING, len(self.names)) - for iter332 in self.names: - oprot.writeString(iter332) + for iter339 in self.names: + oprot.writeString(iter339) oprot.writeListEnd() oprot.writeFieldEnd() if self.exprs is not None: oprot.writeFieldBegin('exprs', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.exprs)) - for iter333 in self.exprs: - iter333.write(oprot) + for iter340 in self.exprs: + iter340.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5695,11 +5766,11 @@ def read(self, iprot): elif fid == 8: if ftype == TType.LIST: self.resourceUris = [] - (_etype337, _size334) = iprot.readListBegin() - for _i338 in xrange(_size334): - _elem339 = ResourceUri() - _elem339.read(iprot) - self.resourceUris.append(_elem339) + (_etype344, _size341) = iprot.readListBegin() + for _i345 in xrange(_size341): + _elem346 = ResourceUri() + _elem346.read(iprot) + self.resourceUris.append(_elem346) iprot.readListEnd() else: iprot.skip(ftype) @@ -5744,8 +5815,8 @@ def write(self, oprot): if self.resourceUris is not None: oprot.writeFieldBegin('resourceUris', TType.LIST, 8) oprot.writeListBegin(TType.STRUCT, len(self.resourceUris)) - for iter340 in self.resourceUris: - iter340.write(oprot) + for iter347 in 
self.resourceUris: + iter347.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5904,11 +5975,11 @@ def read(self, iprot): elif fid == 2: if ftype == TType.LIST: self.open_txns = [] - (_etype344, _size341) = iprot.readListBegin() - for _i345 in xrange(_size341): - _elem346 = TxnInfo() - _elem346.read(iprot) - self.open_txns.append(_elem346) + (_etype351, _size348) = iprot.readListBegin() + for _i352 in xrange(_size348): + _elem353 = TxnInfo() + _elem353.read(iprot) + self.open_txns.append(_elem353) iprot.readListEnd() else: iprot.skip(ftype) @@ -5929,8 +6000,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.LIST, 2) oprot.writeListBegin(TType.STRUCT, len(self.open_txns)) - for iter347 in self.open_txns: - iter347.write(oprot) + for iter354 in self.open_txns: + iter354.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -5989,10 +6060,10 @@ def read(self, iprot): elif fid == 2: if ftype == TType.SET: self.open_txns = set() - (_etype351, _size348) = iprot.readSetBegin() - for _i352 in xrange(_size348): - _elem353 = iprot.readI64(); - self.open_txns.add(_elem353) + (_etype358, _size355) = iprot.readSetBegin() + for _i359 in xrange(_size355): + _elem360 = iprot.readI64(); + self.open_txns.add(_elem360) iprot.readSetEnd() else: iprot.skip(ftype) @@ -6013,8 +6084,8 @@ def write(self, oprot): if self.open_txns is not None: oprot.writeFieldBegin('open_txns', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.open_txns)) - for iter354 in self.open_txns: - oprot.writeI64(iter354) + for iter361 in self.open_txns: + oprot.writeI64(iter361) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6155,10 +6226,10 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.txn_ids = [] - (_etype358, _size355) = iprot.readListBegin() - for _i359 in xrange(_size355): - _elem360 = iprot.readI64(); - self.txn_ids.append(_elem360) + (_etype365, _size362) = iprot.readListBegin() + for _i366 in xrange(_size362): + _elem367 = iprot.readI64(); + self.txn_ids.append(_elem367) iprot.readListEnd() else: iprot.skip(ftype) @@ -6175,8 +6246,8 @@ def write(self, oprot): if self.txn_ids is not None: oprot.writeFieldBegin('txn_ids', TType.LIST, 1) oprot.writeListBegin(TType.I64, len(self.txn_ids)) - for iter361 in self.txn_ids: - oprot.writeI64(iter361) + for iter368 in self.txn_ids: + oprot.writeI64(iter368) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -6472,11 +6543,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.component = [] - (_etype365, _size362) = iprot.readListBegin() - for _i366 in xrange(_size362): - _elem367 = LockComponent() - _elem367.read(iprot) - self.component.append(_elem367) + (_etype372, _size369) = iprot.readListBegin() + for _i373 in xrange(_size369): + _elem374 = LockComponent() + _elem374.read(iprot) + self.component.append(_elem374) iprot.readListEnd() else: iprot.skip(ftype) @@ -6508,8 +6579,8 @@ def write(self, oprot): if self.component is not None: oprot.writeFieldBegin('component', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.component)) - for iter368 in self.component: - iter368.write(oprot) + for iter375 in self.component: + iter375.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() if self.txnid is not None: @@ -7010,11 +7081,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.locks = [] - (_etype372, _size369) = iprot.readListBegin() - for _i373 in xrange(_size369): - 
_elem374 = ShowLocksResponseElement() - _elem374.read(iprot) - self.locks.append(_elem374) + (_etype379, _size376) = iprot.readListBegin() + for _i380 in xrange(_size376): + _elem381 = ShowLocksResponseElement() + _elem381.read(iprot) + self.locks.append(_elem381) iprot.readListEnd() else: iprot.skip(ftype) @@ -7031,8 +7102,8 @@ def write(self, oprot): if self.locks is not None: oprot.writeFieldBegin('locks', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.locks)) - for iter375 in self.locks: - iter375.write(oprot) + for iter382 in self.locks: + iter382.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7230,20 +7301,20 @@ def read(self, iprot): if fid == 1: if ftype == TType.SET: self.aborted = set() - (_etype379, _size376) = iprot.readSetBegin() - for _i380 in xrange(_size376): - _elem381 = iprot.readI64(); - self.aborted.add(_elem381) + (_etype386, _size383) = iprot.readSetBegin() + for _i387 in xrange(_size383): + _elem388 = iprot.readI64(); + self.aborted.add(_elem388) iprot.readSetEnd() else: iprot.skip(ftype) elif fid == 2: if ftype == TType.SET: self.nosuch = set() - (_etype385, _size382) = iprot.readSetBegin() - for _i386 in xrange(_size382): - _elem387 = iprot.readI64(); - self.nosuch.add(_elem387) + (_etype392, _size389) = iprot.readSetBegin() + for _i393 in xrange(_size389): + _elem394 = iprot.readI64(); + self.nosuch.add(_elem394) iprot.readSetEnd() else: iprot.skip(ftype) @@ -7260,15 +7331,15 @@ def write(self, oprot): if self.aborted is not None: oprot.writeFieldBegin('aborted', TType.SET, 1) oprot.writeSetBegin(TType.I64, len(self.aborted)) - for iter388 in self.aborted: - oprot.writeI64(iter388) + for iter395 in self.aborted: + oprot.writeI64(iter395) oprot.writeSetEnd() oprot.writeFieldEnd() if self.nosuch is not None: oprot.writeFieldBegin('nosuch', TType.SET, 2) oprot.writeSetBegin(TType.I64, len(self.nosuch)) - for iter389 in self.nosuch: - oprot.writeI64(iter389) + for iter396 in self.nosuch: + oprot.writeI64(iter396) oprot.writeSetEnd() oprot.writeFieldEnd() oprot.writeFieldStop() @@ -7635,11 +7706,11 @@ def read(self, iprot): if fid == 1: if ftype == TType.LIST: self.compacts = [] - (_etype393, _size390) = iprot.readListBegin() - for _i394 in xrange(_size390): - _elem395 = ShowCompactResponseElement() - _elem395.read(iprot) - self.compacts.append(_elem395) + (_etype400, _size397) = iprot.readListBegin() + for _i401 in xrange(_size397): + _elem402 = ShowCompactResponseElement() + _elem402.read(iprot) + self.compacts.append(_elem402) iprot.readListEnd() else: iprot.skip(ftype) @@ -7656,8 +7727,8 @@ def write(self, oprot): if self.compacts is not None: oprot.writeFieldBegin('compacts', TType.LIST, 1) oprot.writeListBegin(TType.STRUCT, len(self.compacts)) - for iter396 in self.compacts: - iter396.write(oprot) + for iter403 in self.compacts: + iter403.write(oprot) oprot.writeListEnd() oprot.writeFieldEnd() oprot.writeFieldStop() diff --git a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb index bd05eba..1b02321 100644 --- a/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb +++ b/metastore/src/gen/thrift/gen-rb/hive_metastore_types.rb @@ -1028,6 +1028,23 @@ class AggrStats ::Thrift::Struct.generate_accessors self end +class SetPartitionsStatsRequest + include ::Thrift::Struct, ::Thrift::Struct_Union + COLSTATS = 1 + + FIELDS = { + COLSTATS => {:type => ::Thrift::Types::LIST, :name => 'colStats', :element => {:type => ::Thrift::Types::STRUCT, :class => 
::ColumnStatistics}} + } + + def struct_fields; FIELDS; end + + def validate + raise ::Thrift::ProtocolException.new(::Thrift::ProtocolException::UNKNOWN, 'Required field colStats is unset!') unless @colStats + end + + ::Thrift::Struct.generate_accessors self +end + class Schema include ::Thrift::Struct, ::Thrift::Struct_Union FIELDSCHEMAS = 1 diff --git a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb index 74964b4..2962fd5 100644 --- a/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb +++ b/metastore/src/gen/thrift/gen-rb/thrift_hive_metastore.rb @@ -1279,6 +1279,25 @@ module ThriftHiveMetastore raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'get_aggr_stats_for failed: unknown result') end + def set_aggr_stats_for(request) + send_set_aggr_stats_for(request) + return recv_set_aggr_stats_for() + end + + def send_set_aggr_stats_for(request) + send_message('set_aggr_stats_for', Set_aggr_stats_for_args, :request => request) + end + + def recv_set_aggr_stats_for() + result = receive_message(Set_aggr_stats_for_result) + return result.success unless result.success.nil? + raise result.o1 unless result.o1.nil? + raise result.o2 unless result.o2.nil? + raise result.o3 unless result.o3.nil? + raise result.o4 unless result.o4.nil? + raise ::Thrift::ApplicationException.new(::Thrift::ApplicationException::MISSING_RESULT, 'set_aggr_stats_for failed: unknown result') + end + def delete_partition_column_statistics(db_name, tbl_name, part_name, col_name) send_delete_partition_column_statistics(db_name, tbl_name, part_name, col_name) return recv_delete_partition_column_statistics() @@ -2907,6 +2926,23 @@ module ThriftHiveMetastore write_result(result, oprot, 'get_aggr_stats_for', seqid) end + def process_set_aggr_stats_for(seqid, iprot, oprot) + args = read_args(iprot, Set_aggr_stats_for_args) + result = Set_aggr_stats_for_result.new() + begin + result.success = @handler.set_aggr_stats_for(args.request) + rescue ::NoSuchObjectException => o1 + result.o1 = o1 + rescue ::InvalidObjectException => o2 + result.o2 = o2 + rescue ::MetaException => o3 + result.o3 = o3 + rescue ::InvalidInputException => o4 + result.o4 = o4 + end + write_result(result, oprot, 'set_aggr_stats_for', seqid) + end + def process_delete_partition_column_statistics(seqid, iprot, oprot) args = read_args(iprot, Delete_partition_column_statistics_args) result = Delete_partition_column_statistics_result.new() @@ -6264,6 +6300,46 @@ module ThriftHiveMetastore ::Thrift::Struct.generate_accessors self end + class Set_aggr_stats_for_args + include ::Thrift::Struct, ::Thrift::Struct_Union + REQUEST = 1 + + FIELDS = { + REQUEST => {:type => ::Thrift::Types::STRUCT, :name => 'request', :class => ::SetPartitionsStatsRequest} + } + + def struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + + class Set_aggr_stats_for_result + include ::Thrift::Struct, ::Thrift::Struct_Union + SUCCESS = 0 + O1 = 1 + O2 = 2 + O3 = 3 + O4 = 4 + + FIELDS = { + SUCCESS => {:type => ::Thrift::Types::BOOL, :name => 'success'}, + O1 => {:type => ::Thrift::Types::STRUCT, :name => 'o1', :class => ::NoSuchObjectException}, + O2 => {:type => ::Thrift::Types::STRUCT, :name => 'o2', :class => ::InvalidObjectException}, + O3 => {:type => ::Thrift::Types::STRUCT, :name => 'o3', :class => ::MetaException}, + O4 => {:type => ::Thrift::Types::STRUCT, :name => 'o4', :class => ::InvalidInputException} + } + + def 
struct_fields; FIELDS; end + + def validate + end + + ::Thrift::Struct.generate_accessors self + end + class Delete_partition_column_statistics_args include ::Thrift::Struct, ::Thrift::Struct_Union DB_NAME = 1 diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java index c36b32e..963b184 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveAlterHandler.java @@ -222,10 +222,10 @@ public void alterTable(RawStore msdb, Warehouse wh, String dbname, if (success && moveData) { // change the file name in hdfs // check that src exists otherwise there is no need to copy the data + // rename the src to destination try { - if (srcFs.exists(srcPath)) { - // rename the src to destination - srcFs.rename(srcPath, destPath); + if (srcFs.exists(srcPath) && !srcFs.rename(srcPath, destPath)) { + throw new IOException("Renaming " + srcPath + " to " + destPath + " is failed"); } } catch (IOException e) { boolean revertMetaDataTransaction = false; diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java index 84ef5f9..06d7595 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStore.java @@ -122,6 +122,7 @@ import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; @@ -5023,12 +5024,8 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) startFunction("get_aggr_stats_for: db=" + request.getDbName() + " table=" + request.getTblName()); AggrStats aggrStats = null; try { - //TODO: We are setting partitionCnt for which we were able to retrieve stats same as - // incoming number from request. This is not correct, but currently no users of this api - // rely on this. Only, current user StatsAnnotation don't care for it. StatsOptimizer - // will care for it, so before StatsOptimizer begin using it, we need to fix this. 
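This get_aggr_stats_for hunk replaces the hard-coded partition count with the partsFound value computed by the store, so callers can tell how many of the requested partitions actually contributed statistics. A minimal caller-side sketch, assuming the thrift-generated AggrStats getters and the IMetaStoreClient.getAggrColStatsFor method shown elsewhere in this patch; the database, table, column, and partition names are made up:

import java.util.Arrays;
import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.AggrStats;
import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj;

public class AggrStatsExample {
  public static void printAggrStats(IMetaStoreClient client) throws Exception {
    List<String> cols = Arrays.asList("id", "amount");
    List<String> parts = Arrays.asList("ds=2014-01-01", "ds=2014-01-02");
    AggrStats stats = client.getAggrColStatsFor("default", "sales", cols, parts);
    // partsFound now reflects how many requested partitions actually had stats
    // for every requested column, rather than echoing parts.size().
    System.out.println("partitions with stats: " + stats.getPartsFound() + "/" + parts.size());
    for (ColumnStatisticsObj obj : stats.getColStats()) {
      System.out.println(obj.getColName() + " -> " + obj.getStatsData());
    }
  }
}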
aggrStats = new AggrStats(getMS().get_aggr_stats_for(request.getDbName(), - request.getTblName(), request.getPartNames(), request.getColNames()), request.getPartNames().size()); + request.getTblName(), request.getPartNames(), request.getColNames())); return aggrStats; } finally { endFunction("get_partitions_statistics_req: ", aggrStats == null, null, request.getTblName()); @@ -5036,6 +5033,17 @@ public AggrStats get_aggr_stats_for(PartitionsStatsRequest request) } + @Override + public boolean set_aggr_stats_for(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, + InvalidInputException, TException { + boolean ret = true; + for (ColumnStatistics colStats : request.getColStats()) { + ret = ret && update_partition_column_statistics(colStats); + } + return ret; + } + } public static IHMSHandler newHMSHandler(String name, HiveConf hiveConf) throws MetaException { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java index 237166e..a94a7a3 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/HiveMetaStoreClient.java @@ -105,6 +105,7 @@ import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.RequestPartsSpec; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.ShowLocksRequest; @@ -1264,6 +1265,13 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) InvalidInputException{ return client.update_partition_column_statistics(statsObj); } + + /** {@inheritDoc} */ + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) + throws NoSuchObjectException, InvalidObjectException, MetaException, TException, + InvalidInputException{ + return client.set_aggr_stats_for(request); + } /** {@inheritDoc} */ @Override diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IExtrapolatePartStatus.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IExtrapolatePartStatus.java new file mode 100644 index 0000000..74f1b01 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IExtrapolatePartStatus.java @@ -0,0 +1,73 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.util.HashMap; +import java.util.Map; + +public interface IExtrapolatePartStatus { + /** + * The sequence of colStatNames. 
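The pieces added above (the set_aggr_stats_for Thrift endpoint, HiveMetaStore.set_aggr_stats_for, and HiveMetaStoreClient.setPartitionColumnStatistics) let a client persist column statistics for many partitions in one RPC; the server currently applies the entries one by one and ANDs the results. A minimal sketch, assuming a List<ColumnStatistics> has already been assembled (one entry per partition); the class and method names here are illustrative only:

import java.util.List;
import org.apache.hadoop.hive.metastore.IMetaStoreClient;
import org.apache.hadoop.hive.metastore.api.ColumnStatistics;
import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest;

public class BatchStatsWriter {
  public static boolean persist(IMetaStoreClient client, List<ColumnStatistics> perPartitionStats)
      throws Exception {
    // One round trip instead of one updatePartitionColumnStatistics() call per partition.
    return client.setPartitionColumnStatistics(new SetPartitionsStatsRequest(perPartitionStats));
  }
}

ColumnStatsTask, later in this patch, batches its per-partition statistics into a single SetPartitionsStatsRequest in the same way.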
+ */ + static String[] colStatNames = new String[] { "LONG_LOW_VALUE", + "LONG_HIGH_VALUE", "DOUBLE_LOW_VALUE", "DOUBLE_HIGH_VALUE", + "BIG_DECIMAL_LOW_VALUE", "BIG_DECIMAL_HIGH_VALUE", "NUM_NULLS", + "NUM_DISTINCTS", "AVG_COL_LEN", "MAX_COL_LEN", "NUM_TRUES", "NUM_FALSES" }; + + /** + * The indexes for colstats. + */ + static HashMap indexMaps = new HashMap(){{ + put("long", new Integer [] {0,1,6,7}); + put("double", new Integer [] {2,3,6,7}); + put("string", new Integer [] {8,9,6,7}); + put("boolean", new Integer [] {10,11,6}); + put("binary", new Integer [] {8,9,6}); + put("decimal", new Integer [] {4,5,6,7}); + put("default", new Integer [] {0,1,2,3,4,5,6,7,8,9,10,11}); +}}; + + /** + * The sequence of colStatTypes. + */ + static enum ColStatType { + Long, Double, Decimal + } + + static ColStatType[] colStatTypes = new ColStatType[] { ColStatType.Long, + ColStatType.Long, ColStatType.Double, ColStatType.Double, + ColStatType.Decimal, ColStatType.Decimal, ColStatType.Long, + ColStatType.Long, ColStatType.Double, ColStatType.Long, ColStatType.Long, + ColStatType.Long }; + + /** + * The sequence of aggregation function on colStats. + */ + static enum AggrType { + Min, Max, Sum + } + + static AggrType[] aggrTypes = new AggrType[] { AggrType.Min, AggrType.Max, + AggrType.Min, AggrType.Max, AggrType.Min, AggrType.Max, AggrType.Sum, + AggrType.Max, AggrType.Max, AggrType.Max, AggrType.Sum, AggrType.Sum }; + + public Object extrapolate(Object[] min, Object[] max, int colStatIndex, + Map indexMap); + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java index 143d1c7..cbdba30 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/IMetaStoreClient.java @@ -65,6 +65,7 @@ import org.apache.hadoop.hive.metastore.api.PrincipalType; import org.apache.hadoop.hive.metastore.api.PrivilegeBag; import org.apache.hadoop.hive.metastore.api.Role; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.Table; import org.apache.hadoop.hive.metastore.api.UnknownDBException; import org.apache.hadoop.hive.metastore.api.UnknownPartitionException; @@ -1298,4 +1299,6 @@ GetRoleGrantsForPrincipalResponse get_role_grants_for_principal( public AggrStats getAggrColStatsFor(String dbName, String tblName, List colNames, List partName) throws NoSuchObjectException, MetaException, TException; + + boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws NoSuchObjectException, InvalidObjectException, MetaException, TException, InvalidInputException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/LinearExtrapolatePartStatus.java b/metastore/src/java/org/apache/hadoop/hive/metastore/LinearExtrapolatePartStatus.java new file mode 100644 index 0000000..7fc04f1 --- /dev/null +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/LinearExtrapolatePartStatus.java @@ -0,0 +1,132 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.metastore; + +import java.math.BigDecimal; +import java.nio.ByteBuffer; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.Decimal; + +public class LinearExtrapolatePartStatus implements IExtrapolatePartStatus { + + @Override + public Object extrapolate(Object[] min, Object[] max, int colStatIndex, + Map indexMap) { + int rightBorderInd = indexMap.size() - 1; + int minInd = indexMap.get((String) min[1]); + int maxInd = indexMap.get((String) max[1]); + if (minInd == maxInd) { + return min[0]; + } + if (aggrTypes[colStatIndex] == AggrType.Max) { + if (minInd < maxInd) { + // right border is the max + if (colStatTypes[colStatIndex] == ColStatType.Long) { + return (Long) ((Long) min[0] + (((Long) max[0] - (Long) min[0]) + * (rightBorderInd - minInd) / (maxInd - minInd))); + } else if (colStatTypes[colStatIndex] == ColStatType.Double) { + return (Double) ((Double) min[0] + (((Double) max[0] - (Double) min[0]) + * (rightBorderInd - minInd) / (maxInd - minInd))); + } else { + Decimal dmax = (Decimal) max[0]; + BigDecimal bdmax = new BigDecimal(dmax.toString()); + double doublemax = bdmax.doubleValue(); + Decimal dmin = (Decimal) min[0]; + BigDecimal bdmin = new BigDecimal(dmin.toString()); + double doublemin = bdmin.doubleValue(); + double ret = doublemin + (doublemax - doublemin) + * (rightBorderInd - minInd) / (maxInd - minInd); + return createThriftDecimal(String.valueOf(ret)); + } + } else { + // left border is the max + if (colStatTypes[colStatIndex] == ColStatType.Long) { + return (Long) ((Long) min[0] + ((Long) max[0] - (Long) min[0]) + * minInd / (minInd - maxInd)); + } else if (colStatTypes[colStatIndex] == ColStatType.Double) { + return (Double) ((Double) min[0] + ((Double) max[0] - (Double) min[0]) + * minInd / (maxInd - minInd)); + } else { + Decimal dmax = (Decimal) max[0]; + BigDecimal bdmax = new BigDecimal(dmax.toString()); + double doublemax = bdmax.doubleValue(); + Decimal dmin = (Decimal) min[0]; + BigDecimal bdmin = new BigDecimal(dmin.toString()); + double doublemin = bdmin.doubleValue(); + double ret = doublemin + (doublemax - doublemin) * minInd + / (maxInd - minInd); + return createThriftDecimal(String.valueOf(ret)); + } + } + } else { + if (minInd < maxInd) { + // left border is the min + if (colStatTypes[colStatIndex] == ColStatType.Long) { + Long ret = (Long) max[0] - ((Long) max[0] - (Long) min[0]) * maxInd + / (maxInd - minInd); + return ret; + } else if (colStatTypes[colStatIndex] == ColStatType.Double) { + Double ret = (Double) max[0] - ((Double) max[0] - (Double) min[0]) + * maxInd / (maxInd - minInd); + return ret; + } else { + Decimal dmax = (Decimal) max[0]; + BigDecimal bdmax = new BigDecimal(dmax.toString()); + double doublemax = bdmax.doubleValue(); + Decimal dmin = (Decimal) min[0]; + BigDecimal bdmin = new BigDecimal(dmin.toString()); + double doublemin = bdmin.doubleValue(); + double ret = doublemax - (doublemax - doublemin) * maxInd + / (maxInd - minInd); + return createThriftDecimal(String.valueOf(ret)); + + } + } else { + // right border is the min + if (colStatTypes[colStatIndex] == 
ColStatType.Long) { + Long ret = (Long) max[0] - ((Long) max[0] - (Long) min[0]) + * (rightBorderInd - maxInd) / (minInd - maxInd); + return ret; + } else if (colStatTypes[colStatIndex] == ColStatType.Double) { + Double ret = (Double) max[0] - ((Double) max[0] - (Double) min[0]) + * (rightBorderInd - maxInd) / (minInd - maxInd); + return ret; + } else { + Decimal dmax = (Decimal) max[0]; + BigDecimal bdmax = new BigDecimal(dmax.toString()); + double doublemax = bdmax.doubleValue(); + Decimal dmin = (Decimal) min[0]; + BigDecimal bdmin = new BigDecimal(dmin.toString()); + double doublemin = bdmin.doubleValue(); + double ret = doublemax - (doublemax - doublemin) + * (rightBorderInd - maxInd) / (minInd - maxInd); + return createThriftDecimal(String.valueOf(ret)); + } + } + } + } + + private static Decimal createThriftDecimal(String s) { + BigDecimal d = new BigDecimal(s); + return new Decimal(ByteBuffer.wrap(d.unscaledValue().toByteArray()), + (short) d.scale()); + } + +} diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java index 767cffc..e7694b7 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/MetaStoreDirectSql.java @@ -21,19 +21,18 @@ import static org.apache.commons.lang.StringUtils.join; import static org.apache.commons.lang.StringUtils.repeat; +import java.math.BigDecimal; +import java.nio.ByteBuffer; import java.sql.Connection; import java.sql.SQLException; -import java.sql.Statement; import java.text.ParseException; -import java.text.SimpleDateFormat; import java.util.ArrayList; -import java.util.Date; +import java.util.Arrays; import java.util.HashMap; import java.util.Iterator; import java.util.List; import java.util.Map; import java.util.TreeMap; -import java.util.concurrent.atomic.AtomicLong; import javax.jdo.PersistenceManager; import javax.jdo.Query; @@ -43,10 +42,12 @@ import org.apache.commons.lang.StringUtils; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsData; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; +import org.apache.hadoop.hive.metastore.api.Decimal; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.Order; @@ -65,9 +66,8 @@ import org.apache.hadoop.hive.metastore.parser.ExpressionTree.Operator; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeNode; import org.apache.hadoop.hive.metastore.parser.ExpressionTree.TreeVisitor; -import org.apache.hadoop.hive.metastore.parser.FilterLexer; import org.apache.hadoop.hive.serde.serdeConstants; -import org.datanucleus.store.schema.SchemaTool; +import org.datanucleus.store.rdbms.query.ForwardQueryResult; import com.google.common.collect.Lists; @@ -97,7 +97,7 @@ * Whether direct SQL can be used with the current datastore backing {@link #pm}. 
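To make the linear extrapolation in LinearExtrapolatePartStatus above concrete, here is a small worked example with made-up partitions and values. It assumes the index map is keyed by partition name and valued by position, as MetaStoreDirectSql builds it further below; LONG_HIGH_VALUE (colStatIndex 1, aggregated with Max, stored as Long) is only known for ds=2 and ds=4 out of five partitions, so the estimate is stretched to the right border at index 4:

import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.hive.metastore.IExtrapolatePartStatus;
import org.apache.hadoop.hive.metastore.LinearExtrapolatePartStatus;

public class ExtrapolationExample {
  public static void main(String[] args) {
    Map<String, Integer> indexMap = new HashMap<String, Integer>();
    indexMap.put("ds=1", 0);
    indexMap.put("ds=2", 1);
    indexMap.put("ds=3", 2);
    indexMap.put("ds=4", 3);
    indexMap.put("ds=5", 4);
    Object[] min = new Object[] { Long.valueOf(100L), "ds=2" }; // smaller observed high value
    Object[] max = new Object[] { Long.valueOf(180L), "ds=4" }; // larger observed high value
    IExtrapolatePartStatus extrapolator = new LinearExtrapolatePartStatus();
    // Same integer arithmetic as above: 100 + (180 - 100) * (4 - 1) / (3 - 1) = 220
    System.out.println(extrapolator.extrapolate(min, max, 1, indexMap));
  }
}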
*/ private final boolean isCompatibleDatastore; - + public MetaStoreDirectSql(PersistenceManager pm) { this.pm = pm; Transaction tx = pm.currentTransaction(); @@ -893,33 +893,247 @@ public ColumnStatistics getTableStats( return result; } - public List aggrColStatsForPartitions(String dbName, String tableName, + public AggrStats aggrColStatsForPartitions(String dbName, String tableName, List partNames, List colNames) throws MetaException { - String qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", " - + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), " - + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), " - + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\"" - + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? and \"COLUMN_NAME\" in (" - + makeParams(colNames.size()) + ") AND \"PARTITION_NAME\" in (" - + makeParams(partNames.size()) + ") group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; + long partsFound = partsFoundForPartitions(dbName, tableName, partNames, + colNames); + List stats = columnStatisticsObjForPartitions(dbName, + tableName, partNames, colNames, partsFound); + return new AggrStats(stats, partsFound); + } + private long partsFoundForPartitions(String dbName, String tableName, + List partNames, List colNames) throws MetaException { + long partsFound = 0; boolean doTrace = LOG.isDebugEnabled(); + String qText = "select count(\"COLUMN_NAME\") from \"PART_COL_STATS\"" + + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " group by \"PARTITION_NAME\""; long start = doTrace ? System.nanoTime() : 0; Query query = pm.newQuery("javax.jdo.query.SQL", qText); - Object qResult = query.executeWithArray(prepareParams(dbName, tableName, partNames, colNames)); - if (qResult == null) { - query.closeAll(); - return Lists.newArrayList(); - } - List list = ensureList(qResult); - List colStats = new ArrayList(list.size()); - for (Object[] row : list) { - colStats.add(prepareCSObj(row,0)); - } + Object qResult = query.executeWithArray(prepareParams(dbName, tableName, + partNames, colNames)); long end = doTrace ? System.nanoTime() : 0; timingTrace(doTrace, qText, start, end); - query.closeAll(); - return colStats; + ForwardQueryResult fqr = (ForwardQueryResult) qResult; + List colnumbers = new ArrayList(); + colnumbers.addAll(fqr); + for (Integer colnumber : colnumbers) { + if (colnumber == colNames.size()) + partsFound++; + } + return partsFound; + } + + private List columnStatisticsObjForPartitions( + String dbName, String tableName, List partNames, + List colNames, long partsFound) throws MetaException { + String commonPrefix = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", " + + "min(\"LONG_LOW_VALUE\"), max(\"LONG_HIGH_VALUE\"), min(\"DOUBLE_LOW_VALUE\"), max(\"DOUBLE_HIGH_VALUE\"), " + + "min(\"BIG_DECIMAL_LOW_VALUE\"), max(\"BIG_DECIMAL_HIGH_VALUE\"), sum(\"NUM_NULLS\"), max(\"NUM_DISTINCTS\"), " + + "max(\"AVG_COL_LEN\"), max(\"MAX_COL_LEN\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\") from \"PART_COL_STATS\"" + + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? 
"; + String qText = null; + long start = 0; + long end = 0; + Query query = null; + boolean doTrace = LOG.isDebugEnabled(); + Object qResult = null; + ForwardQueryResult fqr = null; + // Check if the status of all the columns of all the partitions exists + // Extrapolation is not needed. + if (partsFound == partNames.size()) { + qText = commonPrefix + + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; + start = doTrace ? System.nanoTime() : 0; + query = pm.newQuery("javax.jdo.query.SQL", qText); + qResult = query.executeWithArray(prepareParams(dbName, tableName, + partNames, colNames)); + if (qResult == null) { + query.closeAll(); + return Lists.newArrayList(); + } + end = doTrace ? System.nanoTime() : 0; + timingTrace(doTrace, qText, start, end); + List list = ensureList(qResult); + List colStats = new ArrayList( + list.size()); + for (Object[] row : list) { + colStats.add(prepareCSObj(row, 0)); + } + query.closeAll(); + return colStats; + } else { + // Extrapolation is needed for some columns. + // In this case, at least a column status for a partition is missing. + // We need to extrapolate this partition based on the other partitions + List colStats = new ArrayList( + colNames.size()); + qText = "select \"COLUMN_NAME\", \"COLUMN_TYPE\", count(\"PARTITION_NAME\") " + + " from \"PART_COL_STATS\"" + + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " and \"COLUMN_NAME\" in (" + makeParams(colNames.size()) + ")" + + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; + start = doTrace ? System.nanoTime() : 0; + query = pm.newQuery("javax.jdo.query.SQL", qText); + qResult = query.executeWithArray(prepareParams(dbName, tableName, + partNames, colNames)); + end = doTrace ? System.nanoTime() : 0; + timingTrace(doTrace, qText, start, end); + if (qResult == null) { + query.closeAll(); + return Lists.newArrayList(); + } + List noExtraColumnNames = new ArrayList(); + Map extraColumnNameTypeParts = new HashMap(); + List list = ensureList(qResult); + for (Object[] row : list) { + String colName = (String) row[0]; + String colType = (String) row[1]; + if ((Integer) row[2] == partNames.size() || (Integer) row[2] < 2) { + // Extrapolation is not needed for this column if + // count(\"PARTITION_NAME\")==partNames.size() + // Or, extrapolation is not possible for this column if + // count(\"PARTITION_NAME\")<2 + noExtraColumnNames.add(colName); + } else { + extraColumnNameTypeParts.put(colName, + new String[] { colType, String.valueOf((Integer) row[2]) }); + } + } + query.closeAll(); + // Extrapolation is not needed for columns noExtraColumnNames + if (noExtraColumnNames.size() != 0) { + qText = commonPrefix + + " and \"COLUMN_NAME\" in ("+ makeParams(noExtraColumnNames.size()) + ")" + + " and \"PARTITION_NAME\" in ("+ makeParams(partNames.size()) +")" + + " group by \"COLUMN_NAME\", \"COLUMN_TYPE\""; + start = doTrace ? System.nanoTime() : 0; + query = pm.newQuery("javax.jdo.query.SQL", qText); + qResult = query.executeWithArray(prepareParams(dbName, tableName, + partNames, noExtraColumnNames)); + if (qResult == null) { + query.closeAll(); + return Lists.newArrayList(); + } + list = ensureList(qResult); + for (Object[] row : list) { + colStats.add(prepareCSObj(row, 0)); + } + end = doTrace ? 
System.nanoTime() : 0; + timingTrace(doTrace, qText, start, end); + query.closeAll(); + } + // Extrapolation is needed for extraColumnNames. + // give a sequence number for all the partitions + if (extraColumnNameTypeParts.size() != 0) { + Map indexMap = new HashMap(); + for (int index = 0; index < partNames.size(); index++) { + indexMap.put(partNames.get(index), index); + } + // get sum for all columns to reduce the number of queries + Map> sumMap = new HashMap>(); + qText = "select \"COLUMN_NAME\", sum(\"NUM_NULLS\"), sum(\"NUM_TRUES\"), sum(\"NUM_FALSES\")" + + " from \"PART_COL_STATS\"" + + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ? " + + " and \"COLUMN_NAME\" in (" +makeParams(extraColumnNameTypeParts.size())+ ")" + + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " group by \"COLUMN_NAME\""; + start = doTrace ? System.nanoTime() : 0; + query = pm.newQuery("javax.jdo.query.SQL", qText); + List extraColumnNames = new ArrayList(); + extraColumnNames.addAll(extraColumnNameTypeParts.keySet()); + qResult = query.executeWithArray(prepareParams(dbName, tableName, + partNames, extraColumnNames)); + if (qResult == null) { + query.closeAll(); + return Lists.newArrayList(); + } + list = ensureList(qResult); + // see the indexes for colstats in IExtrapolatePartStatus + Integer[] sumIndex = new Integer[] { 6, 10, 11 }; + for (Object[] row : list) { + Map indexToObject = new HashMap(); + for (int ind = 1; ind < row.length; ind++) { + indexToObject.put(sumIndex[ind - 1], row[ind]); + } + sumMap.put((String) row[0], indexToObject); + } + end = doTrace ? System.nanoTime() : 0; + timingTrace(doTrace, qText, start, end); + query.closeAll(); + for (Map.Entry entry : extraColumnNameTypeParts + .entrySet()) { + Object[] row = new Object[IExtrapolatePartStatus.colStatNames.length + 2]; + String colName = entry.getKey(); + String colType = entry.getValue()[0]; + Long sumVal = Long.parseLong(entry.getValue()[1]); + // fill in colname + row[0] = colName; + // fill in coltype + row[1] = colType; + // use linear extrapolation. more complicated one can be added in the future. + IExtrapolatePartStatus extrapolateMethod = new LinearExtrapolatePartStatus(); + // fill in colstatus + Integer[] index = IExtrapolatePartStatus.indexMaps.get(colType + .toLowerCase()); + //if the colType is not the known type, long, double, etc, then get all index. + if (index == null) { + index = IExtrapolatePartStatus.indexMaps.get("default"); + } + for (int colStatIndex : index) { + String colStatName = IExtrapolatePartStatus.colStatNames[colStatIndex]; + // if the aggregation type is sum, we do a scale-up + if (IExtrapolatePartStatus.aggrTypes[colStatIndex] == IExtrapolatePartStatus.AggrType.Sum) { + Long val = (Long) sumMap.get(colName).get(colStatIndex); + if (val == null) { + row[2 + colStatIndex] = null; + } else { + row[2 + colStatIndex] = (Long) (val / sumVal * (partNames + .size())); + } + } else { + // if the aggregation type is min/max, we extrapolate from the + // left/right borders + qText = "select \"" + + colStatName + + "\",\"PARTITION_NAME\" from \"PART_COL_STATS\"" + + " where \"DB_NAME\" = ? and \"TABLE_NAME\" = ?" + + " and \"COLUMN_NAME\" in (" +makeParams(1)+ ")" + + " and \"PARTITION_NAME\" in (" + makeParams(partNames.size()) + ")" + + " order by \'" + colStatName + "\'"; + start = doTrace ? 
System.nanoTime() : 0; + query = pm.newQuery("javax.jdo.query.SQL", qText); + qResult = query.executeWithArray(prepareParams(dbName, + tableName, partNames, Arrays.asList(colName))); + if (qResult == null) { + query.closeAll(); + return Lists.newArrayList(); + } + fqr = (ForwardQueryResult) qResult; + Object[] min = (Object[]) (fqr.get(0)); + Object[] max = (Object[]) (fqr.get(fqr.size() - 1)); + end = doTrace ? System.nanoTime() : 0; + timingTrace(doTrace, qText, start, end); + query.closeAll(); + if (min[0] == null || max[0] == null) { + row[2 + colStatIndex] = null; + } else { + row[2 + colStatIndex] = extrapolateMethod.extrapolate(min, max, + colStatIndex, indexMap); + } + } + } + colStats.add(prepareCSObj(row, 0)); + } + } + return colStats; + } } private ColumnStatisticsObj prepareCSObj (Object[] row, int i) throws MetaException { @@ -949,7 +1163,7 @@ private ColumnStatisticsObj prepareCSObj (Object[] row, int i) throws MetaExcept return params; } - + public List getPartitionStats(String dbName, String tableName, List partNames, List colNames) throws MetaException { if (colNames.isEmpty() || partNames.isEmpty()) { diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java index dbb7d37..0693039 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/ObjectStore.java @@ -60,6 +60,7 @@ import org.apache.hadoop.hive.common.classification.InterfaceStability; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; +import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsDesc; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; @@ -2564,13 +2565,13 @@ public void alterTable(String dbname, String name, Table newTable) } // For now only alter name, owner, paramters, cols, bucketcols are allowed + oldt.setDatabase(newt.getDatabase()); oldt.setTableName(newt.getTableName().toLowerCase()); oldt.setParameters(newt.getParameters()); oldt.setOwner(newt.getOwner()); // Fully copy over the contents of the new SD into the old SD, // so we don't create an extra SD in the metastore db that has no references. 
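For the sum-type statistics handled in the extrapolation path above (NUM_NULLS, NUM_TRUES, NUM_FALSES), the estimate is a simple scale-up from the partitions that do have stats to the full partition count. A tiny sketch with made-up numbers, mirroring the integer arithmetic of that hunk:

public class SumScaleUpExample {
  public static void main(String[] args) {
    long observedNumNulls = 40L; // sum of NUM_NULLS over the partitions that have stats
    long partsWithStats = 8L;    // partitions that have stats for this column
    long totalParts = 10L;       // all requested partitions
    // Divide first, then scale up, as in the hunk above: 40 / 8 * 10 = 50.
    System.out.println("extrapolated NUM_NULLS: " + (observedNumNulls / partsWithStats * totalParts));
  }
}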
copyMSD(newt.getSd(), oldt.getSd()); - oldt.setDatabase(newt.getDatabase()); oldt.setRetention(newt.getRetention()); oldt.setPartitionKeys(newt.getPartitionKeys()); oldt.setTableType(newt.getTableType()); @@ -5904,25 +5905,28 @@ protected ColumnStatistics getJdoResult( @Override - public List get_aggr_stats_for(String dbName, String tblName, + public AggrStats get_aggr_stats_for(String dbName, String tblName, final List partNames, final List colNames) throws MetaException, NoSuchObjectException { - - return new GetListHelper(dbName, tblName, true, false) { + return new GetHelper(dbName, tblName, true, false) { @Override - protected List getSqlResult( - GetHelper> ctx) throws MetaException { - return directSql.aggrColStatsForPartitions(dbName, tblName, partNames, colNames); + protected AggrStats getSqlResult(GetHelper ctx) + throws MetaException { + return directSql.aggrColStatsForPartitions(dbName, tblName, partNames, + colNames); } - @Override - protected List getJdoResult( - GetHelper> ctx) throws MetaException, - NoSuchObjectException { - // This is fast path for query optimizations, if we can find this info quickly using + protected AggrStats getJdoResult(GetHelper ctx) + throws MetaException, NoSuchObjectException { + // This is fast path for query optimizations, if we can find this info + // quickly using // directSql, do it. No point in failing back to slow path here. throw new MetaException("Jdo path is not implemented for stats aggr."); } - }.run(true); + @Override + protected String describeResult() { + return null; + } + }.run(true); } private List getMPartitionColumnStatistics( diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java index 0364385..e435d69 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/RawStore.java @@ -26,6 +26,7 @@ import java.util.Map; import org.apache.hadoop.conf.Configurable; +import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; @@ -548,6 +549,6 @@ public void dropFunction(String dbName, String funcName) */ public List getFunctions(String dbName, String pattern) throws MetaException; - public List get_aggr_stats_for(String dbName, String tblName, + public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, List colNames) throws MetaException, NoSuchObjectException; } diff --git a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java index 30cf814..06d8ac0 100644 --- a/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java +++ b/metastore/src/java/org/apache/hadoop/hive/metastore/txn/TxnDbUtil.java @@ -49,121 +49,135 @@ public static void prepDb() throws Exception { // intended for creating derby databases, and thus will inexorably get // out of date with it. I'm open to any suggestions on how to make this // read the file in a build friendly way. 
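The TxnDbUtil hunks that follow wrap the Derby DDL in try/finally so the connection is rolled back and closed whenever something fails before the commit. A compact sketch of that pattern, assuming a JDBC Connection obtained elsewhere (TxnDbUtil's own getConnection() helper is private):

import java.sql.Connection;
import java.sql.Statement;

public class DdlWithCleanup {
  static void runDdl(Connection conn, String... statements) throws Exception {
    boolean committed = false;
    try {
      Statement s = conn.createStatement();
      for (String sql : statements) {
        s.execute(sql); // e.g. the CREATE TABLE / INSERT statements below
      }
      conn.commit();
      committed = true;
    } finally {
      if (!committed) {
        conn.rollback(); // undo partial work if any statement failed
      }
      conn.close();
    }
  }
}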
- Connection conn = getConnection(); - Statement s = conn.createStatement(); - s.execute("CREATE TABLE TXNS (" + - " TXN_ID bigint PRIMARY KEY," + - " TXN_STATE char(1) NOT NULL," + - " TXN_STARTED bigint NOT NULL," + - " TXN_LAST_HEARTBEAT bigint NOT NULL," + - " TXN_USER varchar(128) NOT NULL," + - " TXN_HOST varchar(128) NOT NULL)"); + Connection conn = null; + boolean committed = false; + try { + conn = getConnection(); + Statement s = conn.createStatement(); + s.execute("CREATE TABLE TXNS (" + + " TXN_ID bigint PRIMARY KEY," + + " TXN_STATE char(1) NOT NULL," + + " TXN_STARTED bigint NOT NULL," + + " TXN_LAST_HEARTBEAT bigint NOT NULL," + + " TXN_USER varchar(128) NOT NULL," + + " TXN_HOST varchar(128) NOT NULL)"); - s.execute("CREATE TABLE TXN_COMPONENTS (" + - " TC_TXNID bigint REFERENCES TXNS (TXN_ID)," + - " TC_DATABASE varchar(128) NOT NULL," + - " TC_TABLE varchar(128)," + - " TC_PARTITION varchar(767))"); - s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" + - " CTC_TXNID bigint," + - " CTC_DATABASE varchar(128) NOT NULL," + - " CTC_TABLE varchar(128)," + - " CTC_PARTITION varchar(767))"); - s.execute("CREATE TABLE NEXT_TXN_ID (" + - " NTXN_NEXT bigint NOT NULL)"); - s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)"); - s.execute("CREATE TABLE HIVE_LOCKS (" + - " HL_LOCK_EXT_ID bigint NOT NULL," + - " HL_LOCK_INT_ID bigint NOT NULL," + - " HL_TXNID bigint," + - " HL_DB varchar(128) NOT NULL," + - " HL_TABLE varchar(128)," + - " HL_PARTITION varchar(767)," + - " HL_LOCK_STATE char(1) NOT NULL," + - " HL_LOCK_TYPE char(1) NOT NULL," + - " HL_LAST_HEARTBEAT bigint NOT NULL," + - " HL_ACQUIRED_AT bigint," + - " HL_USER varchar(128) NOT NULL," + - " HL_HOST varchar(128) NOT NULL," + - " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))"); - s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)"); + s.execute("CREATE TABLE TXN_COMPONENTS (" + + " TC_TXNID bigint REFERENCES TXNS (TXN_ID)," + + " TC_DATABASE varchar(128) NOT NULL," + + " TC_TABLE varchar(128)," + + " TC_PARTITION varchar(767))"); + s.execute("CREATE TABLE COMPLETED_TXN_COMPONENTS (" + + " CTC_TXNID bigint," + + " CTC_DATABASE varchar(128) NOT NULL," + + " CTC_TABLE varchar(128)," + + " CTC_PARTITION varchar(767))"); + s.execute("CREATE TABLE NEXT_TXN_ID (" + + " NTXN_NEXT bigint NOT NULL)"); + s.execute("INSERT INTO NEXT_TXN_ID VALUES(1)"); + s.execute("CREATE TABLE HIVE_LOCKS (" + + " HL_LOCK_EXT_ID bigint NOT NULL," + + " HL_LOCK_INT_ID bigint NOT NULL," + + " HL_TXNID bigint," + + " HL_DB varchar(128) NOT NULL," + + " HL_TABLE varchar(128)," + + " HL_PARTITION varchar(767)," + + " HL_LOCK_STATE char(1) NOT NULL," + + " HL_LOCK_TYPE char(1) NOT NULL," + + " HL_LAST_HEARTBEAT bigint NOT NULL," + + " HL_ACQUIRED_AT bigint," + + " HL_USER varchar(128) NOT NULL," + + " HL_HOST varchar(128) NOT NULL," + + " PRIMARY KEY(HL_LOCK_EXT_ID, HL_LOCK_INT_ID))"); + s.execute("CREATE INDEX HL_TXNID_INDEX ON HIVE_LOCKS (HL_TXNID)"); - s.execute("CREATE TABLE NEXT_LOCK_ID (" + - " NL_NEXT bigint NOT NULL)"); - s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)"); + s.execute("CREATE TABLE NEXT_LOCK_ID (" + + " NL_NEXT bigint NOT NULL)"); + s.execute("INSERT INTO NEXT_LOCK_ID VALUES(1)"); - s.execute("CREATE TABLE COMPACTION_QUEUE (" + - " CQ_ID bigint PRIMARY KEY," + - " CQ_DATABASE varchar(128) NOT NULL," + - " CQ_TABLE varchar(128) NOT NULL," + - " CQ_PARTITION varchar(767)," + - " CQ_STATE char(1) NOT NULL," + - " CQ_TYPE char(1) NOT NULL," + - " CQ_WORKER_ID varchar(128)," + - " CQ_START bigint," + - " CQ_RUN_AS varchar(128))"); + 
s.execute("CREATE TABLE COMPACTION_QUEUE (" + + " CQ_ID bigint PRIMARY KEY," + + " CQ_DATABASE varchar(128) NOT NULL," + + " CQ_TABLE varchar(128) NOT NULL," + + " CQ_PARTITION varchar(767)," + + " CQ_STATE char(1) NOT NULL," + + " CQ_TYPE char(1) NOT NULL," + + " CQ_WORKER_ID varchar(128)," + + " CQ_START bigint," + + " CQ_RUN_AS varchar(128))"); - s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)"); - s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)"); + s.execute("CREATE TABLE NEXT_COMPACTION_QUEUE_ID (NCQ_NEXT bigint NOT NULL)"); + s.execute("INSERT INTO NEXT_COMPACTION_QUEUE_ID VALUES(1)"); - conn.commit(); - conn.close(); + conn.commit(); + committed = true; + } finally { + if (!committed) conn.rollback(); + conn.close(); + } } public static void cleanDb() throws Exception { - Connection conn = getConnection(); - Statement s = conn.createStatement(); - // We want to try these, whether they succeed or fail. + Connection conn = null; + boolean committed = false; try { - s.execute("DROP INDEX HL_TXNID_INDEX"); - } catch (Exception e) { - System.err.println("Unable to drop index HL_TXNID_INDEX " + - e.getMessage()); + conn = getConnection(); + Statement s = conn.createStatement(); + // We want to try these, whether they succeed or fail. + try { + s.execute("DROP INDEX HL_TXNID_INDEX"); + } catch (Exception e) { + System.err.println("Unable to drop index HL_TXNID_INDEX " + + e.getMessage()); + } + try { + s.execute("DROP TABLE TXN_COMPONENTS"); + } catch (Exception e) { + System.err.println("Unable to drop table TXN_COMPONENTS " + + e.getMessage()); + } + try { + s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS"); + } catch (Exception e) { + System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " + + e.getMessage()); + } + try { + s.execute("DROP TABLE TXNS"); + } catch (Exception e) { + System.err.println("Unable to drop table TXNS " + + e.getMessage()); + } + try { + s.execute("DROP TABLE NEXT_TXN_ID"); + } catch (Exception e) { + System.err.println("Unable to drop table NEXT_TXN_ID " + + e.getMessage()); + } + try { + s.execute("DROP TABLE HIVE_LOCKS"); + } catch (Exception e) { + System.err.println("Unable to drop table HIVE_LOCKS " + + e.getMessage()); + } + try { + s.execute("DROP TABLE NEXT_LOCK_ID"); + } catch (Exception e) { + } + try { + s.execute("DROP TABLE COMPACTION_QUEUE"); + } catch (Exception e) { + } + try { + s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID"); + } catch (Exception e) { + } + conn.commit(); + committed = true; + } finally { + if (!committed) conn.rollback(); + conn.close(); } - try { - s.execute("DROP TABLE TXN_COMPONENTS"); - } catch (Exception e) { - System.err.println("Unable to drop table TXN_COMPONENTS " + - e.getMessage()); - } - try { - s.execute("DROP TABLE COMPLETED_TXN_COMPONENTS"); - } catch (Exception e) { - System.err.println("Unable to drop table COMPLETED_TXN_COMPONENTS " + - e.getMessage()); - } - try { - s.execute("DROP TABLE TXNS"); - } catch (Exception e) { - System.err.println("Unable to drop table TXNS " + - e.getMessage()); - } - try { - s.execute("DROP TABLE NEXT_TXN_ID"); - } catch (Exception e) { - System.err.println("Unable to drop table NEXT_TXN_ID " + - e.getMessage()); - } - try { - s.execute("DROP TABLE HIVE_LOCKS"); - } catch (Exception e) { - System.err.println("Unable to drop table HIVE_LOCKS " + - e.getMessage()); - } - try { - s.execute("DROP TABLE NEXT_LOCK_ID"); - } catch (Exception e) { - } - try { - s.execute("DROP TABLE COMPACTION_QUEUE"); - } catch (Exception e) { - } - 
try { - s.execute("DROP TABLE NEXT_COMPACTION_QUEUE_ID"); - } catch (Exception e) { - } - conn.commit(); - conn.close(); } /** @@ -174,25 +188,34 @@ public static void cleanDb() throws Exception { */ public static int countLockComponents(long lockId) throws Exception { Connection conn = getConnection(); - Statement s = conn.createStatement(); - ResultSet rs = s.executeQuery("select count(*) from hive_locks where " + - "hl_lock_ext_id = " + lockId); - if (!rs.next()) return 0; - int rc = rs.getInt(1); - conn.rollback(); - conn.close(); - return rc; + try { + Statement s = conn.createStatement(); + ResultSet rs = s.executeQuery("select count(*) from hive_locks where hl_lock_ext_id = " + + lockId); + if (!rs.next()) return 0; + int rc = rs.getInt(1); + return rc; + } finally { + conn.rollback(); + conn.close(); + } } public static int findNumCurrentLocks() throws Exception { - Connection conn = getConnection(); - Statement s = conn.createStatement(); - ResultSet rs = s.executeQuery("select count(*) from hive_locks"); - if (!rs.next()) return 0; - int rc = rs.getInt(1); - conn.rollback(); - conn.close(); - return rc; + Connection conn = null; + try { + conn = getConnection(); + Statement s = conn.createStatement(); + ResultSet rs = s.executeQuery("select count(*) from hive_locks"); + if (!rs.next()) return 0; + int rc = rs.getInt(1); + return rc; + } finally { + if (conn != null) { + conn.rollback(); + conn.close(); + } + } } private static Connection getConnection() throws Exception { diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java index 4eba2b0..3847d99 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreControlledCommit.java @@ -24,6 +24,7 @@ import org.apache.hadoop.conf.Configurable; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; @@ -712,7 +713,7 @@ public Function getFunction(String dbName, String funcName) } @Override - public List get_aggr_stats_for(String dbName, + public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, List colNames) throws MetaException { return null; diff --git a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java index 78ab19a..981fa1a 100644 --- a/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java +++ b/metastore/src/test/org/apache/hadoop/hive/metastore/DummyRawStoreForJdoConnection.java @@ -25,6 +25,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.metastore.api.AggrStats; import org.apache.hadoop.hive.metastore.api.ColumnStatistics; import org.apache.hadoop.hive.metastore.api.ColumnStatisticsObj; import org.apache.hadoop.hive.metastore.api.Database; @@ -729,7 +730,7 @@ public Function getFunction(String dbName, String funcName) } @Override - public List get_aggr_stats_for(String dbName, + public AggrStats get_aggr_stats_for(String dbName, String tblName, List partNames, List colNames) throws MetaException { return 
null; diff --git a/packaging/pom.xml b/packaging/pom.xml index cc12d1b..d11fc1e 100644 --- a/packaging/pom.xml +++ b/packaging/pom.xml @@ -182,6 +182,11 @@ ${project.version} + org.apache.hive + hive-accumulo-handler + ${project.version} + + org.apache.hive.hcatalog hive-hcatalog-streaming ${project.version} diff --git a/pom.xml b/pom.xml index 78ac6ea..8713a30 100644 --- a/pom.xml +++ b/pom.xml @@ -31,6 +31,7 @@ + accumulo-handler ant beeline cli @@ -87,6 +88,7 @@ 1.8 + 1.6.0 5.5.0 1.9.1 3.4 @@ -378,6 +380,31 @@ ${commons-exec.version} + org.apache.accumulo + accumulo-core + ${accumulo.version} + + + org.apache.accumulo + accumulo-fate + ${accumulo.version} + + + org.apache.accumulo + accumulo-minicluster + ${accumulo.version} + + + org.apache.accumulo + accumulo-start + ${accumulo.version} + + + org.apache.accumulo + accumulo-trace + ${accumulo.version} + + org.apache.activemq activemq-core ${activemq.version} diff --git a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java index ab951ff..bc1253a 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/Driver.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/Driver.java @@ -754,6 +754,9 @@ private static void doAuthorizationV2(SessionState ss, HiveOperation op, HashSet objName = privObject.getD(); break; case FUNCTION: + if(privObject.getDatabase() != null) { + dbname = privObject.getDatabase().getName(); + } objName = privObject.getFunctionName(); break; case DUMMYPARTITION: diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java index 94afaba..176a593 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/ColumnStatsTask.java @@ -41,6 +41,7 @@ import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.metastore.api.LongColumnStatsData; import org.apache.hadoop.hive.metastore.api.MetaException; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.StringColumnStatsData; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; @@ -342,9 +343,7 @@ private int persistPartitionStats() throws HiveException, MetaException, IOExcep // Construct a column statistics object from the result List colStats = constructColumnStatsFromPackedRows(); // Persist the column statistics object to the metastore - for (ColumnStatistics colStat : colStats) { - db.updatePartitionColumnStatistics(colStat); - } + db.setPartitionColumnStatistics(new SetPartitionsStatsRequest(colStats)); return 0; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java index 0da886b..89fff81 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java @@ -105,7 +105,7 @@ private transient JobConf job; private transient WritableComparable key; private transient Writable value; - private transient Writable[] vcValues; + private transient Object[] vcValues; private transient Deserializer serde; private transient Deserializer tblSerde; private transient Converter partTblObjectInspectorConverter; @@ -141,12 +141,11 @@ private void initialize() { List names = new ArrayList(vcCols.size()); List inspectors = new ArrayList(vcCols.size()); for (VirtualColumn vc : vcCols) { - 
inspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector( - vc.getTypeInfo())); + inspectors.add(vc.getObjectInspector()); names.add(vc.getName()); } vcsOI = ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors); - vcValues = new Writable[vcCols.size()]; + vcValues = new Object[vcCols.size()]; } isPartitioned = work.isPartitioned(); tblDataDone = false; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java index d5de58e..b1f8358 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java @@ -35,6 +35,7 @@ import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; import org.apache.hadoop.hive.ql.io.IOContext; import org.apache.hadoop.hive.ql.exec.mr.ExecMapperContext; +import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.ql.plan.MapWork; @@ -140,7 +141,7 @@ public int hashCode() { String tableName; String partName; List vcs; - Writable[] vcValues; + Object[] vcValues; private boolean isPartitioned() { return partObjectInspector != null; @@ -165,7 +166,7 @@ public StructObjectInspector getRowObjectInspector() { * op. * * @param hconf - * @param mrwork + * @param mapWork * @throws HiveException */ public void initializeAsRoot(Configuration hconf, MapWork mapWork) @@ -250,13 +251,13 @@ private MapOpCtx initObjectInspector(Configuration hconf, MapInputPath ctx, // The op may not be a TableScan for mapjoins // Consider the query: select /*+MAPJOIN(a)*/ count(*) FROM T1 a JOIN T2 b ON a.key = b.key; - // In that case, it will be a Select, but the rowOI need not be ammended + // In that case, it will be a Select, but the rowOI need not be amended if (ctx.op instanceof TableScanOperator) { TableScanOperator tsOp = (TableScanOperator) ctx.op; TableScanDesc tsDesc = tsOp.getConf(); if (tsDesc != null && tsDesc.hasVirtualCols()) { opCtx.vcs = tsDesc.getVirtualCols(); - opCtx.vcValues = new Writable[opCtx.vcs.size()]; + opCtx.vcValues = new Object[opCtx.vcs.size()]; opCtx.vcsObjectInspector = VirtualColumn.getVCSObjectInspector(opCtx.vcs); if (opCtx.isPartitioned()) { opCtx.rowWithPartAndVC = Arrays.copyOfRange(opCtx.rowWithPart, 0, 3); @@ -550,13 +551,13 @@ public void process(Writable value) throws HiveException { } } - public static Writable[] populateVirtualColumnValues(ExecMapperContext ctx, - List vcs, Writable[] vcValues, Deserializer deserializer) { + public static Object[] populateVirtualColumnValues(ExecMapperContext ctx, + List vcs, Object[] vcValues, Deserializer deserializer) { if (vcs == null) { return vcValues; } if (vcValues == null) { - vcValues = new Writable[vcs.size()]; + vcValues = new Object[vcs.size()]; } for (int i = 0; i < vcs.size(); i++) { VirtualColumn vc = vcs.get(i); @@ -602,6 +603,19 @@ public void process(Writable value) throws HiveException { old.set(current); } } + else if(vc.equals(VirtualColumn.ROWID)) { + if(ctx.getIoCxt().ri == null) { + vcValues[i] = null; + } + else { + if(vcValues[i] == null) { + vcValues[i] = new Object[RecordIdentifier.Field.values().length]; + } + RecordIdentifier.StructInfo.toArray(ctx.getIoCxt().ri, (Object[])vcValues[i]); + ctx.getIoCxt().ri = null;//so we don't accidentally cache the value; shouldn't + //happen since IO layer either knows how to produce ROW__ID or not - but to be 
safe + } + } } return vcValues; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java index 4e0fd79..7fb4c46 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecMapper.java @@ -155,7 +155,7 @@ public void configure(JobConf job) { } } } - + @Override public void map(Object key, Object value, OutputCollector output, Reporter reporter) throws IOException { if (oc == null) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java b/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java index 50c76db..76b1f01 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/hooks/Entity.java @@ -22,6 +22,7 @@ import java.net.URI; import java.util.Map; +import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.metastore.api.Database; import org.apache.hadoop.hive.ql.metadata.DummyPartition; import org.apache.hadoop.hive.ql.metadata.Partition; @@ -276,11 +277,13 @@ public URI getLocation() throws Exception { } if (typ == Type.TABLE) { - return t.getDataLocation().toUri(); + Path path = t.getDataLocation(); + return path == null ? null : path.toUri(); } if (typ == Type.PARTITION) { - return p.getDataLocation().toUri(); + Path path = p.getDataLocation(); + return path == null ? null : path.toUri(); } if (typ == Type.DFS_DIR || typ == Type.LOCAL_DIR) { @@ -333,6 +336,9 @@ private String computeName() { case DUMMYPARTITION: return p.getName(); case FUNCTION: + if (database != null) { + return database.getName() + "." + stringObject; + } return stringObject; default: return d; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java index 71a9dd4..2f63524 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidInputFormat.java @@ -22,7 +22,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.ValidTxnList; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.RecordReader; @@ -86,11 +86,20 @@ *

* To support transitions between non-ACID layouts to ACID layouts, the input * formats are expected to support both layouts and detect the correct one. - * - * @param The row type + *

+ * A note on the KEY of this InputFormat. + * For row-at-a-time processing, KEY can conveniently pass RowId into the operator + * pipeline. For vectorized execution the KEY could perhaps represent a range in the batch. + * Since {@link org.apache.hadoop.hive.ql.io.orc.OrcInputFormat} is declared to return + * {@code NullWritable} key, {@link org.apache.hadoop.hive.ql.io.AcidRecordReader} is defined + * to provide access to the RowId. Other implementations of AcidInputFormat can use either + * mechanism. + *

+ * + * @param The row type */ -public interface AcidInputFormat - extends InputFormat, InputFormatChecker { +public interface AcidInputFormat + extends InputFormat, InputFormatChecker { /** * Options for controlling the record readers. @@ -140,7 +149,7 @@ public Reporter getReporter() { * @return a record reader * @throws IOException */ - public RowReader getReader(InputSplit split, + public RowReader getReader(InputSplit split, Options options) throws IOException; public static interface RawReader @@ -162,11 +171,18 @@ public Reporter getReporter() { * @return a record reader * @throws IOException */ - RawReader getRawReader(Configuration conf, + RawReader getRawReader(Configuration conf, boolean collapseEvents, int bucket, ValidTxnList validTxnList, Path baseDirectory, Path[] deltaDirectory ) throws IOException; + + /** + * A RecordReader returned by an AcidInputFormat working in row-at-a-time mode should implement AcidRecordReader. + */ + public interface AcidRecordReader extends RecordReader { + RecordIdentifier getRecordIdentifier(); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java index 6b330e1..88e7106 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/AcidOutputFormat.java @@ -23,7 +23,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.io.NullWritable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.Reporter; import java.io.IOException; @@ -34,7 +34,7 @@ * An extension for OutputFormats that want to implement ACID transactions.
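The AcidRecordReader interface added above is how ROW__ID stays reachable when the KEY an InputFormat hands back is just NullWritable (as with ORC). A minimal sketch of how a caller can recover the RecordIdentifier of the most recently returned row, using only types that appear in this patch; the helper class and method names are illustrative, not part of the patch:

import org.apache.hadoop.hive.ql.io.AcidInputFormat;
import org.apache.hadoop.hive.ql.io.RecordIdentifier;
import org.apache.hadoop.mapred.RecordReader;

public final class RowIdProbe {
  private RowIdProbe() {}

  // Mirrors what HiveContextAwareRecordReader does later in this patch: if the wrapped
  // reader implements AcidRecordReader, ask it for the identifier of the row it just
  // returned; otherwise there is no ROW__ID to expose.
  static RecordIdentifier rowIdOfLastRow(RecordReader<?, ?> reader) {
    if (reader instanceof AcidInputFormat.AcidRecordReader) {
      return ((AcidInputFormat.AcidRecordReader) reader).getRecordIdentifier();
    }
    return null;
  }
}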
* @param the row type of the file */ -public interface AcidOutputFormat extends HiveOutputFormat { +public interface AcidOutputFormat extends HiveOutputFormat { /** * Options to control how the files are written diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java index f874d86..c5f6c1e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/HiveContextAwareRecordReader.java @@ -20,17 +20,13 @@ import java.io.IOException; import java.util.ArrayList; -import java.util.LinkedList; import java.util.List; import java.util.Map; -import java.util.Properties; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.common.ObjectPair; import org.apache.hadoop.hive.io.HiveIOExceptionHandlerUtil; import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.FooterBuffer; @@ -42,16 +38,13 @@ import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPEqualOrLessThan; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPGreaterThan; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFOPLessThan; -import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.io.SequenceFile; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.WritableComparable; -import org.apache.hadoop.io.WritableUtils; import org.apache.hadoop.mapred.FileSplit; import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.mapred.RecordReader; -import org.apache.hadoop.util.ReflectionUtils; /** This class prepares an IOContext, and provides the ability to perform a binary search on the * data. 
The binary search can be used by setting the value of inputFormatSorted in the @@ -119,7 +112,18 @@ public boolean next(K key, V value) throws IOException { } updateIOContext(); try { - return doNext(key, value); + boolean retVal = doNext(key, value); + if(retVal) { + if(key instanceof RecordIdentifier) { + //supports AcidInputFormat implementations which use the KEY to pass ROW__ID info + ioCxtRef.ri = (RecordIdentifier)key; + } + else if(recordReader instanceof AcidInputFormat.AcidRecordReader) { + //supports AcidInputFormat implementations which do not use the KEY to pass ROW__ID info + ioCxtRef.ri = ((AcidInputFormat.AcidRecordReader) recordReader).getRecordIdentifier(); + } + } + return retVal; } catch (IOException e) { ioCxtRef.setIOExceptions(true); throw e; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java index 45a49c5..081b6bd 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/IOContext.java @@ -61,6 +61,10 @@ public static void clear() { Comparison comparison = null; // The class name of the generic UDF being used by the filter String genericUDFClassName = null; + /** + * supports {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#ROWID} + */ + public RecordIdentifier ri; public static enum Comparison { GREATER, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java index 38a0d6b..cdde3dc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/RecordIdentifier.java @@ -19,16 +19,81 @@ package org.apache.hadoop.hive.ql.io; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.io.WritableComparable; import java.io.DataInput; import java.io.DataOutput; import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; /** - * Gives the Record identifer information for the current record. + * Gives the Record identifier information for the current record. */ public class RecordIdentifier implements WritableComparable { + /** + * This is in support of {@link org.apache.hadoop.hive.ql.metadata.VirtualColumn#ROWID} + * Contains metadata about each field in RecordIdentifier that needs to be part of ROWID + * which is represented as a struct {@link org.apache.hadoop.hive.ql.io.RecordIdentifier.StructInfo}. + * Each field of RecordIdentifier which should be part of ROWID should be in this enum... which + * really means that it should be part of VirtualColumn (so make a subclass for rowid).
+ */ + public static enum Field { + //note the enum names match field names in the struct + transactionId(TypeInfoFactory.longTypeInfo, + PrimitiveObjectInspectorFactory.javaLongObjectInspector), + bucketId(TypeInfoFactory.intTypeInfo, PrimitiveObjectInspectorFactory.javaIntObjectInspector), + rowId(TypeInfoFactory.longTypeInfo, PrimitiveObjectInspectorFactory.javaLongObjectInspector); + public final TypeInfo fieldType; + public final ObjectInspector fieldOI; + Field(TypeInfo fieldType, ObjectInspector fieldOI) { + this.fieldType = fieldType; + this.fieldOI = fieldOI; + } + } + /** + * RecordIdentifier is passed along the operator tree as a struct. This class contains a few + * utilities for that. + */ + public static final class StructInfo { + private static final List fieldNames = new ArrayList(Field.values().length); + private static final List fieldTypes = new ArrayList(fieldNames.size()); + private static final List fieldOis = + new ArrayList(fieldNames.size()); + static { + for(Field f : Field.values()) { + fieldNames.add(f.name()); + fieldTypes.add(f.fieldType); + fieldOis.add(f.fieldOI); + } + } + public static final TypeInfo typeInfo = + TypeInfoFactory.getStructTypeInfo(fieldNames, fieldTypes); + public static final ObjectInspector oi = + ObjectInspectorFactory.getStandardStructObjectInspector(fieldNames, fieldOis); + + /** + * Copies relevant fields from {@code ri} to {@code struct} + * @param ri + * @param struct must be of size Field.values().size() + */ + public static void toArray(RecordIdentifier ri, Object[] struct) { + assert struct != null && struct.length == Field.values().length; + if(ri == null) { + Arrays.fill(struct, null); + return; + } + struct[Field.transactionId.ordinal()] = ri.getTransactionId(); + struct[Field.bucketId.ordinal()] = ri.getBucketId(); + struct[Field.rowId.ordinal()] = ri.getRowId(); + } + } + private long transactionId; private int bucketId; private long rowId; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java b/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java index 60b4388..22e54d3 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/avro/AvroGenericRecordReader.java @@ -127,7 +127,7 @@ private Schema getSchema(JobConf job, FileSplit split) throws AvroSerdeException String s = job.get(AvroSerdeUtils.AVRO_SERDE_SCHEMA); if(s != null) { LOG.info("Found the avro schema in the job: " + s); - return Schema.parse(s); + return AvroSerdeUtils.getSchemaFor(s); } // No more places to get the schema from. Give up. May have to re-encode later. return null; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java index 7edb3c2..2fcc207 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcInputFormat.java @@ -98,7 +98,7 @@ */ public class OrcInputFormat implements InputFormat, InputFormatChecker, VectorizedInputFormatInterface, - AcidInputFormat { + AcidInputFormat { private static final Log LOG = LogFactory.getLog(OrcInputFormat.class); static final HadoopShims SHIMS = ShimLoader.getHadoopShims(); @@ -989,7 +989,7 @@ private boolean isStripeSatisfyPredicate(StripeStatistics stripeStatistics, boolean vectorMode = Utilities.isVectorMode(conf); // if HiveCombineInputFormat gives us FileSplits instead of OrcSplits, - // we know it is not ACID. 
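RecordIdentifier.StructInfo above fixes the shape of the ROW__ID struct that MapOperator (earlier in this patch) fills into a plain Object[]. A small sketch of that contract, using only members shown in the hunk; populating the array from a non-null identifier would additionally need a RecordIdentifier constructor or setter that this hunk does not show:

import java.util.Arrays;
import org.apache.hadoop.hive.ql.io.RecordIdentifier;

public final class RowIdStructSketch {
  public static void main(String[] args) {
    // One slot per Field constant: transactionId, bucketId, rowId.
    Object[] rowIdStruct = new Object[RecordIdentifier.Field.values().length];
    // A null identifier is flattened to a struct of nulls rather than throwing.
    RecordIdentifier.StructInfo.toArray(null, rowIdStruct);
    System.out.println(Arrays.toString(rowIdStruct)); // [null, null, null]
    // For a non-null identifier, the Field.transactionId/bucketId/rowId ordinals index
    // the values of the corresponding getters.
  }
}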
+ // we know it is not ACID. (see a check in CombineHiveInputFormat.getSplits() that assures this) if (inputSplit.getClass() == FileSplit.class) { if (vectorMode) { return createVectorizedReader(inputSplit, conf, reporter); @@ -998,62 +998,75 @@ private boolean isStripeSatisfyPredicate(StripeStatistics stripeStatistics, ((FileSplit) inputSplit).getPath(), OrcFile.readerOptions(conf)), conf, (FileSplit) inputSplit); } - + OrcSplit split = (OrcSplit) inputSplit; reporter.setStatus(inputSplit.toString()); - // if we are strictly old-school, just use the old code + Options options = new Options(conf).reporter(reporter); + final RowReader inner = getReader(inputSplit, options); + + + /*Even though there are no delta files, we still need to produce row ids so that an + * UPDATE or DELETE statement would work on a table which didn't have any previous updates*/ if (split.isOriginal() && split.getDeltas().isEmpty()) { if (vectorMode) { return createVectorizedReader(inputSplit, conf, reporter); } else { - return new OrcRecordReader(OrcFile.createReader(split.getPath(), - OrcFile.readerOptions(conf)), conf, split); + return new NullKeyRecordReader(inner, conf); } } - Options options = new Options(conf).reporter(reporter); - final RowReader inner = getReader(inputSplit, options); if (vectorMode) { return (org.apache.hadoop.mapred.RecordReader) new VectorizedOrcAcidRowReader(inner, conf, (FileSplit) inputSplit); } - final RecordIdentifier id = inner.createKey(); + return new NullKeyRecordReader(inner, conf); + } + /** + * Return a RecordReader that is compatible with the Hive 0.12 reader + * with NullWritable for the key instead of RecordIdentifier. + */ + public static final class NullKeyRecordReader implements AcidRecordReader { + private final RecordIdentifier id; + private final RowReader inner; - // Return a RecordReader that is compatible with the Hive 0.12 reader - // with NullWritable for the key instead of RecordIdentifier. 
- return new org.apache.hadoop.mapred.RecordReader(){ - @Override - public boolean next(NullWritable nullWritable, - OrcStruct orcStruct) throws IOException { - return inner.next(id, orcStruct); - } + public RecordIdentifier getRecordIdentifier() { + return id; + } + private NullKeyRecordReader(RowReader inner, Configuration conf) { + this.inner = inner; + id = inner.createKey(); + } + @Override + public boolean next(NullWritable nullWritable, + OrcStruct orcStruct) throws IOException { + return inner.next(id, orcStruct); + } - @Override - public NullWritable createKey() { - return NullWritable.get(); - } + @Override + public NullWritable createKey() { + return NullWritable.get(); + } - @Override - public OrcStruct createValue() { - return inner.createValue(); - } + @Override + public OrcStruct createValue() { + return inner.createValue(); + } - @Override - public long getPos() throws IOException { - return inner.getPos(); - } + @Override + public long getPos() throws IOException { + return inner.getPos(); + } - @Override - public void close() throws IOException { - inner.close(); - } + @Override + public void close() throws IOException { + inner.close(); + } - @Override - public float getProgress() throws IOException { - return inner.getProgress(); - } - }; + @Override + public float getProgress() throws IOException { + return inner.getProgress(); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java index 00e0807..2749c7f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcOutputFormat.java @@ -50,7 +50,7 @@ * A Hive OutputFormat for ORC files. */ public class OrcOutputFormat extends FileOutputFormat - implements AcidOutputFormat { + implements AcidOutputFormat { private static class OrcRecordWriter implements RecordWriter, diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java index 8f17c12..b7ec309 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/OrcRecordUpdater.java @@ -88,6 +88,9 @@ private final IntWritable bucket = new IntWritable(); private final LongWritable rowId = new LongWritable(); private long insertedRows = 0; + // This records how many rows have been inserted or deleted. It is separate from insertedRows + // because that is monotonically increasing to give new unique row ids. + private long rowCountDelta = 0; private final KeyIndexBuilder indexBuilder = new KeyIndexBuilder(); static class AcidStats { @@ -263,6 +266,7 @@ public void insert(long currentTransaction, Object row) throws IOException { } addEvent(INSERT_OPERATION, currentTransaction, currentTransaction, insertedRows++, row); + rowCountDelta++; } @Override @@ -283,6 +287,7 @@ public void delete(long currentTransaction, long originalTransaction, } addEvent(DELETE_OPERATION, currentTransaction, originalTransaction, rowId, null); + rowCountDelta--; } @Override @@ -317,7 +322,11 @@ public void close(boolean abort) throws IOException { @Override public SerDeStats getStats() { - return null; + SerDeStats stats = new SerDeStats(); + stats.setRowCount(rowCountDelta); + // Don't worry about setting raw data size diff. I have no idea how to calculate that + // without finding the row we are updating or deleting, which would be a mess. 
+ return stats; } @VisibleForTesting diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java index f5023bb..5f93798 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RecordReaderImpl.java @@ -21,6 +21,7 @@ import java.io.EOFException; import java.io.IOException; +import java.math.BigDecimal; import java.math.BigInteger; import java.nio.ByteBuffer; import java.sql.Timestamp; @@ -1292,8 +1293,9 @@ Object nextVector(Object previousVector, long batchSize) throws IOException { BigInteger bInt = SerializationUtils.readBigInteger(valueStream); result.vector[i].update(bInt, (short) scratchScaleVector.vector[i]); - // Change the scale to match the schema if the scale in data is different. - if (scale != scratchScaleVector.vector[i]) { + // Change the scale to match the schema if the scale is less than in data. + // (HIVE-7373) If scale is bigger, then it leaves the original trailing zeros + if (scale < scratchScaleVector.vector[i]) { result.vector[i].changeScaleDestructive((short) scale); } } @@ -2410,6 +2412,9 @@ static TruthValue evaluatePredicateRange(PredicateLeaf predicate, Object min, private static Object getBaseObjectForComparison(Object predObj, Object statsObj) { if (predObj != null) { + if (predObj instanceof ExprNodeConstantDesc) { + predObj = ((ExprNodeConstantDesc) predObj).getValue(); + } // following are implicitly convertible if (statsObj instanceof Long) { if (predObj instanceof Double) { @@ -2428,10 +2433,6 @@ private static Object getBaseObjectForComparison(Object predObj, Object statsObj return Double.valueOf(predObj.toString()); } } else if (statsObj instanceof String) { - // Ex: where d = date '1970-02-01' will be ExprNodeConstantDesc - if (predObj instanceof ExprNodeConstantDesc) { - return ((ExprNodeConstantDesc) predObj).getValue().toString(); - } return predObj.toString(); } else if (statsObj instanceof HiveDecimal) { if (predObj instanceof Long) { @@ -2440,6 +2441,8 @@ private static Object getBaseObjectForComparison(Object predObj, Object statsObj return HiveDecimal.create(predObj.toString()); } else if (predObj instanceof String) { return HiveDecimal.create(predObj.toString()); + } else if (predObj instanceof BigDecimal) { + return HiveDecimal.create((BigDecimal)predObj); } } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java index 3a0ba1d..5bd4599 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/RunLengthIntegerWriterV2.java @@ -142,9 +142,9 @@ private final boolean signed; private EncodingType encoding; private int numLiterals; - private long[] zigzagLiterals; - private long[] baseRedLiterals; - private long[] adjDeltas; + private final long[] zigzagLiterals = new long[MAX_SCOPE]; + private final long[] baseRedLiterals = new long[MAX_SCOPE]; + private final long[] adjDeltas = new long[MAX_SCOPE]; private long fixedDelta; private int zzBits90p; private int zzBits100p; @@ -252,8 +252,11 @@ private void writeDeltaValues() throws IOException { // store the first value as delta value using zigzag encoding utils.writeVslong(output, adjDeltas[0]); - // adjacent delta values are bit packed - utils.writeInts(adjDeltas, 1, adjDeltas.length - 1, fb, output); + // adjacent delta values are bit packed. 
The length of adjDeltas array is + // always one less than the number of literals (delta difference for n + // elements is n-1). We have already written one element, write the + // remaining numLiterals - 2 elements here + utils.writeInts(adjDeltas, 1, numLiterals - 2, fb, output); } } @@ -323,7 +326,7 @@ private void writePatchedBaseValues() throws IOException { // base reduced literals are bit packed int closestFixedBits = utils.getClosestFixedBits(fb); - utils.writeInts(baseRedLiterals, 0, baseRedLiterals.length, closestFixedBits, + utils.writeInts(baseRedLiterals, 0, numLiterals, closestFixedBits, output); // write patch list @@ -372,7 +375,7 @@ private void writeDirectValues() throws IOException { output.write(headerSecondByte); // bit packing the zigzag encoded literals - utils.writeInts(zigzagLiterals, 0, zigzagLiterals.length, fb, output); + utils.writeInts(zigzagLiterals, 0, numLiterals, fb, output); // reset run length variableRunLength = 0; @@ -414,14 +417,6 @@ private void writeShortRepeatValues() throws IOException { } private void determineEncoding() { - // used for direct encoding - zigzagLiterals = new long[numLiterals]; - - // used for patched base encoding - baseRedLiterals = new long[numLiterals]; - - // used for delta encoding - adjDeltas = new long[numLiterals - 1]; int idx = 0; @@ -530,10 +525,10 @@ private void determineEncoding() { // is not significant then we can use direct or delta encoding double p = 0.9; - zzBits90p = utils.percentileBits(zigzagLiterals, p); + zzBits90p = utils.percentileBits(zigzagLiterals, 0, numLiterals, p); p = 1.0; - zzBits100p = utils.percentileBits(zigzagLiterals, p); + zzBits100p = utils.percentileBits(zigzagLiterals, 0, numLiterals, p); int diffBitsLH = zzBits100p - zzBits90p; @@ -543,18 +538,18 @@ private void determineEncoding() { && isFixedDelta == false) { // patching is done only on base reduced values. 
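Because zigzagLiterals, baseRedLiterals and adjDeltas are now preallocated to MAX_SCOPE, only the first numLiterals entries are meaningful, which is why the percentileBits calls above pass an explicit offset and length. The following is a simplified stand-in for that idea, not the actual SerializationUtils code; it buckets by raw bit width rather than the real encodeBitWidth histogram:

public final class PercentileBitsSketch {
  // Returns the bit width needed by the p-th percentile value of data[offset, offset + length).
  static int percentileBits(long[] data, int offset, int length, double p) {
    int[] hist = new int[65];                         // histogram of required bit widths, 0..64
    for (int i = offset; i < offset + length; i++) {
      hist[64 - Long.numberOfLeadingZeros(data[i])]++;
    }
    int allowedOutliers = (int) (length * (1.0 - p)); // values allowed to exceed the percentile
    for (int width = hist.length - 1; width >= 0; width--) {
      allowedOutliers -= hist[width];
      if (allowedOutliers < 0) {
        return width;
      }
    }
    return 0;                                         // all zeros
  }

  public static void main(String[] args) {
    long[] scratch = new long[16];                    // stand-in for a MAX_SCOPE-sized buffer
    scratch[0] = 7; scratch[1] = 8; scratch[2] = 1023; // only the first 3 entries are live
    System.out.println(percentileBits(scratch, 0, 3, 1.0)); // 10
    System.out.println(percentileBits(scratch, 0, 3, 0.5)); // 4 (ignores the 10-bit outlier)
  }
}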
// remove base from literals - for(int i = 0; i < zigzagLiterals.length; i++) { + for(int i = 0; i < numLiterals; i++) { baseRedLiterals[i] = literals[i] - min; } // 95th percentile width is used to determine max allowed value // after which patching will be done p = 0.95; - brBits95p = utils.percentileBits(baseRedLiterals, p); + brBits95p = utils.percentileBits(baseRedLiterals, 0, numLiterals, p); // 100th percentile is used to compute the max patch width p = 1.0; - brBits100p = utils.percentileBits(baseRedLiterals, p); + brBits100p = utils.percentileBits(baseRedLiterals, 0, numLiterals, p); // after base reducing the values, if the difference in bits between // 95th percentile and 100th percentile value is zero then there @@ -592,7 +587,7 @@ private void preparePatchedBlob() { // since we are considering only 95 percentile, the size of gap and // patch array can contain only be 5% values - patchLength = (int) Math.ceil((baseRedLiterals.length * 0.05)); + patchLength = (int) Math.ceil((numLiterals * 0.05)); int[] gapList = new int[patchLength]; long[] patchList = new long[patchLength]; @@ -616,7 +611,7 @@ private void preparePatchedBlob() { int gap = 0; int maxGap = 0; - for(int i = 0; i < baseRedLiterals.length; i++) { + for(int i = 0; i < numLiterals; i++) { // if value is above mask then create the patch and record the gap if (baseRedLiterals[i] > mask) { gap = i - prev; @@ -694,9 +689,6 @@ private void clear() { numLiterals = 0; encoding = null; prevDelta = 0; - zigzagLiterals = null; - baseRedLiterals = null; - adjDeltas = null; fixedDelta = 0; zzBits90p = 0; zzBits100p = 0; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java index 71c1c4d..b5380c0 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/orc/SerializationUtils.java @@ -244,7 +244,7 @@ long zigzagDecode(long val) { * @param p - percentile value (>=0.0 to <=1.0) * @return pth percentile bits */ - int percentileBits(long[] data, double p) { + int percentileBits(long[] data, int offset, int length, double p) { if ((p > 1.0) || (p <= 0.0)) { return -1; } @@ -254,13 +254,12 @@ int percentileBits(long[] data, double p) { int[] hist = new int[32]; // compute the histogram - for(long l : data) { - int idx = encodeBitWidth(findClosestNumBits(l)); + for(int i = offset; i < (offset + length); i++) { + int idx = encodeBitWidth(findClosestNumBits(data[i])); hist[idx] += 1; } - int len = data.length; - int perLen = (int) (len * (1.0 - p)); + int perLen = (int) (length * (1.0 - p)); // return the bits required by pth percentile length for(int i = hist.length - 1; i >= 0; i--) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java index 2f155f6..4480600 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/ProjectionPusher.java @@ -146,7 +146,7 @@ public JobConf pushProjectionsAndFilters(JobConf jobConf, Path path) if ((part != null) && (part.getTableDesc() != null)) { Utilities.copyTableJobPropertiesToConf(part.getTableDesc(), cloneJobConf); } - pushProjectionsAndFilters(cloneJobConf, path.toString(), path.toUri().toString()); + pushProjectionsAndFilters(cloneJobConf, path.toString(), path.toUri().getPath()); return cloneJobConf; } } diff --git 
a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java index 582a5df..c5d80f2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ArrayWritableGroupConverter.java @@ -13,6 +13,9 @@ */ package org.apache.hadoop.hive.ql.io.parquet.convert; +import java.util.List; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.io.Writable; @@ -30,7 +33,7 @@ private Writable[] mapPairContainer; public ArrayWritableGroupConverter(final GroupType groupType, final HiveGroupConverter parent, - final int index) { + final int index, List hiveSchemaTypeInfos) { this.parent = parent; this.index = index; int count = groupType.getFieldCount(); @@ -40,7 +43,8 @@ public ArrayWritableGroupConverter(final GroupType groupType, final HiveGroupCon isMap = count == 2; converters = new Converter[count]; for (int i = 0; i < count; i++) { - converters[i] = getConverterFromDescription(groupType.getType(i), i, this); + converters[i] = getConverterFromDescription(groupType.getType(i), i, this, + hiveSchemaTypeInfos); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java index 0e310fb..48e4a13 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableGroupConverter.java @@ -16,6 +16,7 @@ import java.util.ArrayList; import java.util.List; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.io.Writable; @@ -36,19 +37,21 @@ private final Object[] currentArr; private Writable[] rootMap; - public DataWritableGroupConverter(final GroupType requestedSchema, final GroupType tableSchema) { - this(requestedSchema, null, 0, tableSchema); + public DataWritableGroupConverter(final GroupType requestedSchema, final GroupType tableSchema, + final List hiveSchemaTypeInfos) { + this(requestedSchema, null, 0, tableSchema, hiveSchemaTypeInfos); final int fieldCount = tableSchema.getFieldCount(); this.rootMap = new Writable[fieldCount]; } public DataWritableGroupConverter(final GroupType groupType, final HiveGroupConverter parent, - final int index) { - this(groupType, parent, index, groupType); + final int index, final List hiveSchemaTypeInfos) { + this(groupType, parent, index, groupType, hiveSchemaTypeInfos); } public DataWritableGroupConverter(final GroupType selectedGroupType, - final HiveGroupConverter parent, final int index, final GroupType containingGroupType) { + final HiveGroupConverter parent, final int index, final GroupType containingGroupType, + final List hiveSchemaTypeInfos) { this.parent = parent; this.index = index; final int totalFieldCount = containingGroupType.getFieldCount(); @@ -62,7 +65,8 @@ public DataWritableGroupConverter(final GroupType selectedGroupType, Type subtype = selectedFields.get(i); if (containingGroupType.getFields().contains(subtype)) { converters[i] = getConverterFromDescription(subtype, - containingGroupType.getFieldIndex(subtype.getName()), this); + containingGroupType.getFieldIndex(subtype.getName()), this, + hiveSchemaTypeInfos); } else { throw new 
IllegalStateException("Group type [" + containingGroupType + "] does not contain requested field: " + subtype); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java index 7762afe..0971a68 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/DataWritableRecordConverter.java @@ -13,6 +13,9 @@ */ package org.apache.hadoop.hive.ql.io.parquet.convert; +import java.util.List; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.ArrayWritable; import parquet.io.api.GroupConverter; @@ -28,8 +31,10 @@ private final DataWritableGroupConverter root; - public DataWritableRecordConverter(final GroupType requestedSchema, final GroupType tableSchema) { - this.root = new DataWritableGroupConverter(requestedSchema, tableSchema); + public DataWritableRecordConverter(final GroupType requestedSchema, final GroupType tableSchema, + final List hiveColumnTypeInfos) { + this.root = new DataWritableGroupConverter(requestedSchema, tableSchema, + hiveColumnTypeInfos); } @Override @@ -41,4 +46,4 @@ public ArrayWritable getCurrentRecord() { public GroupConverter getRootConverter() { return root; } -} +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java index 67ce151..e6fb5ae 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/ETypeConverter.java @@ -16,12 +16,19 @@ import java.math.BigDecimal; import java.sql.Timestamp; import java.util.ArrayList; +import java.util.List; +import org.apache.hadoop.hive.common.type.HiveChar; +import org.apache.hadoop.hive.common.type.HiveVarchar; import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTime; import org.apache.hadoop.hive.ql.io.parquet.timestamp.NanoTimeUtils; +import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.io.DoubleWritable; +import org.apache.hadoop.hive.serde2.io.HiveCharWritable; import org.apache.hadoop.hive.serde2.io.HiveDecimalWritable; +import org.apache.hadoop.hive.serde2.io.HiveVarcharWritable; import org.apache.hadoop.hive.serde2.io.TimestampWritable; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.BytesWritable; import org.apache.hadoop.io.FloatWritable; @@ -145,6 +152,32 @@ protected TimestampWritable convert(Binary binary) { } }; } + }, + ECHAR_CONVERTER(HiveCharWritable.class) { + @Override + Converter getConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent) { + return new BinaryConverter(type, parent, index) { + @Override + protected HiveCharWritable convert(Binary binary) { + HiveChar hiveChar = new HiveChar(); + hiveChar.setValue(binary.toStringUsingUTF8()); + return new HiveCharWritable(hiveChar); + } + }; + } + }, + EVARCHAR_CONVERTER(HiveVarcharWritable.class) { + @Override + Converter getConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent) { + return new BinaryConverter(type, parent, index) { + @Override + protected HiveVarcharWritable convert(Binary binary) { + HiveVarchar hiveVarchar = new HiveVarchar(); + 
hiveVarchar.setValue(binary.toStringUsingUTF8()); + return new HiveVarcharWritable(hiveVarchar); + } + }; + } }; final Class _type; @@ -159,7 +192,8 @@ private ETypeConverter(final Class type) { abstract Converter getConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent); - public static Converter getNewConverter(final PrimitiveType type, final int index, final HiveGroupConverter parent) { + public static Converter getNewConverter(final PrimitiveType type, final int index, + final HiveGroupConverter parent, List hiveSchemaTypeInfos) { if (type.isPrimitive() && (type.asPrimitiveType().getPrimitiveTypeName().equals(PrimitiveType.PrimitiveTypeName.INT96))) { //TODO- cleanup once parquet support Timestamp type annotation. return ETypeConverter.ETIMESTAMP_CONVERTER.getConverter(type, index, parent); @@ -167,7 +201,15 @@ public static Converter getNewConverter(final PrimitiveType type, final int inde if (OriginalType.DECIMAL == type.getOriginalType()) { return EDECIMAL_CONVERTER.getConverter(type, index, parent); } else if (OriginalType.UTF8 == type.getOriginalType()) { - return ESTRING_CONVERTER.getConverter(type, index, parent); + if (hiveSchemaTypeInfos.get(index).getTypeName() + .startsWith(serdeConstants.CHAR_TYPE_NAME)) { + return ECHAR_CONVERTER.getConverter(type, index, parent); + } else if (hiveSchemaTypeInfos.get(index).getTypeName() + .startsWith(serdeConstants.VARCHAR_TYPE_NAME)) { + return EVARCHAR_CONVERTER.getConverter(type, index, parent); + } else if (type.isPrimitive()) { + return ESTRING_CONVERTER.getConverter(type, index, parent); + } } Class javaType = type.getPrimitiveTypeName().javaType; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java index 524a293..a364729 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveGroupConverter.java @@ -13,6 +13,9 @@ */ package org.apache.hadoop.hive.ql.io.parquet.convert; +import java.util.List; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.io.Writable; import parquet.io.api.Converter; @@ -23,17 +26,20 @@ public abstract class HiveGroupConverter extends GroupConverter { protected static Converter getConverterFromDescription(final Type type, final int index, - final HiveGroupConverter parent) { + final HiveGroupConverter parent, List hiveSchemaTypeInfos) { if (type == null) { return null; } if (type.isPrimitive()) { - return ETypeConverter.getNewConverter(type.asPrimitiveType(), index, parent); + return ETypeConverter.getNewConverter(type.asPrimitiveType(), index, parent, + hiveSchemaTypeInfos); } else { if (type.asGroupType().getRepetition() == Repetition.REPEATED) { - return new ArrayWritableGroupConverter(type.asGroupType(), parent, index); + return new ArrayWritableGroupConverter(type.asGroupType(), parent, index, + hiveSchemaTypeInfos); } else { - return new DataWritableGroupConverter(type.asGroupType(), parent, index); + return new DataWritableGroupConverter(type.asGroupType(), parent, index, + hiveSchemaTypeInfos); } } } @@ -42,4 +48,4 @@ protected static Converter getConverterFromDescription(final Type type, final in protected abstract void add(int index, Writable value); -} +} \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java 
b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java index 99901f0..3116451 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/convert/HiveSchemaConverter.java @@ -16,6 +16,7 @@ import java.util.List; import org.apache.hadoop.hive.ql.io.parquet.serde.ParquetHiveSerDe; +import org.apache.hadoop.hive.serde.serdeConstants; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; @@ -25,7 +26,6 @@ import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import parquet.schema.ConversionPatterns; -import parquet.schema.DecimalMetadata; import parquet.schema.GroupType; import parquet.schema.MessageType; import parquet.schema.OriginalType; @@ -81,6 +81,14 @@ private static Type convertType(final String name, final TypeInfo typeInfo, fina return new PrimitiveType(repetition, PrimitiveTypeName.INT96, name); } else if (typeInfo.equals(TypeInfoFactory.voidTypeInfo)) { throw new UnsupportedOperationException("Void type not implemented"); + } else if (typeInfo.getTypeName().toLowerCase().startsWith( + serdeConstants.CHAR_TYPE_NAME)) { + return Types.optional(PrimitiveTypeName.BINARY).as(OriginalType.UTF8) + .named(name); + } else if (typeInfo.getTypeName().toLowerCase().startsWith( + serdeConstants.VARCHAR_TYPE_NAME)) { + return Types.optional(PrimitiveTypeName.BINARY).as(OriginalType.UTF8) + .named(name); } else if (typeInfo instanceof DecimalTypeInfo) { DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo; int prec = decimalTypeInfo.precision(); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java index d6be4bd..3b9bf43 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/read/DataWritableReadSupport.java @@ -14,6 +14,7 @@ package org.apache.hadoop.hive.ql.io.parquet.read; import java.util.ArrayList; +import java.util.Arrays; import java.util.HashMap; import java.util.List; import java.util.Map; @@ -23,6 +24,8 @@ import org.apache.hadoop.hive.ql.io.parquet.convert.DataWritableRecordConverter; import org.apache.hadoop.hive.ql.metadata.VirtualColumn; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; import org.apache.hadoop.io.ArrayWritable; import org.apache.hadoop.util.StringUtils; @@ -60,6 +63,28 @@ return (List) VirtualColumn. 
removeVirtualColumns(StringUtils.getStringCollection(columns)); } + + private static List getColumnTypes(Configuration configuration) { + + List columnNames; + String columnNamesProperty = configuration.get(IOConstants.COLUMNS); + if (columnNamesProperty.length() == 0) { + columnNames = new ArrayList(); + } else { + columnNames = Arrays.asList(columnNamesProperty.split(",")); + } + List columnTypes; + String columnTypesProperty = configuration.get(IOConstants.COLUMNS_TYPES); + if (columnTypesProperty.length() == 0) { + columnTypes = new ArrayList(); + } else { + columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypesProperty); + } + + columnTypes = VirtualColumn.removeVirtualColumnTypes(columnNames, columnTypes); + return columnTypes; + } + /** * * It creates the readContext for Parquet side with the requested schema during the init phase. @@ -100,20 +125,22 @@ final List typeListWanted = new ArrayList(); final boolean indexAccess = configuration.getBoolean(PARQUET_COLUMN_INDEX_ACCESS, false); for (final Integer idx : indexColumnsWanted) { - String col = listColumns.get(idx); - if (indexAccess) { - typeListWanted.add(tableSchema.getType(col)); - } else { - col = col.toLowerCase(); - if (lowerCaseFileSchemaColumns.containsKey(col)) { - typeListWanted.add(tableSchema.getType(lowerCaseFileSchemaColumns.get(col))); + if (idx < listColumns.size()) { + String col = listColumns.get(idx); + if (indexAccess) { + typeListWanted.add(tableSchema.getType(col)); } else { - // should never occur? - String msg = "Column " + col + " at index " + idx + " does not exist in " + + col = col.toLowerCase(); + if (lowerCaseFileSchemaColumns.containsKey(col)) { + typeListWanted.add(tableSchema.getType(lowerCaseFileSchemaColumns.get(col))); + } else { + // should never occur? + String msg = "Column " + col + " at index " + idx + " does not exist in " + lowerCaseFileSchemaColumns; - throw new IllegalStateException(msg); + throw new IllegalStateException(msg); + } } - } + } } requestedSchemaByUser = resolveSchemaAccess(new MessageType(fileSchema.getName(), typeListWanted), fileSchema, configuration); @@ -146,7 +173,8 @@ } final MessageType tableSchema = resolveSchemaAccess(MessageTypeParser. 
parseMessageType(metadata.get(HIVE_SCHEMA_KEY)), fileSchema, configuration); - return new DataWritableRecordConverter(readContext.getRequestedSchema(), tableSchema); + return new DataWritableRecordConverter(readContext.getRequestedSchema(), tableSchema, + getColumnTypes(configuration)); } /** diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java index 47bf69c..d5aae3b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ArrayWritableObjectInspector.java @@ -25,12 +25,14 @@ import org.apache.hadoop.hive.serde2.objectinspector.SettableStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; +import org.apache.hadoop.hive.serde2.typeinfo.CharTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.DecimalTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.ListTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.MapTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.hive.serde2.typeinfo.VarcharTypeInfo; import org.apache.hadoop.io.ArrayWritable; /** @@ -102,12 +104,10 @@ private ObjectInspector getObjectInspector(final TypeInfo typeInfo) { return PrimitiveObjectInspectorFactory.writableTimestampObjectInspector; } else if (typeInfo.equals(TypeInfoFactory.dateTypeInfo)) { throw new UnsupportedOperationException("Parquet does not support date. See HIVE-6384"); - } else if (typeInfo.getTypeName().toLowerCase().startsWith(serdeConstants.DECIMAL_TYPE_NAME)) { - throw new UnsupportedOperationException("Parquet does not support decimal. See HIVE-6384"); } else if (typeInfo.getTypeName().toLowerCase().startsWith(serdeConstants.CHAR_TYPE_NAME)) { - throw new UnsupportedOperationException("Parquet does not support char. See HIVE-6384"); + return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector((CharTypeInfo) typeInfo); } else if (typeInfo.getTypeName().toLowerCase().startsWith(serdeConstants.VARCHAR_TYPE_NAME)) { - throw new UnsupportedOperationException("Parquet does not support varchar. 
See HIVE-6384"); + return PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector((VarcharTypeInfo) typeInfo); } else { throw new UnsupportedOperationException("Unknown field type: " + typeInfo); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java index e3e327c..e5c663e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/serde/ParquetHiveSerDe.java @@ -42,6 +42,8 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.ByteObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.DoubleObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.FloatObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveCharObjectInspector; +import org.apache.hadoop.hive.serde2.objectinspector.primitive.HiveVarcharObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.IntObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.LongObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.ShortObjectInspector; @@ -60,6 +62,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; +import parquet.io.api.Binary; /** * @@ -280,6 +283,12 @@ private Writable createPrimitive(final Object obj, final PrimitiveObjectInspecto return new BytesWritable(tgt); case TIMESTAMP: return new TimestampWritable(((TimestampObjectInspector) inspector).getPrimitiveJavaObject(obj)); + case CHAR: + String strippedValue = ((HiveCharObjectInspector) inspector).getPrimitiveJavaObject(obj).getStrippedValue(); + return new BytesWritable(Binary.fromString(strippedValue).getBytes()); + case VARCHAR: + String value = ((HiveVarcharObjectInspector) inspector).getPrimitiveJavaObject(obj).getValue(); + return new BytesWritable(Binary.fromString(value).getBytes()); default: throw new SerDeException("Unknown primitive : " + inspector.getPrimitiveCategory()); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java index 2c53f65..84d7e0f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/io/sarg/SearchArgumentImpl.java @@ -18,14 +18,6 @@ package org.apache.hadoop.hive.ql.io.sarg; -import java.util.ArrayDeque; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Deque; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import com.esotericsoftware.kryo.Kryo; import com.esotericsoftware.kryo.io.Input; import com.esotericsoftware.kryo.io.Output; @@ -57,6 +49,15 @@ import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import java.math.BigDecimal; +import java.util.ArrayDeque; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Deque; +import java.util.HashMap; +import java.util.List; +import java.util.Map; + /** * The implementation of SearchArguments. 
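The CHAR and VARCHAR cases added to ParquetHiveSerDe above differ only in padding: HiveChar keeps its value padded to the declared length, so the stripped value is what gets written to Parquet, while HiveVarchar never pads. A small illustration assuming only the existing HiveChar/HiveVarchar behaviour, which this patch relies on rather than changes:

import org.apache.hadoop.hive.common.type.HiveChar;
import org.apache.hadoop.hive.common.type.HiveVarchar;

public final class CharPaddingDemo {
  public static void main(String[] args) {
    HiveChar c = new HiveChar("ab", 10);
    System.out.println("[" + c.getValue() + "]");         // padded with trailing spaces up to length 10
    System.out.println("[" + c.getStrippedValue() + "]"); // "[ab]" - the form ParquetHiveSerDe writes
    HiveVarchar v = new HiveVarchar("ab", 10);
    System.out.println("[" + v.getValue() + "]");         // "[ab]" - varchar is written as-is
  }
}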
*/ @@ -947,7 +948,8 @@ private static Object boxLiteral(Object literal) { literal instanceof Long || literal instanceof Double || literal instanceof DateWritable || - literal instanceof HiveDecimal) { + literal instanceof HiveDecimal || + literal instanceof BigDecimal) { return literal; } else if (literal instanceof HiveChar || literal instanceof HiveVarchar) { @@ -979,7 +981,8 @@ private static Object boxLiteral(Object literal) { return PredicateLeaf.Type.FLOAT; } else if (literal instanceof DateWritable) { return PredicateLeaf.Type.DATE; - } else if (literal instanceof HiveDecimal) { + } else if (literal instanceof HiveDecimal || + literal instanceof BigDecimal) { return PredicateLeaf.Type.DECIMAL; } throw new IllegalArgumentException("Unknown type for literal " + literal); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java index f74f683..264052f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java @@ -165,13 +165,13 @@ public void acquireLocks(QueryPlan plan, Context ctx, String username) throws Lo break; case TABLE: + case DUMMYPARTITION: // in case of dynamic partitioning lock the table t = output.getTable(); compBuilder.setDbName(t.getDbName()); compBuilder.setTableName(t.getTableName()); break; case PARTITION: - case DUMMYPARTITION: compBuilder.setPartitionName(output.getPartition().getName()); t = output.getPartition().getTable(); compBuilder.setDbName(t.getDbName()); @@ -301,7 +301,10 @@ protected void destruct() { try { if (txnId > 0) rollbackTxn(); if (lockMgr != null) lockMgr.close(); + if (client != null) client.close(); } catch (Exception e) { + LOG.error("Caught exception " + e.getClass().getName() + " with message <" + e.getMessage() + + ">, swallowing as there is nothing we can do with it."); // Not much we can do about it here. 
} } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java index 6f225f3..e387b8f 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java @@ -89,6 +89,7 @@ import org.apache.hadoop.hive.metastore.api.Role; import org.apache.hadoop.hive.metastore.api.RolePrincipalGrant; import org.apache.hadoop.hive.metastore.api.SerDeInfo; +import org.apache.hadoop.hive.metastore.api.SetPartitionsStatsRequest; import org.apache.hadoop.hive.metastore.api.ShowCompactResponse; import org.apache.hadoop.hive.metastore.api.SkewedInfo; import org.apache.hadoop.hive.metastore.api.hive_metastoreConstants; @@ -2553,6 +2554,15 @@ public boolean updatePartitionColumnStatistics(ColumnStatistics statsObj) throws throw new HiveException(e); } } + + public boolean setPartitionColumnStatistics(SetPartitionsStatsRequest request) throws HiveException { + try { + return getMSC().setPartitionColumnStatistics(request); + } catch (Exception e) { + LOG.debug(StringUtils.stringifyException(e)); + throw new HiveException(e); + } + } public List getTableColumnStatistics( String dbName, String tableName, List colNames) throws HiveException { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java index 44f6198..edec1b7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/Partition.java @@ -233,6 +233,10 @@ public String getName() { return ret; } + public Path getPartitionPath() { + return getDataLocation(); + } + public Path getDataLocation() { if (table.isPartitioned()) { return new Path(tPartition.getSd().getLocation()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java index 4300145..4cf98d8 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/SessionHiveMetaStoreClient.java @@ -249,6 +249,8 @@ private void createTempTable(org.apache.hadoop.hive.metastore.api.Table tbl, + " is not a directory or unable to create one"); } } + // Make sure location string is in proper format + tbl.getSd().setLocation(tblPath.toString()); } // Add temp table info to current session diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java index 0637d46..ecc5d92 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/VirtualColumn.java @@ -22,25 +22,36 @@ import java.util.ArrayList; import java.util.Collection; import java.util.List; +import java.util.ListIterator; +import com.google.common.collect.ImmutableSet; +import com.google.common.collect.Iterables; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hive.common.classification.InterfaceAudience; import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.io.RecordIdentifier; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; import 
org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.hive.serde2.typeinfo.PrimitiveTypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +@InterfaceAudience.Private public class VirtualColumn implements Serializable { private static final long serialVersionUID = 1L; - public static VirtualColumn FILENAME = new VirtualColumn("INPUT__FILE__NAME", (PrimitiveTypeInfo)TypeInfoFactory.stringTypeInfo); - public static VirtualColumn BLOCKOFFSET = new VirtualColumn("BLOCK__OFFSET__INSIDE__FILE", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo); - public static VirtualColumn ROWOFFSET = new VirtualColumn("ROW__OFFSET__INSIDE__BLOCK", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo); + public static final VirtualColumn FILENAME = new VirtualColumn("INPUT__FILE__NAME", (PrimitiveTypeInfo)TypeInfoFactory.stringTypeInfo); + public static final VirtualColumn BLOCKOFFSET = new VirtualColumn("BLOCK__OFFSET__INSIDE__FILE", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo); + public static final VirtualColumn ROWOFFSET = new VirtualColumn("ROW__OFFSET__INSIDE__BLOCK", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo); - public static VirtualColumn RAWDATASIZE = new VirtualColumn("RAW__DATA__SIZE", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo); + public static final VirtualColumn RAWDATASIZE = new VirtualColumn("RAW__DATA__SIZE", (PrimitiveTypeInfo)TypeInfoFactory.longTypeInfo); + /** + * {@link org.apache.hadoop.hive.ql.io.RecordIdentifier} + */ + public static final VirtualColumn ROWID = new VirtualColumn("ROW__ID", RecordIdentifier.StructInfo.typeInfo, true, RecordIdentifier.StructInfo.oi); /** * GROUPINGID is used with GROUP BY GROUPINGS SETS, ROLLUP and CUBE. @@ -49,27 +60,28 @@ * set if that column has been aggregated in that row. Otherwise the * value is "0". Returns the decimal representation of the bit vector. 
*/ - public static VirtualColumn GROUPINGID = + public static final VirtualColumn GROUPINGID = new VirtualColumn("GROUPING__ID", (PrimitiveTypeInfo) TypeInfoFactory.intTypeInfo); - public static VirtualColumn[] VIRTUAL_COLUMNS = - new VirtualColumn[] {FILENAME, BLOCKOFFSET, ROWOFFSET, RAWDATASIZE, GROUPINGID}; - - private String name; - private PrimitiveTypeInfo typeInfo; - private boolean isHidden = true; + public static ImmutableSet VIRTUAL_COLUMN_NAMES = + ImmutableSet.of(FILENAME.getName(), BLOCKOFFSET.getName(), ROWOFFSET.getName(), + RAWDATASIZE.getName(), GROUPINGID.getName(), ROWID.getName()); - public VirtualColumn() { - } + private final String name; + private final TypeInfo typeInfo; + private final boolean isHidden; + private final ObjectInspector oi; - public VirtualColumn(String name, PrimitiveTypeInfo typeInfo) { - this(name, typeInfo, true); + private VirtualColumn(String name, PrimitiveTypeInfo typeInfo) { + this(name, typeInfo, true, + PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector(typeInfo)); } - VirtualColumn(String name, PrimitiveTypeInfo typeInfo, boolean isHidden) { + private VirtualColumn(String name, TypeInfo typeInfo, boolean isHidden, ObjectInspector oi) { this.name = name; this.typeInfo = typeInfo; this.isHidden = isHidden; + this.oi = oi; } public static List getStatsRegistry(Configuration conf) { @@ -87,26 +99,19 @@ public VirtualColumn(String name, PrimitiveTypeInfo typeInfo) { if (HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVEROWOFFSET)) { l.add(ROWOFFSET); } + l.add(ROWID); return l; } - public PrimitiveTypeInfo getTypeInfo() { + public TypeInfo getTypeInfo() { return typeInfo; } - public void setTypeInfo(PrimitiveTypeInfo typeInfo) { - this.typeInfo = typeInfo; - } - public String getName() { return this.name; } - public void setName(String name) { - this.name = name; - } - public boolean isHidden() { return isHidden; } @@ -115,37 +120,58 @@ public boolean getIsHidden() { return isHidden; } - public void setIsHidden(boolean isHidden) { - this.isHidden = isHidden; + public ObjectInspector getObjectInspector() { + return oi; } @Override public boolean equals(Object o) { - if (o == null) { - return false; - } if (this == o) { return true; } + if(!(o instanceof VirtualColumn)) { + return false; + } VirtualColumn c = (VirtualColumn) o; return this.name.equals(c.name) && this.typeInfo.getTypeName().equals(c.getTypeInfo().getTypeName()); } - + @Override + public int hashCode() { + int c = 19; + c = 31 * name.hashCode() + c; + return 31 * typeInfo.getTypeName().hashCode() + c; + } public static Collection removeVirtualColumns(final Collection columns) { - for(VirtualColumn vcol : VIRTUAL_COLUMNS) { - columns.remove(vcol.getName()); - } + Iterables.removeAll(columns, VIRTUAL_COLUMN_NAMES); return columns; } + public static List removeVirtualColumnTypes(final List columnNames, + final List columnTypes) { + if (columnNames.size() != columnTypes.size()) { + throw new IllegalArgumentException("Number of column names in configuration " + + columnNames.size() + " differs from column types " + columnTypes.size()); + } + + int i = 0; + ListIterator it = columnTypes.listIterator(); + while(it.hasNext()) { + it.next(); + if (VIRTUAL_COLUMN_NAMES.contains(columnNames.get(i))) { + it.remove(); + } + ++i; + } + return columnTypes; + } + public static StructObjectInspector getVCSObjectInspector(List vcs) { List names = new ArrayList(vcs.size()); List inspectors = new ArrayList(vcs.size()); for (VirtualColumn vc : vcs) { names.add(vc.getName()); - 
inspectors.add(PrimitiveObjectInspectorFactory.getPrimitiveWritableObjectInspector( - vc.getTypeInfo())); + inspectors.add(vc.oi); } return ObjectInspectorFactory.getStandardStructObjectInspector(names, inspectors); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java index ba4ac69..a622095 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java @@ -927,11 +927,9 @@ private boolean validateExprNodeDescRecursive(ExprNodeDesc desc) { if (desc instanceof ExprNodeColumnDesc) { ExprNodeColumnDesc c = (ExprNodeColumnDesc) desc; // Currently, we do not support vectorized virtual columns (see HIVE-5570). - for (VirtualColumn vc : VirtualColumn.VIRTUAL_COLUMNS) { - if (c.getColumn().equals(vc.getName())) { - LOG.info("Cannot vectorize virtual column " + c.getColumn()); - return false; - } + if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(c.getColumn())) { + LOG.info("Cannot vectorize virtual column " + c.getColumn()); + return false; } } String typeName = desc.getTypeInfo().getTypeName(); @@ -1076,10 +1074,8 @@ private boolean isVirtualColumn(ColumnInfo column) { // Not using method column.getIsVirtualCol() because partitioning columns are also // treated as virtual columns in ColumnInfo. - for (VirtualColumn vc : VirtualColumn.VIRTUAL_COLUMNS) { - if (column.getInternalName().equals(vc.getName())) { + if (VirtualColumn.VIRTUAL_COLUMN_NAMES.contains(column.getInternalName())) { return true; - } } return false; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java index 838d6b1..987ecc2 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/optimizer/stats/annotation/StatsRulesProcFactory.java @@ -20,6 +20,7 @@ import com.google.common.collect.Lists; import com.google.common.collect.Maps; + import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.conf.HiveConf; @@ -67,8 +68,10 @@ import org.apache.hadoop.hive.serde.serdeConstants; import java.util.Collections; +import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Set; import java.util.Stack; public class StatsRulesProcFactory { @@ -803,12 +806,13 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // statistics object that is combination of statistics from all // relations involved in JOIN Statistics stats = new Statistics(); - List rowCountParents = Lists.newArrayList(); + Map rowCountParents = new HashMap(); List distinctVals = Lists.newArrayList(); // 2 relations, multiple attributes boolean multiAttr = false; int numAttr = 1; + int numParent = parents.size(); Map joinedColStats = Maps.newHashMap(); Map> joinKeys = Maps.newHashMap(); @@ -818,9 +822,20 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, ReduceSinkOperator parent = (ReduceSinkOperator) jop.getParentOperators().get(pos); Statistics parentStats = parent.getStatistics(); - rowCountParents.add(parentStats.getNumRows()); List keyExprs = parent.getConf().getKeyCols(); + // Parent RS may have column statistics from multiple parents. 
+ // Populate table alias to row count map, this will be used later to + // scale down/up column statistics based on new row count + // NOTE: JOIN with UNION as parent of RS will not have table alias + // propagated properly. UNION operator does not propagate the table + // alias of subqueries properly to expression nodes. Hence union20.q + // will have wrong number of rows. + Set tableAliases = StatsUtils.getAllTableAlias(parent.getColumnExprMap()); + for (String tabAlias : tableAliases) { + rowCountParents.put(tabAlias, parentStats.getNumRows()); + } + // multi-attribute join key if (keyExprs.size() > 1) { multiAttr = true; @@ -860,12 +875,19 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, perAttrDVs.add(cs.getCountDistint()); } } + distinctVals.add(getDenominator(perAttrDVs)); perAttrDVs.clear(); } - for (Long l : distinctVals) { - denom *= l; + if (numAttr > numParent) { + // To avoid denominator getting larger and aggressively reducing + // number of rows, we will ease out denominator. + denom = getEasedOutDenominator(distinctVals); + } else { + for (Long l : distinctVals) { + denom *= l; + } } } else { for (List jkeys : joinKeys.values()) { @@ -890,6 +912,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Map colExprMap = jop.getColumnExprMap(); RowSchema rs = jop.getSchema(); List outColStats = Lists.newArrayList(); + Map outInTabAlias = new HashMap(); for (ColumnInfo ci : rs.getSignature()) { String key = ci.getInternalName(); ExprNodeDesc end = colExprMap.get(key); @@ -901,6 +924,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, ColStatistics cs = joinedColStats.get(fqColName); String outColName = key; String outTabAlias = ci.getTabAlias(); + outInTabAlias.put(outTabAlias, tabAlias); if (cs != null) { cs.setColumnName(outColName); cs.setTableAlias(outTabAlias); @@ -911,7 +935,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, // update join statistics stats.setColumnStats(outColStats); - long newRowCount = computeNewRowCount(rowCountParents, denom); + long newRowCount = computeNewRowCount( + Lists.newArrayList(rowCountParents.values()), denom); if (newRowCount <= 0 && LOG.isDebugEnabled()) { newRowCount = 0; @@ -920,7 +945,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, + " #Rows of parents: " + rowCountParents.toString() + ". Denominator: " + denom); } - updateStatsForJoinType(stats, newRowCount, true, jop.getConf()); + updateStatsForJoinType(stats, newRowCount, jop.getConf(), + rowCountParents, outInTabAlias); jop.setStatistics(stats); if (LOG.isDebugEnabled()) { @@ -966,37 +992,54 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, return null; } + private Long getEasedOutDenominator(List distinctVals) { + // Exponential back-off for NDVs. + // 1) Descending order sort of NDVs + // 2) denominator = NDV1 * (NDV2 ^ (1/2)) * (NDV3 ^ (1/4))) * .... 
+ Collections.sort(distinctVals, Collections.reverseOrder()); + + long denom = distinctVals.get(0); + for (int i = 1; i < distinctVals.size(); i++) { + denom = (long) (denom * Math.pow(distinctVals.get(i), 1.0 / (1 << i))); + } + + return denom; + } + private void updateStatsForJoinType(Statistics stats, long newNumRows, - boolean useColStats, JoinDesc conf) { - long oldRowCount = stats.getNumRows(); - double ratio = (double) newNumRows / (double) oldRowCount; + JoinDesc conf, Map rowCountParents, + Map outInTabAlias) { stats.setNumRows(newNumRows); - if (useColStats) { - List colStats = stats.getColumnStats(); - for (ColStatistics cs : colStats) { - long oldDV = cs.getCountDistint(); - long newDV = oldDV; - - // if ratio is greater than 1, then number of rows increases. This can happen - // when some operators like GROUPBY duplicates the input rows in which case - // number of distincts should not change. Update the distinct count only when - // the output number of rows is less than input number of rows. - if (ratio <= 1.0) { - newDV = (long) Math.ceil(ratio * oldDV); - } - // Assumes inner join - // TODO: HIVE-5579 will handle different join types - cs.setNumNulls(0); - cs.setCountDistint(newDV); + // scale down/up the column statistics based on the changes in number of + // rows from each parent. For ex: If there are 2 parents for JOIN operator + // with 1st parent having 200 rows and 2nd parent having 2000 rows. Now if + // the new number of rows after applying join rule is 10, then the column + // stats for columns from 1st parent should be scaled down by 200/10 = 20x + // and stats for columns from 2nd parent should be scaled down by 200x + List colStats = stats.getColumnStats(); + for (ColStatistics cs : colStats) { + long oldRowCount = rowCountParents.get(outInTabAlias.get(cs.getTableAlias())); + double ratio = (double) newNumRows / (double) oldRowCount; + long oldDV = cs.getCountDistint(); + long newDV = oldDV; + + // if ratio is greater than 1, then number of rows increases. This can happen + // when some operators like GROUPBY duplicates the input rows in which case + // number of distincts should not change. Update the distinct count only when + // the output number of rows is less than input number of rows. 
+ if (ratio <= 1.0) { + newDV = (long) Math.ceil(ratio * oldDV); } - stats.setColumnStats(colStats); - long newDataSize = StatsUtils.getDataSizeFromColumnStats(newNumRows, colStats); - stats.setDataSize(newDataSize); - } else { - long newDataSize = (long) (ratio * stats.getDataSize()); - stats.setDataSize(newDataSize); + // Assumes inner join + // TODO: HIVE-5579 will handle different join types + cs.setNumNulls(0); + cs.setCountDistint(newDV); } + stats.setColumnStats(colStats); + long newDataSize = StatsUtils + .getDataSizeFromColumnStats(newNumRows, colStats); + stats.setDataSize(newDataSize); } private long computeNewRowCount(List rowCountParents, long denom) { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g index f448b16..9463ef1 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FromClauseParser.g @@ -144,7 +144,7 @@ fromSource @init { gParent.pushMsg("from source", state); } @after { gParent.popMsg(state); } : - ((Identifier LPAREN)=> partitionedTableFunction | tableSource | subQuerySource) (lateralView^)* + ((Identifier LPAREN)=> partitionedTableFunction | tableSource | subQuerySource | virtualTableSource) (lateralView^)* ; tableBucketSample @@ -256,3 +256,46 @@ searchCondition ; //----------------------------------------------------------------------------------- + +//-------- Row Constructor ---------------------------------------------------------- +//in support of SELECT * FROM (VALUES(1,2,3),(4,5,6),...) as FOO(a,b,c) and +// INSERT INTO
<table> (col1,col2,...) VALUES(...),(...),... +// INSERT INTO <table>
(col1,col2,...) SELECT * FROM (VALUES(1,2,3),(4,5,6),...) as Foo(a,b,c) +valueRowConstructor + : + LPAREN atomExpression (COMMA atomExpression)* RPAREN -> ^(TOK_VALUE_ROW atomExpression+) + ; + +valuesTableConstructor + : + valueRowConstructor (COMMA valueRowConstructor)* -> ^(TOK_VALUES_TABLE valueRowConstructor+) + ; + +/* +VALUES(1),(2) means 2 rows, 1 column each. +VALUES(1,2),(3,4) means 2 rows, 2 columns each. +VALUES(1,2,3) means 1 row, 3 columns +*/ +valuesClause + : + KW_VALUES valuesTableConstructor -> valuesTableConstructor + ; + +/* +This represents a clause like this: +(VALUES(1,2),(2,3)) as VirtTable(col1,col2) +*/ +virtualTableSource + : + LPAREN valuesClause RPAREN tableNameColList -> ^(TOK_VIRTUAL_TABLE tableNameColList valuesClause) + ; +/* +e.g. as VirtTable(col1,col2) +Note that we only want literals as column names +*/ +tableNameColList + : + KW_AS? identifier LPAREN identifier (COMMA identifier)* RPAREN -> ^(TOK_VIRTUAL_TABREF ^(TOK_TABNAME identifier) ^(TOK_COL_NAME identifier+)) + ; + +//----------------------------------------------------------------------------------- \ No newline at end of file diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java index bf3b65a..f9b875e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java @@ -169,6 +169,7 @@ private void addEntities(String functionName, boolean isTemporaryFunction) try { String[] qualifiedNameParts = FunctionUtils.getQualifiedFunctionNameParts(functionName); String dbName = qualifiedNameParts[0]; + functionName = qualifiedNameParts[1]; database = getDatabase(dbName); } catch (HiveException e) { LOG.error(e); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g index 20334ac..ce05fff 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveLexer.g @@ -292,6 +292,7 @@ KW_TRANSACTIONS: 'TRANSACTIONS'; KW_REWRITE : 'REWRITE'; KW_AUTHORIZATION: 'AUTHORIZATION'; KW_CONF: 'CONF'; +KW_VALUES: 'VALUES'; // Operators // NOTE: if you add a new function/operator, add it to sysFuncNames so that describe function _FUNC_ will work. 
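The FromClauseParser.g and HiveLexer.g hunks above only add parse-time support for VALUES row constructors and virtual table sources; the SemanticAnalyzer change further down still rejects TOK_VIRTUAL_TABLE during analysis. The following is a minimal illustrative sketch (not part of the patch; the class name and query text are assumptions) of driving the new grammar through ParseDriver, mirroring the TestIUD cases added later in this patch:

import org.apache.hadoop.hive.ql.parse.ASTNode;
import org.apache.hadoop.hive.ql.parse.ParseDriver;
import org.apache.hadoop.hive.ql.parse.ParseException;

public class ValuesGrammarSketch {
  public static void main(String[] args) throws ParseException {
    // TestIUD starts a SessionState before parsing; plain parsing may not require it.
    ParseDriver pd = new ParseDriver();
    // virtualTableSource: a parenthesized VALUES clause followed by a table/column alias list.
    ASTNode root = pd.parse("select * from (values (1,2),(3,4)) as VC(a,b)");
    ASTNode query = (ASTNode) root.getChild(0);
    // Prints a tree rooted at TOK_QUERY containing TOK_VIRTUAL_TABLE, TOK_VIRTUAL_TABREF,
    // TOK_VALUES_TABLE and TOK_VALUE_ROW nodes, as asserted in
    // TestIUD#testSelectStarFromVirtTable2Row.
    System.out.println(query.toStringTree());
  }
}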
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g index a76cad7..32db0c7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g @@ -331,6 +331,15 @@ TOK_RESOURCE_LIST; TOK_COMPACT; TOK_SHOW_COMPACTIONS; TOK_SHOW_TRANSACTIONS; +TOK_DELETE_FROM; +TOK_UPDATE_TABLE; +TOK_SET_COLUMNS_CLAUSE; +TOK_VALUE_ROW; +TOK_VALUES_TABLE; +TOK_VIRTUAL_TABLE; +TOK_VIRTUAL_TABREF; +TOK_ANONYMOUS; +TOK_COL_NAME; } @@ -469,6 +478,9 @@ import java.util.HashMap; xlateMap.put("KW_DEFINED", "DEFINED"); xlateMap.put("KW_SUBQUERY", "SUBQUERY"); xlateMap.put("KW_REWRITE", "REWRITE"); + xlateMap.put("KW_UPDATE", "UPDATE"); + + xlateMap.put("KW_VALUES", "VALUES"); // Operators xlateMap.put("DOT", "."); @@ -638,6 +650,8 @@ execStatement | exportStatement | importStatement | ddlStatement + | deleteStatement + | updateStatement ; loadStatement @@ -2095,11 +2109,28 @@ singleFromStatement ( b+=body )+ -> ^(TOK_QUERY fromClause body+) ; +/* +The valuesClause rule below ensures that the parse tree for +"insert into table FOO values (1,2),(3,4)" looks the same as +"insert into table FOO select a,b from (values(1,2),(3,4)) as BAR(a,b)" which itself is made to look +very similar to the tree for "insert into table FOO select a,b from BAR". Since virtual table name +is implicit, it's represented as TOK_ANONYMOUS. +*/ regularBody[boolean topLevel] : i=insertClause + ( s=selectStatement[topLevel] {$s.tree.getChild(1).replaceChildren(0, 0, $i.tree);} -> {$s.tree} + | + valuesClause + -> ^(TOK_QUERY + ^(TOK_FROM + ^(TOK_VIRTUAL_TABLE ^(TOK_VIRTUAL_TABREF ^(TOK_ANONYMOUS)) valuesClause) + ) + ^(TOK_INSERT {$i.tree} ^(TOK_SELECT ^(TOK_SELEXPR TOK_ALLCOLREF))) + ) + ) | selectStatement[topLevel] ; @@ -2208,3 +2239,34 @@ limitClause : KW_LIMIT num=Number -> ^(TOK_LIMIT $num) ; + +//DELETE FROM WHERE ...; +deleteStatement +@init { pushMsg("delete statement", state); } +@after { popMsg(state); } + : + KW_DELETE KW_FROM tableName (whereClause)? -> ^(TOK_DELETE_FROM tableName whereClause?) + ; + +/*SET = (3 + col2)*/ +columnAssignmentClause + : + tableOrColumn EQUAL^ atomExpression + ; + +/*SET col1 = 5, col2 = (4 + col4), ...*/ +setColumnsClause + : + KW_SET columnAssignmentClause (COMMA columnAssignmentClause)* -> ^(TOK_SET_COLUMNS_CLAUSE columnAssignmentClause* ) + ; + +/* + UPDATE
+ SET col1 = val1, col2 = val2... WHERE ... +*/ +updateStatement +@init { pushMsg("update statement", state); } +@after { popMsg(state); } + : + KW_UPDATE tableName setColumnsClause whereClause? -> ^(TOK_UPDATE_TABLE tableName setColumnsClause whereClause?) + ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g index 75897b8..34d2dfc 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/IdentifiersParser.g @@ -538,5 +538,5 @@ functionIdentifier nonReserved : - KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION + KW_TRUE | KW_FALSE | KW_LIKE | KW_EXISTS | KW_ASC | KW_DESC | KW_ORDER | KW_GROUP | KW_BY | KW_AS | KW_INSERT | KW_OVERWRITE | KW_OUTER | KW_LEFT | KW_RIGHT | KW_FULL | KW_PARTITION | KW_PARTITIONS | KW_TABLE | KW_TABLES | KW_COLUMNS | KW_INDEX | KW_INDEXES | KW_REBUILD | 
KW_FUNCTIONS | KW_SHOW | KW_MSCK | KW_REPAIR | KW_DIRECTORY | KW_LOCAL | KW_USING | KW_CLUSTER | KW_DISTRIBUTE | KW_SORT | KW_UNION | KW_LOAD | KW_EXPORT | KW_IMPORT | KW_DATA | KW_INPATH | KW_IS | KW_NULL | KW_CREATE | KW_EXTERNAL | KW_ALTER | KW_CHANGE | KW_FIRST | KW_AFTER | KW_DESCRIBE | KW_DROP | KW_RENAME | KW_IGNORE | KW_PROTECTION | KW_TO | KW_COMMENT | KW_BOOLEAN | KW_TINYINT | KW_SMALLINT | KW_INT | KW_BIGINT | KW_FLOAT | KW_DOUBLE | KW_DATE | KW_DATETIME | KW_TIMESTAMP | KW_DECIMAL | KW_STRING | KW_ARRAY | KW_STRUCT | KW_UNIONTYPE | KW_PARTITIONED | KW_CLUSTERED | KW_SORTED | KW_INTO | KW_BUCKETS | KW_ROW | KW_ROWS | KW_FORMAT | KW_DELIMITED | KW_FIELDS | KW_TERMINATED | KW_ESCAPED | KW_COLLECTION | KW_ITEMS | KW_KEYS | KW_KEY_TYPE | KW_LINES | KW_STORED | KW_FILEFORMAT | KW_INPUTFORMAT | KW_OUTPUTFORMAT | KW_INPUTDRIVER | KW_OUTPUTDRIVER | KW_OFFLINE | KW_ENABLE | KW_DISABLE | KW_READONLY | KW_NO_DROP | KW_LOCATION | KW_BUCKET | KW_OUT | KW_OF | KW_PERCENT | KW_ADD | KW_REPLACE | KW_RLIKE | KW_REGEXP | KW_TEMPORARY | KW_EXPLAIN | KW_FORMATTED | KW_PRETTY | KW_DEPENDENCY | KW_LOGICAL | KW_SERDE | KW_WITH | KW_DEFERRED | KW_SERDEPROPERTIES | KW_DBPROPERTIES | KW_LIMIT | KW_SET | KW_UNSET | KW_TBLPROPERTIES | KW_IDXPROPERTIES | KW_VALUE_TYPE | KW_ELEM_TYPE | KW_MAPJOIN | KW_STREAMTABLE | KW_HOLD_DDLTIME | KW_CLUSTERSTATUS | KW_UTC | KW_UTCTIMESTAMP | KW_LONG | KW_DELETE | KW_PLUS | KW_MINUS | KW_FETCH | KW_INTERSECT | KW_VIEW | KW_IN | KW_DATABASES | KW_MATERIALIZED | KW_SCHEMA | KW_SCHEMAS | KW_GRANT | KW_REVOKE | KW_SSL | KW_UNDO | KW_LOCK | KW_LOCKS | KW_UNLOCK | KW_SHARED | KW_EXCLUSIVE | KW_PROCEDURE | KW_UNSIGNED | KW_WHILE | KW_READ | KW_READS | KW_PURGE | KW_RANGE | KW_ANALYZE | KW_BEFORE | KW_BETWEEN | KW_BOTH | KW_BINARY | KW_CONTINUE | KW_CURSOR | KW_TRIGGER | KW_RECORDREADER | KW_RECORDWRITER | KW_SEMI | KW_LATERAL | KW_TOUCH | KW_ARCHIVE | KW_UNARCHIVE | KW_COMPUTE | KW_STATISTICS | KW_USE | KW_OPTION | KW_CONCATENATE | KW_SHOW_DATABASE | KW_UPDATE | KW_RESTRICT | KW_CASCADE | KW_SKEWED | KW_ROLLUP | KW_CUBE | KW_DIRECTORIES | KW_FOR | KW_GROUPING | KW_SETS | KW_TRUNCATE | KW_NOSCAN | KW_USER | KW_ROLE | KW_ROLES | KW_INNER | KW_DEFINED | KW_ADMIN | KW_JAR | KW_FILE | KW_OWNER | KW_PRINCIPALS | KW_ALL | KW_DEFAULT | KW_NONE | KW_COMPACT | KW_COMPACTIONS | KW_TRANSACTIONS | KW_REWRITE | KW_AUTHORIZATION | KW_VALUES ; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java index 7a71ec7..b05d3b4 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java @@ -972,6 +972,8 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) ASTNode frm = (ASTNode) ast.getChild(0); if (frm.getToken().getType() == HiveParser.TOK_TABREF) { processTable(qb, frm); + } else if (frm.getToken().getType() == HiveParser.TOK_VIRTUAL_TABLE) { + throw new RuntimeException("VALUES() clause is not fully supported yet..."); } else if (frm.getToken().getType() == HiveParser.TOK_SUBQUERY) { processSubQuery(qb, frm); } else if (frm.getToken().getType() == HiveParser.TOK_LATERAL_VIEW || @@ -1164,6 +1166,10 @@ public boolean doPhase1(ASTNode ast, QB qb, Phase1Ctx ctx_1) case HiveParser.TOK_CTE: processCTE(qb, ast); break; + case HiveParser.TOK_DELETE_FROM: + throw new RuntimeException("DELETE is not (yet) implemented..."); + case HiveParser.TOK_UPDATE_TABLE: + throw new RuntimeException("UPDATE is not (yet) 
implemented..."); default: skipRecursion = false; break; @@ -10337,6 +10343,19 @@ private void validateCreateView(CreateViewDesc createVwDesc) try { Table oldView = getTable(createVwDesc.getViewName(), false); + // Do not allow view to be defined on temp table + Set<String> tableAliases = qb.getTabAliases(); + for (String alias : tableAliases) { + try { + Table table = db.getTable(qb.getTabNameForAlias(alias)); + if (table.isTemporary()) { + throw new SemanticException("View definition references temporary table " + alias); + } + } catch (HiveException ex) { + throw new SemanticException(ex); + } + } + // ALTER VIEW AS SELECT requires the view must exist if (createVwDesc.getIsAlterViewAs() && oldView == null) { String viewNotExistErrorMsg = diff --git a/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java b/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java index 4a6dc61..f5bc427 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/processors/HiveCommand.java @@ -49,6 +49,9 @@ public static HiveCommand find(String[] command) { if (command.length > 1 && "role".equalsIgnoreCase(command[1])) { // special handling for set role r1 statement return null; + } else if(command.length > 1 && "from".equalsIgnoreCase(command[1])) { + //special handling for SQL "delete from <table>
where..." + return null; } else if (COMMANDS.contains(cmd)) { return HiveCommand.valueOf(cmd); } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java index 99b26bd..7cb7c5e 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/stats/StatsUtils.java @@ -18,11 +18,8 @@ package org.apache.hadoop.hive.ql.stats; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - +import com.google.common.base.Joiner; +import com.google.common.collect.Lists; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.fs.FileSystem; @@ -79,8 +76,12 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.WritableTimestampObjectInspector; import org.apache.hadoop.io.BytesWritable; -import com.google.common.base.Joiner; -import com.google.common.collect.Lists; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; public class StatsUtils { @@ -223,7 +224,7 @@ public static Statistics collectStatistics(HiveConf conf, PrunedPartitionList pa if (aggrStats.getPartsFound() != partNames.size() && colState != State.NONE) { LOG.debug("Column stats requested for : " + partNames.size() +" partitions. " + "Able to retrieve for " + aggrStats.getPartsFound() + " partitions"); - stats.updateColumnStatsState(State.PARTIAL); + colState = State.PARTIAL; } stats.setColumnStatsState(colState); } @@ -1216,4 +1217,33 @@ private static String getFullyQualifiedName(String... names) { } return result; } + + /** + * Returns all table aliases from expression nodes + * @param columnExprMap - column expression map + * @return + */ + public static Set getAllTableAlias( + Map columnExprMap) { + Set result = new HashSet(); + if (columnExprMap != null) { + for (ExprNodeDesc end : columnExprMap.values()) { + getTableAliasFromExprNode(end, result); + } + } + return result; + } + + private static void getTableAliasFromExprNode(ExprNodeDesc end, + Set output) { + + if (end instanceof ExprNodeColumnDesc) { + output.add(((ExprNodeColumnDesc) end).getTabAlias()); + } else if (end instanceof ExprNodeGenericFuncDesc) { + for (ExprNodeDesc child : end.getChildren()) { + getTableAliasFromExprNode(child, output); + } + } + + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java index 01c5500..1a83c64 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/txn/compactor/CompactorMR.java @@ -39,6 +39,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.InputFormat; import org.apache.hadoop.mapred.InputSplit; import org.apache.hadoop.mapred.JobClient; @@ -485,20 +486,21 @@ public float getProgress() throws IOException { } static class CompactorMap - implements Mapper { + implements Mapper { JobConf jobConf; RecordWriter writer; @Override - public void map(NullWritable key, CompactorInputSplit split, + public void map(WritableComparable key, CompactorInputSplit split, OutputCollector nullWritableVOutputCollector, Reporter reporter) throws IOException { // This 
will only get called once, since CompactRecordReader only returns one record, // the input split. // Based on the split we're passed we go instantiate the real reader and then iterate on it // until it finishes. - AcidInputFormat aif = + @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class + AcidInputFormat aif = instantiate(AcidInputFormat.class, jobConf.get(INPUT_FORMAT_CLASS_NAME)); ValidTxnList txnList = new ValidTxnListImpl(jobConf.get(ValidTxnList.VALID_TXNS_KEY)); @@ -541,7 +543,8 @@ private void getWriter(Reporter reporter, ObjectInspector inspector, .bucket(bucket); // Instantiate the underlying output format - AcidOutputFormat aof = + @SuppressWarnings("unchecked")//since there is no way to parametrize instance of Class + AcidOutputFormat aof = instantiate(AcidOutputFormat.class, jobConf.get(OUTPUT_FORMAT_CLASS_NAME)); writer = aof.getRawRecordWriter(new Path(jobConf.get(TMP_LOCATION)), options); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java index 0a437e9..0f5a11b 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStreamingEvaluator.java @@ -179,6 +179,7 @@ public Object terminate(AggregationBuffer agg) throws HiveException { for (int i = 0; i < ss.numFollowing; i++) { ss.results.add(getNextResult(ss)); + ss.numRows++; } return o; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java index e10a2eb..07cc84c 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCase.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -31,6 +32,23 @@ * thrown. 2. c and f should be compatible types, or an exception will be * thrown. 
*/ +@Description( + name = "case", + value = "CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END - " + + "When a = b, returns c; when a = d, return e; else return f", + extended = "Example:\n " + + "SELECT\n" + + " CASE deptno\n" + + " WHEN 1 THEN Engineering\n" + + " WHEN 2 THEN Finance\n" + + " ELSE admin\n" + + " END,\n" + + " CASE zone\n" + + " WHEN 7 THEN Americas\n" + + " ELSE Asia-Pac\n" + + " END\n" + + " FROM emp_details") + public class GenericUDFCase extends GenericUDF { private transient ObjectInspector[] argumentOIs; private transient GenericUDFUtils.ReturnObjectInspectorResolver returnOIResolver; diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java index 827ce98..fb5de19 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLag.java @@ -18,8 +18,17 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.UDFType; +@Description( + name = "lag", + value = "LAG (scalar_expression [,offset] [,default]) OVER ([query_partition_clause] order_by_clause); " + + "The LAG function is used to access data from a previous row.", + extended = "Example:\n " + + "select p1.p_mfgr, p1.p_name, p1.p_size,\n" + + " p1.p_size - lag(p1.p_size,1,p1.p_size) over( distribute by p1.p_mfgr sort by p1.p_name) as deltaSz\n" + + " from part p1 join part p2 on p1.p_partkey = p2.p_partkey") @UDFType(impliesOrder = true) public class GenericUDFLag extends GenericUDFLeadLag { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java index a074ec2..1517a11 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLead.java @@ -18,8 +18,19 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.udf.UDFType; +@Description( + name = "lead", + value = "LEAD (scalar_expression [,offset] [,default]) OVER ([query_partition_clause] order_by_clause); " + + "The LEAD function is used to return data from the next row. ", + extended = "Example:\n " + + "select p_name, p_retailprice, lead(p_retailprice) over() as l1,\n" + + " lag(p_retailprice) over() as l2\n" + + " from part\n" + + " where p_retailprice = 1173.15") + @UDFType(impliesOrder = true) public class GenericUDFLead extends GenericUDFLeadLag { diff --git a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java index 6ffdbd8..3853540 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFWhen.java @@ -18,6 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde.serdeConstants; @@ -31,6 +32,23 @@ * NOTES: 1. a and c should be boolean, or an exception will be thrown. 2. b, d * and f should be common types, or an exception will be thrown. 
*/ +@Description( + name = "when", + value = "CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END - " + + "When a = true, returns b; when c = true, return d; else return e", + extended = "Example:\n " + + "SELECT\n" + + " CASE\n" + + " WHEN deptno=1 THEN Engineering\n" + + " WHEN deptno=2 THEN Finance\n" + + " ELSE admin\n" + + " END,\n" + + " CASE\n" + + " WHEN zone=7 THEN Americas\n" + + " ELSE Asia-Pac\n" + + " END\n" + + " FROM emp_details") + public class GenericUDFWhen extends GenericUDF { private transient ObjectInspector[] argumentOIs; private transient GenericUDFUtils.ReturnObjectInspectorResolver returnOIResolver; diff --git a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java index 2a871c5..bd0d5c5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/exec/vector/expressions/TestVectorTypeCasts.java @@ -332,7 +332,7 @@ public void testCastDecimalToString() { StringExpr.compare(v, 0, v.length, r.vector[1], r.start[1], r.length[1])); - v = toBytes("9999999999999999"); + v = toBytes("9999999999999999.00"); Assert.assertEquals(0, StringExpr.compare(v, 0, v.length, r.vector[2], r.start[2], r.length[2])); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java index 35e30b8..2ec751d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestInputOutputFormat.java @@ -973,7 +973,7 @@ public void testInOutFormat() throws Exception { List fields =inspector.getAllStructFieldRefs(); IntObjectInspector intInspector = (IntObjectInspector) fields.get(0).getFieldObjectInspector(); - assertEquals(0.0, reader.getProgress(), 0.00001); + assertEquals(0.33, reader.getProgress(), 0.01); while (reader.next(key, value)) { assertEquals(++rowNum, intInspector.get(inspector. 
getStructFieldData(serde.deserialize(value), fields.get(0)))); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java index b53bd85..db553f5 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRecordUpdater.java @@ -24,6 +24,7 @@ import org.apache.hadoop.hive.ql.io.AcidOutputFormat; import org.apache.hadoop.hive.ql.io.AcidUtils; import org.apache.hadoop.hive.ql.io.RecordUpdater; +import org.apache.hadoop.hive.serde2.SerDeStats; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.io.IntWritable; @@ -95,6 +96,10 @@ public void testWriter() throws Exception { updater.insert(12, new MyRow("fourth")); updater.insert(12, new MyRow("fifth")); updater.flush(); + + // Check the stats + assertEquals(5L, updater.getStats().getRowCount()); + Path bucketPath = AcidUtils.createFilename(root, options); Path sidePath = OrcRecordUpdater.getSideFile(bucketPath); DataInputStream side = fs.open(sidePath); @@ -158,6 +163,8 @@ public void testWriter() throws Exception { reader = OrcFile.createReader(bucketPath, new OrcFile.ReaderOptions(conf).filesystem(fs)); assertEquals(6, reader.getNumberOfRows()); + assertEquals(6L, updater.getStats().getRowCount()); + assertEquals(false, fs.exists(sidePath)); } @@ -182,6 +189,7 @@ public void testUpdates() throws Exception { RecordUpdater updater = new OrcRecordUpdater(root, options); updater.update(100, 10, 30, new MyRow("update")); updater.delete(100, 40, 60); + assertEquals(-1L, updater.getStats().getRowCount()); updater.close(false); Path bucketPath = AcidUtils.createFilename(root, options); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java index b87cf74..f232c57 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/parquet/TestHiveSchemaConverter.java @@ -88,6 +88,26 @@ public void testDecimalType() throws Exception { } @Test + public void testCharType() throws Exception { + testConversion( + "a", + "char(5)", + "message hive_schema {\n" + + " optional binary a (UTF8);\n" + + "}\n"); + } + + @Test + public void testVarcharType() throws Exception { + testConversion( + "a", + "varchar(10)", + "message hive_schema {\n" + + " optional binary a (UTF8);\n" + + "}\n"); + } + + @Test public void testArray() throws Exception { testConversion("arrayCol", "array", diff --git a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java index b1524f7..3ecf365 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/io/sarg/TestSearchArgumentImpl.java @@ -18,15 +18,7 @@ package org.apache.hadoop.hive.ql.io.sarg; -import static junit.framework.Assert.assertEquals; -import static junit.framework.Assert.assertTrue; - -import java.beans.XMLDecoder; -import java.io.ByteArrayInputStream; -import java.io.UnsupportedEncodingException; -import java.util.List; -import java.util.Set; - +import com.google.common.collect.Sets; import org.apache.hadoop.hive.common.type.HiveChar; import 
org.apache.hadoop.hive.common.type.HiveDecimal; import org.apache.hadoop.hive.common.type.HiveVarchar; @@ -37,7 +29,15 @@ import org.apache.hadoop.hive.serde2.io.DateWritable; import org.junit.Test; -import com.google.common.collect.Sets; +import java.beans.XMLDecoder; +import java.io.ByteArrayInputStream; +import java.io.UnsupportedEncodingException; +import java.math.BigDecimal; +import java.util.List; +import java.util.Set; + +import static junit.framework.Assert.assertEquals; +import static junit.framework.Assert.assertTrue; /** * These test the SARG implementation. @@ -2828,7 +2828,7 @@ public void testBuilderComplexTypes() throws Exception { .build(); assertEquals("leaf-0 = (LESS_THAN x 1970-01-11)\n" + "leaf-1 = (LESS_THAN_EQUALS y hi)\n" + - "leaf-2 = (EQUALS z 1)\n" + + "leaf-2 = (EQUALS z 1.0)\n" + "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString()); sarg = SearchArgument.FACTORY.newBuilder() @@ -2847,4 +2847,36 @@ public void testBuilderComplexTypes() throws Exception { "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" + "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString()); } + + @Test + public void testBuilderComplexTypes2() throws Exception { + SearchArgument sarg = + SearchArgument.FACTORY.newBuilder() + .startAnd() + .lessThan("x", new DateWritable(10)) + .lessThanEquals("y", new HiveChar("hi", 10)) + .equals("z", new BigDecimal("1.0")) + .end() + .build(); + assertEquals("leaf-0 = (LESS_THAN x 1970-01-11)\n" + + "leaf-1 = (LESS_THAN_EQUALS y hi)\n" + + "leaf-2 = (EQUALS z 1.0)\n" + + "expr = (and leaf-0 leaf-1 leaf-2)", sarg.toString()); + + sarg = SearchArgument.FACTORY.newBuilder() + .startNot() + .startOr() + .isNull("x") + .between("y", new BigDecimal(10), 20.0) + .in("z", (byte)1, (short)2, (int)3) + .nullSafeEquals("a", new HiveVarchar("stinger", 100)) + .end() + .end() + .build(); + assertEquals("leaf-0 = (IS_NULL x)\n" + + "leaf-1 = (BETWEEN y 10 20.0)\n" + + "leaf-2 = (IN z 1 2 3)\n" + + "leaf-3 = (NULL_SAFE_EQUALS a stinger)\n" + + "expr = (and (not leaf-0) (not leaf-1) (not leaf-2) (not leaf-3))", sarg.toString()); + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java index acc2883..8f593aa 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDbTxnManager.java @@ -21,12 +21,12 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; +import org.apache.hadoop.hive.metastore.api.ShowLocksResponse;import org.apache.hadoop.hive.metastore.api.ShowLocksResponseElement;import org.apache.hadoop.hive.metastore.txn.TxnDbUtil; import org.apache.hadoop.hive.ql.Context; import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.hooks.ReadEntity; import org.apache.hadoop.hive.ql.hooks.WriteEntity; -import org.apache.hadoop.hive.ql.metadata.Partition; +import org.apache.hadoop.hive.ql.metadata.DummyPartition;import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.log4j.Level; @@ -137,6 +137,43 @@ public void testSingleWriteTable() throws Exception { Assert.assertEquals(0, locks.size()); } + + @Test + public void testSingleWritePartition() throws Exception { + WriteEntity we = 
addPartitionOutput(newTable(true), WriteEntity.WriteType.INSERT); + QueryPlan qp = new MockQueryPlan(this); + txnMgr.openTxn("fred"); + txnMgr.acquireLocks(qp, ctx, "fred"); + List locks = ctx.getHiveLocks(); + Assert.assertEquals(1, locks.size()); + Assert.assertEquals(1, + TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId)); + txnMgr.commitTxn(); + locks = txnMgr.getLockManager().getLocks(false, false); + Assert.assertEquals(0, locks.size()); + } + + @Test + public void testWriteDynamicPartition() throws Exception { + WriteEntity we = addDynamicPartitionedOutput(newTable(true), WriteEntity.WriteType.INSERT); + QueryPlan qp = new MockQueryPlan(this); + txnMgr.openTxn("fred"); + txnMgr.acquireLocks(qp, ctx, "fred"); + List locks = ctx.getHiveLocks(); + Assert.assertEquals(1, locks.size()); + /*Assert.assertEquals(1, + TxnDbUtil.countLockComponents(((DbLockManager.DbHiveLock) locks.get(0)).lockId)); + */// Make sure we're locking the whole table, since this is dynamic partitioning + ShowLocksResponse rsp = ((DbLockManager)txnMgr.getLockManager()).getLocks(); + List elms = rsp.getLocks(); + Assert.assertEquals(1, elms.size()); + Assert.assertNotNull(elms.get(0).getTablename()); + Assert.assertNull(elms.get(0).getPartname()); + txnMgr.commitTxn(); + locks = txnMgr.getLockManager().getLocks(false, false); + Assert.assertEquals(0, locks.size()); + } + @Test public void testReadWrite() throws Exception { Table t = newTable(true); @@ -252,6 +289,7 @@ public void setUp() throws Exception { @After public void tearDown() throws Exception { + if (txnMgr != null) txnMgr.closeTxnManager(); TxnDbUtil.cleanDb(); } @@ -318,4 +356,12 @@ private WriteEntity addPartitionOutput(Table t, WriteEntity.WriteType writeType) writeEntities.add(we); return we; } + + private WriteEntity addDynamicPartitionedOutput(Table t, WriteEntity.WriteType writeType) + throws Exception { + DummyPartition dp = new DummyPartition(t, "no clue what I should call this"); + WriteEntity we = new WriteEntity(dp, writeType, false); + writeEntities.add(we); + return we; + } } diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java new file mode 100644 index 0000000..548215a --- /dev/null +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestIUD.java @@ -0,0 +1,221 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.hadoop.hive.ql.parse; + +import org.apache.hadoop.hive.conf.HiveConf; +import org.apache.hadoop.hive.ql.session.SessionState; +import org.junit.Assert; +import org.junit.Before; +import org.junit.BeforeClass; +import org.junit.Test; + +public class TestIUD { + private static HiveConf conf; + + private ParseDriver pd; + private SemanticAnalyzer sA; + + @BeforeClass + public static void initialize() { + conf = new HiveConf(SemanticAnalyzer.class); + SessionState.start(conf); + } + + @Before + public void setup() throws SemanticException { + pd = new ParseDriver(); + sA = new SemanticAnalyzer(conf); + } + + ASTNode parse(String query) throws ParseException { + ASTNode nd = pd.parse(query); + return (ASTNode) nd.getChild(0); + } + @Test + public void testDeleteNoWhere() throws ParseException { + ASTNode ast = parse("DELETE FROM src"); + Assert.assertEquals("AST doesn't match", + "(TOK_DELETE_FROM " + + "(TOK_TABNAME src))", ast.toStringTree()); + } + @Test + public void testDeleteWithWhere() throws ParseException { + ASTNode ast = parse("DELETE FROM src WHERE key IS NOT NULL AND src.value < 0"); + Assert.assertEquals("AST doesn't match", + "(TOK_DELETE_FROM " + + "(TOK_TABNAME src) " + + "(TOK_WHERE " + + "(AND " + + "(TOK_FUNCTION TOK_ISNOTNULL (TOK_TABLE_OR_COL key)) " + + "(< (. (TOK_TABLE_OR_COL src) value) 0))))", + ast.toStringTree()); + } + @Test + public void testUpdateNoWhereSingleSet() throws ParseException { + ASTNode ast = parse("UPDATE src set key = 3"); + Assert.assertEquals("AST doesn't match", + "(TOK_UPDATE_TABLE " + + "(TOK_TABNAME src) " + + "(TOK_SET_COLUMNS_CLAUSE " + + "(= " + + "(TOK_TABLE_OR_COL key) 3)))", + ast.toStringTree()); + } + @Test + public void testUpdateNoWhereMultiSet() throws ParseException { + ASTNode ast = parse("UPDATE src set key = 3, value = 8"); + Assert.assertEquals("AST doesn't match", + "(TOK_UPDATE_TABLE " + + "(TOK_TABNAME src) " + + "(TOK_SET_COLUMNS_CLAUSE " + + "(= " + + "(TOK_TABLE_OR_COL key) 3) " + + "(= " + + "(TOK_TABLE_OR_COL value) 8)))", + ast.toStringTree()); + } + @Test + public void testUpdateWithWhereSingleSet() throws ParseException { + ASTNode ast = parse("UPDATE src SET key = 3 WHERE value IS NULL"); + Assert.assertEquals("AST doesn't match", + "(TOK_UPDATE_TABLE " + + "(TOK_TABNAME src) " + + "(TOK_SET_COLUMNS_CLAUSE " + + "(= " + + "(TOK_TABLE_OR_COL key) 3)) " + + "(TOK_WHERE (TOK_FUNCTION TOK_ISNULL (TOK_TABLE_OR_COL value))))", + ast.toStringTree()); + } + @Test + public void testUpdateWithWhereMultiSet() throws ParseException { + ASTNode ast = parse("UPDATE src SET key = 3, value = 8 WHERE VALUE = 1230997"); + Assert.assertEquals("AST doesn't match", + "(TOK_UPDATE_TABLE " + + "(TOK_TABNAME src) " + + "(TOK_SET_COLUMNS_CLAUSE " + + "(= " + + "(TOK_TABLE_OR_COL key) 3) " + + "(= " + + "(TOK_TABLE_OR_COL value) 8)) " + + "(TOK_WHERE (= (TOK_TABLE_OR_COL VALUE) 1230997)))", + ast.toStringTree()); + } + @Test + public void testStandardInsertIntoTable() throws ParseException { + ASTNode ast = parse("INSERT into TABLE page_view SELECT pvs.viewTime, pvs.userid from page_view_stg pvs where pvs.userid is null"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_TABREF (TOK_TABNAME page_view_stg) pvs)) " + + "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " + + "(TOK_SELECT " + + "(TOK_SELEXPR (. (TOK_TABLE_OR_COL pvs) viewTime)) " + + "(TOK_SELEXPR (. (TOK_TABLE_OR_COL pvs) userid))) " + + "(TOK_WHERE (TOK_FUNCTION TOK_ISNULL (. 
(TOK_TABLE_OR_COL pvs) userid)))))", + ast.toStringTree()); + } + @Test + public void testSelectStarFromAnonymousVirtTable1Row() throws ParseException { + try { + parse("select * from values (3,4)"); + Assert.assertFalse("Expected ParseException", true); + } + catch(ParseException ex) { + Assert.assertEquals("Failure didn't match.", "line 1:21 missing EOF at '(' near 'values'",ex.getMessage()); + } + } + @Test + public void testSelectStarFromVirtTable1Row() throws ParseException { + ASTNode ast = parse("select * from (values (3,4)) as VC(a,b)"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_VIRTUAL_TABLE " + + "(TOK_VIRTUAL_TABREF (TOK_TABNAME VC) (TOK_COL_NAME a b)) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW 3 4)))) " + + "(TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))", + ast.toStringTree()); + } + @Test + public void testSelectStarFromVirtTable2Row() throws ParseException { + ASTNode ast = parse("select * from (values (1,2),(3,4)) as VC(a,b)"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_VIRTUAL_TABLE " + + "(TOK_VIRTUAL_TABREF (TOK_TABNAME VC) (TOK_COL_NAME a b)) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " + + "(TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))", + ast.toStringTree()); + } + @Test + public void testSelectStarFromVirtTable2RowNamedProjections() throws ParseException { + ASTNode ast = parse("select a as c, b as d from (values (1,2),(3,4)) as VC(a,b)"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_VIRTUAL_TABLE " + + "(TOK_VIRTUAL_TABREF (TOK_TABNAME VC) (TOK_COL_NAME a b)) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " + + "(TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) " + + "(TOK_SELECT (TOK_SELEXPR (TOK_TABLE_OR_COL a) c) (TOK_SELEXPR (TOK_TABLE_OR_COL b) d))))", + ast.toStringTree()); + } + @Test + public void testInsertIntoTableAsSelectFromNamedVirtTable() throws ParseException { + ASTNode ast = parse("insert into table page_view select a,b as c from (values (1,2),(3,4)) as VC(a,b) where b = 9"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_VIRTUAL_TABLE " + + "(TOK_VIRTUAL_TABREF (TOK_TABNAME VC) (TOK_COL_NAME a b)) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) (TOK_VALUE_ROW 3 4)))) " + + "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " + + "(TOK_SELECT " + + "(TOK_SELEXPR (TOK_TABLE_OR_COL a)) " + + "(TOK_SELEXPR (TOK_TABLE_OR_COL b) c)) " + + "(TOK_WHERE (= (TOK_TABLE_OR_COL b) 9))))", + ast.toStringTree()); + } + @Test + public void testInsertIntoTableFromAnonymousTable1Row() throws ParseException { + ASTNode ast = parse("insert into table page_view values(1,2)"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_VIRTUAL_TABLE " + + "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2)))) " + + "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " + + "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))", + ast.toStringTree()); + } + @Test + public void testInsertIntoTableFromAnonymousTable() throws ParseException { + ASTNode ast = parse("insert into table page_view values(1,2),(3,4)"); + Assert.assertEquals("AST doesn't match", + "(TOK_QUERY " + + "(TOK_FROM " + + "(TOK_VIRTUAL_TABLE " + + "(TOK_VIRTUAL_TABREF TOK_ANONYMOUS) " + + "(TOK_VALUES_TABLE (TOK_VALUE_ROW 1 2) 
(TOK_VALUE_ROW 3 4)))) " + + "(TOK_INSERT (TOK_INSERT_INTO (TOK_TAB (TOK_TABNAME page_view))) " + + "(TOK_SELECT (TOK_SELEXPR TOK_ALLCOLREF))))", + ast.toStringTree()); + } +} diff --git a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java index c5a0b8d..ccdf272 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/parse/TestQBCompact.java @@ -65,7 +65,6 @@ public static void init() throws Exception { private AlterTableSimpleDesc parseAndAnalyze(String query) throws Exception { ParseDriver hd = new ParseDriver(); ASTNode head = (ASTNode)hd.parse(query).getChild(0); - System.out.println("HERE " + head.dump()); BaseSemanticAnalyzer a = SemanticAnalyzerFactory.get(conf, head); a.analyze(head, new Context(conf)); List> roots = a.getRootTasks(); diff --git a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java index 7f5134e..ec1379d 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/txn/compactor/CompactorTest.java @@ -36,9 +36,9 @@ import org.apache.hadoop.hive.ql.io.RecordUpdater; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.io.NullWritable; import org.apache.hadoop.io.Text; import org.apache.hadoop.io.Writable; +import org.apache.hadoop.io.WritableComparable; import org.apache.hadoop.mapred.*; import org.apache.hadoop.util.Progressable; import org.apache.thrift.TException; @@ -276,7 +276,7 @@ private void addFile(HiveConf conf, Table t, Partition p, long minTxn, long maxT } } - static class MockInputFormat implements AcidInputFormat { + static class MockInputFormat implements AcidInputFormat { @Override public AcidInputFormat.RowReader getReader(InputSplit split, @@ -315,7 +315,7 @@ private void addFile(HiveConf conf, Table t, Partition p, long minTxn, long maxT } @Override - public RecordReader getRecordReader(InputSplit inputSplit, JobConf entries, + public RecordReader getRecordReader(InputSplit inputSplit, JobConf entries, Reporter reporter) throws IOException { return null; } @@ -398,7 +398,7 @@ public float getProgress() throws IOException { // This class isn't used and I suspect does totally the wrong thing. It's only here so that I // can provide some output format to the tables and partitions I create. I actually write to // those tables directory. 
- static class MockOutputFormat implements AcidOutputFormat { + static class MockOutputFormat implements AcidOutputFormat { @Override public RecordUpdater getRecordUpdater(Path path, Options options) throws @@ -420,7 +420,7 @@ public RecordUpdater getRecordUpdater(Path path, Options options) throws } @Override - public RecordWriter getRecordWriter(FileSystem fileSystem, JobConf entries, + public RecordWriter getRecordWriter(FileSystem fileSystem, JobConf entries, String s, Progressable progressable) throws IOException { diff --git a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java index 4c5b3a5..ce578a0 100644 --- a/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java +++ b/ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFOPDivide.java @@ -187,7 +187,7 @@ public void testDecimalDivideDecimal() throws HiveException { PrimitiveObjectInspector oi = (PrimitiveObjectInspector) udf.initialize(inputOIs); Assert.assertEquals(TypeInfoFactory.getDecimalTypeInfo(11, 7), oi.getTypeInfo()); HiveDecimalWritable res = (HiveDecimalWritable) udf.evaluate(args); - Assert.assertEquals(HiveDecimal.create("0.06171"), res.getHiveDecimal()); + Assert.assertEquals(HiveDecimal.create("0.0617100"), res.getHiveDecimal()); } @Test diff --git a/ql/src/test/queries/clientnegative/create_view_failure10.q b/ql/src/test/queries/clientnegative/create_view_failure10.q new file mode 100644 index 0000000..13acfd0 --- /dev/null +++ b/ql/src/test/queries/clientnegative/create_view_failure10.q @@ -0,0 +1,3 @@ +-- CREATE VIEW should fail if it references a temp table +create temporary table tmp1 (c1 string, c2 string); +create view tmp1_view as select c1, count(*) from tmp1 group by c1; diff --git a/ql/src/test/queries/clientnegative/parquet_char.q b/ql/src/test/queries/clientnegative/parquet_char.q deleted file mode 100644 index 745a786..0000000 --- a/ql/src/test/queries/clientnegative/parquet_char.q +++ /dev/null @@ -1,3 +0,0 @@ -drop table if exists parquet_char; - -create table parquet_char (t char(10)) stored as parquet; diff --git a/ql/src/test/queries/clientnegative/parquet_varchar.q b/ql/src/test/queries/clientnegative/parquet_varchar.q deleted file mode 100644 index 55825f7..0000000 --- a/ql/src/test/queries/clientnegative/parquet_varchar.q +++ /dev/null @@ -1,3 +0,0 @@ -drop table if exists parquet_varchar; - -create table parquet_varchar (t varchar(10)) stored as parquet; diff --git a/ql/src/test/queries/clientpositive/alter_rename_table.q b/ql/src/test/queries/clientpositive/alter_rename_table.q index 4125c5f..0eebd0f 100644 --- a/ql/src/test/queries/clientpositive/alter_rename_table.q +++ b/ql/src/test/queries/clientpositive/alter_rename_table.q @@ -26,3 +26,10 @@ ALTER TABLE source.srcpart RENAME TO target.srcpart; ALTER TABLE source.srcpart RENAME TO target.srcpart; select * from target.srcpart tablesample (10 rows); + +create table source.src like default.src; +create table source.src1 like default.src; +load data local inpath '../../data/files/kv1.txt' overwrite into table source.src; + +ALTER TABLE source.src RENAME TO target.src1; +select * from target.src1 tablesample (10 rows); \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/analyze_tbl_part.q b/ql/src/test/queries/clientpositive/analyze_tbl_part.q new file mode 100644 index 0000000..9040bd4 --- /dev/null +++ b/ql/src/test/queries/clientpositive/analyze_tbl_part.q @@ -0,0 +1,19 @@ +set 
hive.stats.dbclass=jdbc:derby; + +create table src_stat_part(key string, value string) partitioned by (partitionId int); + +insert overwrite table src_stat_part partition (partitionId=1) +select * from src1; + +insert overwrite table src_stat_part partition (partitionId=2) +select * from src1; + +ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key; + +describe formatted src_stat_part.key PARTITION(partitionId=1); + +ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value; + +describe formatted src_stat_part.key PARTITION(partitionId=1); + +describe formatted src_stat_part.value PARTITION(partitionId=2); \ No newline at end of file diff --git a/ql/src/test/queries/clientpositive/decimal_trailing.q b/ql/src/test/queries/clientpositive/decimal_trailing.q new file mode 100644 index 0000000..80afb40 --- /dev/null +++ b/ql/src/test/queries/clientpositive/decimal_trailing.q @@ -0,0 +1,16 @@ +DROP TABLE IF EXISTS DECIMAL_TRAILING; + +CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE; + +LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING; + +SELECT * FROM DECIMAL_TRAILING ORDER BY id; + +DROP TABLE DECIMAL_TRAILING; diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_full.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_full.q new file mode 100644 index 0000000..00c9b53 --- /dev/null +++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_full.q @@ -0,0 +1,52 @@ +set hive.stats.fetch.column.stats=true; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; + +create table if not exists ext_loc ( + state string, + locid int, + zip int, + year string +) row format delimited fields terminated by '|' stored as textfile; + +LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc; + +create table if not exists loc_orc_1d ( + state string, + locid int, + zip int +) partitioned by(year string) stored as orc; + +insert overwrite table loc_orc_1d partition(year) select * from ext_loc; + +analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid; + +analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid; + +describe formatted loc_orc_1d.state PARTITION(year='2001'); + +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state from loc_orc_1d; + +-- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. 
Hence colStatState reports PARTIAL +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state,locid from loc_orc_1d; + +create table if not exists loc_orc_2d ( + state string, + locid int +) partitioned by(zip int, year string) stored as orc; + +insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc; + +analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid; + +analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid; + +analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid; + +analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid; + +explain extended select state from loc_orc_2d; + +explain extended select state,locid from loc_orc_2d; diff --git a/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q new file mode 100644 index 0000000..8ae9a90 --- /dev/null +++ b/ql/src/test/queries/clientpositive/extrapolate_part_stats_partial.q @@ -0,0 +1,58 @@ +set hive.stats.fetch.column.stats=true; +set hive.exec.dynamic.partition=true; +set hive.exec.dynamic.partition.mode=nonstrict; + +create table if not exists ext_loc ( + state string, + locid int, + zip int, + year string +) row format delimited fields terminated by '|' stored as textfile; + +LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc; + +create table if not exists loc_orc_1d ( + state string, + locid int, + zip int +) partitioned by(year string) stored as orc; + +insert overwrite table loc_orc_1d partition(year) select * from ext_loc; + +analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid; + +analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid; + +describe formatted loc_orc_1d.state PARTITION(year='2001'); + +describe formatted loc_orc_1d.state PARTITION(year='2002'); + +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state from loc_orc_1d; + +-- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. 
Hence colStatState reports PARTIAL +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state,locid from loc_orc_1d; + +analyze table loc_orc_1d partition(year='2000') compute statistics for columns state; + +analyze table loc_orc_1d partition(year='2003') compute statistics for columns state; + +explain extended select state from loc_orc_1d; + +explain extended select state,locid from loc_orc_1d; + +create table if not exists loc_orc_2d ( + state string, + locid int +) partitioned by(zip int, year string) stored as orc; + +insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc; + +analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid; + +analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid; + +explain extended select state from loc_orc_2d; + +explain extended select state,locid from loc_orc_2d; diff --git a/ql/src/test/queries/clientpositive/literal_decimal.q b/ql/src/test/queries/clientpositive/literal_decimal.q index 08b21dc..1bafc24 100644 --- a/ql/src/test/queries/clientpositive/literal_decimal.q +++ b/ql/src/test/queries/clientpositive/literal_decimal.q @@ -1,5 +1,5 @@ set hive.fetch.task.conversion=more; -EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1; +EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1; -SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1; +SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1; diff --git a/ql/src/test/queries/clientpositive/orc_ppd_decimal.q b/ql/src/test/queries/clientpositive/orc_ppd_decimal.q index a93590e..a54b862 100644 --- a/ql/src/test/queries/clientpositive/orc_ppd_decimal.q +++ b/ql/src/test/queries/clientpositive/orc_ppd_decimal.q @@ -85,6 +85,18 @@ set hive.optimize.index.filter=true; select sum(hash(*)) from newtypesorc where d<=cast('11.22' as float); set hive.optimize.index.filter=false; +select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal); + +set hive.optimize.index.filter=true; +select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal); + +set hive.optimize.index.filter=false; +select sum(hash(*)) from newtypesorc where d<=11.22BD; + +set hive.optimize.index.filter=true; +select sum(hash(*)) from newtypesorc where d<=11.22BD; + +set hive.optimize.index.filter=false; select sum(hash(*)) from newtypesorc where d<=12; set hive.optimize.index.filter=true; diff --git a/ql/src/test/queries/clientpositive/parquet_join.q b/ql/src/test/queries/clientpositive/parquet_join.q new file mode 100644 index 0000000..fc25c1c --- /dev/null +++ b/ql/src/test/queries/clientpositive/parquet_join.q @@ -0,0 +1,43 @@ + +drop table if exists staging; +drop table if exists parquet_jointable1; +drop table if exists parquet_jointable2; +drop table if exists parquet_jointable1_bucketed_sorted; +drop table if exists parquet_jointable2_bucketed_sorted; + +create table staging (key int, value string) stored as textfile; +insert into table staging select distinct key, value from src order by key limit 2; + +create table parquet_jointable1 stored as parquet as select * from staging; + +create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from 
staging; + +-- MR join + +explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key; +select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key; + +set hive.auto.convert.join=true; + +-- The two tables involved in the join have differing number of columns(table1-2,table2-3). In case of Map and SMB join, +-- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table +-- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table + +-- Map join + +explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key; +select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key; + +set hive.optimize.bucketmapjoin=true; +set hive.optimize.bucketmapjoin.sortedmerge=true; +set hive.auto.convert.sortmerge.join=true; +set hive.input.format=org.apache.hadoop.hive.ql.io.BucketizedHiveInputFormat; + +-- SMB join + +create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet; +insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key; +create table parquet_jointable2_bucketed_sorted (key int,value1 string, value2 string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet; +insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key; +explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key; +select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key; diff --git a/ql/src/test/queries/clientpositive/parquet_types.q b/ql/src/test/queries/clientpositive/parquet_types.q index cb0dcfd..86af5af 100644 --- a/ql/src/test/queries/clientpositive/parquet_types.q +++ b/ql/src/test/queries/clientpositive/parquet_types.q @@ -8,7 +8,9 @@ CREATE TABLE parquet_types_staging ( cfloat float, cdouble double, cstring1 string, - t timestamp + t timestamp, + cchar char(5), + cvarchar varchar(10) ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|'; @@ -19,7 +21,9 @@ CREATE TABLE parquet_types ( cfloat float, cdouble double, cstring1 string, - t timestamp + t timestamp, + cchar char(5), + cvarchar varchar(10) ) STORED AS PARQUET; LOAD DATA LOCAL INPATH '../../data/files/parquet_types.txt' OVERWRITE INTO TABLE parquet_types_staging; diff --git a/ql/src/test/queries/clientpositive/tez_union_decimal.q b/ql/src/test/queries/clientpositive/tez_union_decimal.q new file mode 100644 index 0000000..0f56e6a --- /dev/null +++ b/ql/src/test/queries/clientpositive/tez_union_decimal.q @@ -0,0 +1,37 @@ +select sum(a) from ( + select cast(1.1 as decimal) a from src tablesample (1 rows) + union all + select cast(null as decimal) a from src tablesample (1 rows) +) t; + +select sum(a) from ( + select cast(1 as tinyint) a from src tablesample (1 rows) + union all + select cast(null as tinyint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t; + +select sum(a) from ( + select cast(1 as smallint) a from src tablesample (1 rows) + union all + select cast(null as smallint) a from src 
tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t; + +select sum(a) from ( + select cast(1 as int) a from src tablesample (1 rows) + union all + select cast(null as int) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t; + +select sum(a) from ( + select cast(1 as bigint) a from src tablesample (1 rows) + union all + select cast(null as bigint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t; diff --git a/ql/src/test/queries/clientpositive/windowing.q b/ql/src/test/queries/clientpositive/windowing.q index a1f4447..3f5c3bf 100644 --- a/ql/src/test/queries/clientpositive/windowing.q +++ b/ql/src/test/queries/clientpositive/windowing.q @@ -438,3 +438,9 @@ select p_mfgr, from part where p_mfgr = 'Manufacturer#6' ; + +-- 46. window sz is same as partition sz +select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) +from part +where p_mfgr='Manufacturer#1'; diff --git a/ql/src/test/results/clientnegative/authorization_create_func1.q.out b/ql/src/test/results/clientnegative/authorization_create_func1.q.out index 3bc49a6..6e11f04 100644 --- a/ql/src/test/results/clientnegative/authorization_create_func1.q.out +++ b/ql/src/test/results/clientnegative/authorization_create_func1.q.out @@ -1 +1 @@ -FAILED: HiveAccessControlException Permission denied: Principal [name=hive_test_user, type=USER] does not have following privileges for operation CREATEFUNCTION [[ADMIN PRIVILEGE] on Object [type=DATABASE, name=default], [ADMIN PRIVILEGE] on Object [type=FUNCTION, name=perm_fn]] +FAILED: HiveAccessControlException Permission denied: Principal [name=hive_test_user, type=USER] does not have following privileges for operation CREATEFUNCTION [[ADMIN PRIVILEGE] on Object [type=DATABASE, name=default], [ADMIN PRIVILEGE] on Object [type=FUNCTION, name=default.perm_fn]] diff --git a/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out b/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out index a73db84..4606f32 100644 --- a/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out +++ b/ql/src/test/results/clientnegative/bucket_mapjoin_mismatch1.q.out @@ -133,10 +133,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -209,10 +209,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column 
stats: NONE File Output Operator diff --git a/ql/src/test/results/clientnegative/create_view_failure10.q.out b/ql/src/test/results/clientnegative/create_view_failure10.q.out new file mode 100644 index 0000000..2823d9f --- /dev/null +++ b/ql/src/test/results/clientnegative/create_view_failure10.q.out @@ -0,0 +1,11 @@ +PREHOOK: query: -- CREATE VIEW should fail if it references a temp table +create temporary table tmp1 (c1 string, c2 string) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@tmp1 +POSTHOOK: query: -- CREATE VIEW should fail if it references a temp table +create temporary table tmp1 (c1 string, c2 string) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@tmp1 +FAILED: SemanticException org.apache.hadoop.hive.ql.parse.SemanticException: View definition references temporary table tmp1 diff --git a/ql/src/test/results/clientnegative/parquet_char.q.out b/ql/src/test/results/clientnegative/parquet_char.q.out deleted file mode 100644 index 8c9a52c..0000000 --- a/ql/src/test/results/clientnegative/parquet_char.q.out +++ /dev/null @@ -1,9 +0,0 @@ -PREHOOK: query: drop table if exists parquet_char -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists parquet_char -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table parquet_char (t char(10)) stored as parquet -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@parquet_char -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.UnsupportedOperationException: Parquet does not support char. See HIVE-6384 diff --git a/ql/src/test/results/clientnegative/parquet_timestamp.q.out b/ql/src/test/results/clientnegative/parquet_timestamp.q.out deleted file mode 100644 index 00973b7..0000000 --- a/ql/src/test/results/clientnegative/parquet_timestamp.q.out +++ /dev/null @@ -1,8 +0,0 @@ -PREHOOK: query: drop table if exists parquet_timestamp -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists parquet_timestamp -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table parquet_timestamp (t timestamp) stored as parquet -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.UnsupportedOperationException: Parquet does not support timestamp. See HIVE-6384 diff --git a/ql/src/test/results/clientnegative/parquet_varchar.q.out b/ql/src/test/results/clientnegative/parquet_varchar.q.out deleted file mode 100644 index 90f6db2..0000000 --- a/ql/src/test/results/clientnegative/parquet_varchar.q.out +++ /dev/null @@ -1,9 +0,0 @@ -PREHOOK: query: drop table if exists parquet_varchar -PREHOOK: type: DROPTABLE -POSTHOOK: query: drop table if exists parquet_varchar -POSTHOOK: type: DROPTABLE -PREHOOK: query: create table parquet_varchar (t varchar(10)) stored as parquet -PREHOOK: type: CREATETABLE -PREHOOK: Output: database:default -PREHOOK: Output: default@parquet_varchar -FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.DDLTask. java.lang.UnsupportedOperationException: Parquet does not support varchar. 
See HIVE-6384 diff --git a/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out b/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out index 4fc6f27..57052aa 100644 --- a/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out +++ b/ql/src/test/results/clientnegative/sortmerge_mapjoin_mismatch_1.q.out @@ -104,10 +104,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientnegative/udf_assert_true.q.out b/ql/src/test/results/clientnegative/udf_assert_true.q.out index 6f18011..acc18eb 100644 --- a/ql/src/test/results/clientnegative/udf_assert_true.q.out +++ b/ql/src/test/results/clientnegative/udf_assert_true.q.out @@ -23,10 +23,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: assert_true((_col4 > 0)) (type: void) + expressions: assert_true((_col5 > 0)) (type: void) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -47,10 +47,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: assert_true((_col4 > 0)) (type: void) + expressions: assert_true((_col5 > 0)) (type: void) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -100,10 +100,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: assert_true((_col4 < 2)) (type: void) + expressions: assert_true((_col5 < 2)) (type: void) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -124,10 +124,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: assert_true((_col4 < 2)) (type: void) + expressions: assert_true((_col5 < 2)) (type: void) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientnegative/udf_assert_true2.q.out b/ql/src/test/results/clientnegative/udf_assert_true2.q.out index 
abc721e..5684337 100644 --- a/ql/src/test/results/clientnegative/udf_assert_true2.q.out +++ b/ql/src/test/results/clientnegative/udf_assert_true2.q.out @@ -18,10 +18,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: (1 + assert_true((_col4 < 2))) (type: double) + expressions: (1 + assert_true((_col5 < 2))) (type: double) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -42,10 +42,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: (1 + assert_true((_col4 < 2))) (type: double) + expressions: (1 + assert_true((_col5 < 2))) (type: double) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientnegative/udf_local_resource.q.out b/ql/src/test/results/clientnegative/udf_local_resource.q.out index 13a1726..2ebcb3b 100644 --- a/ql/src/test/results/clientnegative/udf_local_resource.q.out +++ b/ql/src/test/results/clientnegative/udf_local_resource.q.out @@ -1,5 +1,5 @@ PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file '../../data/files/sales.txt' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: database:default -PREHOOK: Output: lookup +PREHOOK: Output: default.lookup FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. Hive warehouse is non-local, but ../../data/files/sales.txt specifies file on local filesystem. Resources on non-local warehouse should specify a non-local scheme/path diff --git a/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out b/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out index bb8ce14..60935bf 100644 --- a/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out +++ b/ql/src/test/results/clientnegative/udf_nonexistent_resource.q.out @@ -1,6 +1,6 @@ PREHOOK: query: create function lookup as 'org.apache.hadoop.hive.ql.udf.UDFFileLookup' using file 'nonexistent_file.txt' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: database:default -PREHOOK: Output: lookup +PREHOOK: Output: default.lookup nonexistent_file.txt does not exist FAILED: Execution Error, return code 1 from org.apache.hadoop.hive.ql.exec.FunctionTask. 
nonexistent_file.txt does not exist diff --git a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out index aee81c6..3e918be 100644 --- a/ql/src/test/results/clientpositive/allcolref_in_udf.q.out +++ b/ql/src/test/results/clientpositive/allcolref_in_udf.q.out @@ -112,10 +112,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 2 (type: int), concat(_col0, _col1, _col4, _col5) (type: string), concat(_col0, _col1) (type: string), concat(_col4, _col5) (type: string), concat(_col0, _col1, _col4) (type: string), concat(_col0, _col4, _col5) (type: string) + expressions: 2 (type: int), concat(_col0, _col1, _col5, _col6) (type: string), concat(_col0, _col1) (type: string), concat(_col5, _col6) (type: string), concat(_col0, _col1, _col5) (type: string), concat(_col0, _col5, _col6) (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE UDTF Operator diff --git a/ql/src/test/results/clientpositive/alter_rename_table.q.out b/ql/src/test/results/clientpositive/alter_rename_table.q.out index b9cf49f..970b43b 100644 --- a/ql/src/test/results/clientpositive/alter_rename_table.q.out +++ b/ql/src/test/results/clientpositive/alter_rename_table.q.out @@ -212,3 +212,56 @@ POSTHOOK: Input: target@srcpart@ds=2008-04-09/hr=12 278 val_278 2008-04-08 11 98 val_98 2008-04-08 11 484 val_484 2008-04-08 11 +PREHOOK: query: create table source.src like default.src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:source +PREHOOK: Output: source@source.src +POSTHOOK: query: create table source.src like default.src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:source +POSTHOOK: Output: source@source.src +POSTHOOK: Output: source@src +PREHOOK: query: create table source.src1 like default.src +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:source +PREHOOK: Output: source@source.src1 +POSTHOOK: query: create table source.src1 like default.src +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:source +POSTHOOK: Output: source@source.src1 +POSTHOOK: Output: source@src1 +PREHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.src +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: source@src +POSTHOOK: query: load data local inpath '../../data/files/kv1.txt' overwrite into table source.src +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: source@src +PREHOOK: query: ALTER TABLE source.src RENAME TO target.src1 +PREHOOK: type: ALTERTABLE_RENAME +PREHOOK: Input: source@src +PREHOOK: Output: source@src +POSTHOOK: query: ALTER TABLE source.src RENAME TO target.src1 +POSTHOOK: type: ALTERTABLE_RENAME +POSTHOOK: Input: source@src +POSTHOOK: Output: source@src +POSTHOOK: Output: target@src1 +PREHOOK: query: select * from target.src1 tablesample (10 rows) +PREHOOK: type: QUERY +PREHOOK: Input: target@src1 +#### A masked pattern was here #### +POSTHOOK: query: select * from target.src1 tablesample (10 rows) +POSTHOOK: type: QUERY +POSTHOOK: Input: target@src1 +#### A masked pattern was here #### +238 val_238 +86 val_86 +311 val_311 +27 val_27 +165 val_165 +409 val_409 +255 val_255 +278 val_278 +98 
val_98 +484 val_484 diff --git a/ql/src/test/results/clientpositive/analyze_tbl_part.q.out b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out new file mode 100644 index 0000000..40b926c --- /dev/null +++ b/ql/src/test/results/clientpositive/analyze_tbl_part.q.out @@ -0,0 +1,83 @@ +PREHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@src_stat_part +POSTHOOK: query: create table src_stat_part(key string, value string) partitioned by (partitionId int) +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@src_stat_part +PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=1) +select * from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=1) +select * from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=1 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=1).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: insert overwrite table src_stat_part partition (partitionId=2) +select * from src1 +PREHOOK: type: QUERY +PREHOOK: Input: default@src1 +PREHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: query: insert overwrite table src_stat_part partition (partitionId=2) +select * from src1 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src1 +POSTHOOK: Output: default@src_stat_part@partitionid=2 +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).key SIMPLE [(src1)src1.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: src_stat_part PARTITION(partitionid=2).value SIMPLE [(src1)src1.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key +PREHOOK: type: QUERY +PREHOOK: Input: default@src_stat_part +PREHOOK: Input: default@src_stat_part@partitionid=1 +PREHOOK: Input: default@src_stat_part@partitionid=2 +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_stat_part +POSTHOOK: Input: default@src_stat_part@partitionid=1 +POSTHOOK: Input: default@src_stat_part@partitionid=2 +#### A masked pattern was here #### +PREHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 0 14 1.72 3 from deserializer +PREHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE STATISTICS for columns key, value +PREHOOK: type: QUERY +PREHOOK: Input: default@src_stat_part +PREHOOK: Input: default@src_stat_part@partitionid=1 +PREHOOK: Input: default@src_stat_part@partitionid=2 +#### A masked pattern was here #### +POSTHOOK: query: ANALYZE TABLE src_stat_part partition (partitionId) COMPUTE 
STATISTICS for columns key, value +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src_stat_part +POSTHOOK: Input: default@src_stat_part@partitionid=1 +POSTHOOK: Input: default@src_stat_part@partitionid=2 +#### A masked pattern was here #### +PREHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part.key PARTITION(partitionId=1) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +key string 0 14 1.72 3 from deserializer +PREHOOK: query: describe formatted src_stat_part.value PARTITION(partitionId=2) +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@src_stat_part +POSTHOOK: query: describe formatted src_stat_part.value PARTITION(partitionId=2) +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@src_stat_part +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +value string 0 14 4.92 7 from deserializer diff --git a/ql/src/test/results/clientpositive/annotate_stats_join.q.out b/ql/src/test/results/clientpositive/annotate_stats_join.q.out index bfd6b51..c9ad41d 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_join.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_join.q.out @@ -193,10 +193,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col2, _col5, _col6 + outputColumnNames: _col0, _col1, _col2, _col6, _col7 Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 41 Data size: 7954 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -259,13 +259,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6 + outputColumnNames: _col0, _col1, _col2, _col6, _col7 Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: ((_col1 = _col5) and (_col0 = _col6)) (type: boolean) + predicate: ((_col1 = _col6) and (_col0 = _col7)) (type: boolean) Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -324,10 +324,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6 + outputColumnNames: _col0, _col1, _col2, _col6, _col7 Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - 
expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 6 Data size: 1164 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -390,18 +390,18 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6 - Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE + outputColumnNames: _col0, _col1, _col2, _col6, _col7 + Statistics: Num rows: 11 Data size: 2134 Basic stats: COMPLETE Column stats: COMPLETE Filter Operator - predicate: (((_col1 = _col5) and (_col0 = _col6)) and (_col6 = _col0)) (type: boolean) - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + predicate: (((_col1 = _col6) and (_col0 = _col7)) and (_col7 = _col0)) (type: boolean) + Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator compressed: false - Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: COMPLETE + Statistics: Num rows: 1 Data size: 194 Basic stats: COMPLETE Column stats: COMPLETE table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -474,10 +474,10 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13 Statistics: Num rows: 658 Data size: 192794 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: int) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 658 Data size: 192794 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -553,10 +553,10 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11, _col12 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14 Statistics: Num rows: 47 Data size: 13912 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), 
_col9 (type: string), _col10 (type: int), _col11 (type: bigint), _col12 (type: int) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 47 Data size: 13912 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator @@ -633,10 +633,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 2 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col9, _col10, _col11, _col12 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col11, _col12, _col13, _col14 Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col5 (type: int), _col6 (type: string), _col9 (type: string), _col10 (type: int), _col11 (type: bigint), _col12 (type: int) + expressions: _col0 (type: string), _col1 (type: int), _col2 (type: int), _col6 (type: int), _col7 (type: string), _col11 (type: string), _col12 (type: int), _col13 (type: bigint), _col14 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 1 Data size: 296 Basic stats: COMPLETE Column stats: COMPLETE File Output Operator diff --git a/ql/src/test/results/clientpositive/annotate_stats_part.q.out b/ql/src/test/results/clientpositive/annotate_stats_part.q.out index 10993c3..6262d37 100644 --- a/ql/src/test/results/clientpositive/annotate_stats_part.q.out +++ b/ql/src/test/results/clientpositive/annotate_stats_part.q.out @@ -366,14 +366,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string) outputColumnNames: _col0 - Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 688 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat @@ -403,14 +403,14 @@ STAGE PLANS: Map Operator Tree: TableScan alias: loc_orc - Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 724 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: state (type: string), locid (type: int) outputColumnNames: _col0, _col1 - Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: COMPLETE + Statistics: Num rows: 8 Data size: 720 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat 
diff --git a/ql/src/test/results/clientpositive/authorization_create_func1.q.out b/ql/src/test/results/clientpositive/authorization_create_func1.q.out index 597b187..120dacc 100644 --- a/ql/src/test/results/clientpositive/authorization_create_func1.q.out +++ b/ql/src/test/results/clientpositive/authorization_create_func1.q.out @@ -13,11 +13,11 @@ POSTHOOK: Output: temp_fn PREHOOK: query: create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii' PREHOOK: type: CREATEFUNCTION PREHOOK: Output: database:default -PREHOOK: Output: perm_fn +PREHOOK: Output: default.perm_fn POSTHOOK: query: create function perm_fn as 'org.apache.hadoop.hive.ql.udf.UDFAscii' POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: database:default -POSTHOOK: Output: perm_fn +POSTHOOK: Output: default.perm_fn PREHOOK: query: drop temporary function temp_fn PREHOOK: type: DROPFUNCTION PREHOOK: Output: temp_fn @@ -27,8 +27,8 @@ POSTHOOK: Output: temp_fn PREHOOK: query: drop function perm_fn PREHOOK: type: DROPFUNCTION PREHOOK: Output: database:default -PREHOOK: Output: perm_fn +PREHOOK: Output: default.perm_fn POSTHOOK: query: drop function perm_fn POSTHOOK: type: DROPFUNCTION POSTHOOK: Output: database:default -POSTHOOK: Output: perm_fn +POSTHOOK: Output: default.perm_fn diff --git a/ql/src/test/results/clientpositive/auto_join1.q.out b/ql/src/test/results/clientpositive/auto_join1.q.out index c0bdcdd..b91dddc 100644 --- a/ql/src/test/results/clientpositive/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/auto_join1.q.out @@ -61,10 +61,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join14.q.out b/ql/src/test/results/clientpositive/auto_join14.q.out index 5652060..40ca41d 100644 --- a/ql/src/test/results/clientpositive/auto_join14.q.out +++ b/ql/src/test/results/clientpositive/auto_join14.q.out @@ -65,10 +65,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join15.q.out b/ql/src/test/results/clientpositive/auto_join15.q.out index f1f3f64..e5ed83e 100644 --- a/ql/src/test/results/clientpositive/auto_join15.q.out +++ b/ql/src/test/results/clientpositive/auto_join15.q.out @@ -61,10 +61,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 
(type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join17.q.out b/ql/src/test/results/clientpositive/auto_join17.q.out index 0a68cb1..40d1b76 100644 --- a/ql/src/test/results/clientpositive/auto_join17.q.out +++ b/ql/src/test/results/clientpositive/auto_join17.q.out @@ -61,10 +61,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col4) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col5) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join19.q.out b/ql/src/test/results/clientpositive/auto_join19.q.out index a7f0f6b..11d6675 100644 --- a/ql/src/test/results/clientpositive/auto_join19.q.out +++ b/ql/src/test/results/clientpositive/auto_join19.q.out @@ -63,10 +63,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col7 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col8 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join2.q.out b/ql/src/test/results/clientpositive/auto_join2.q.out index 839f4a0..ea3315b 100644 --- a/ql/src/test/results/clientpositive/auto_join2.q.out +++ b/ql/src/test/results/clientpositive/auto_join2.q.out @@ -57,7 +57,7 @@ STAGE PLANS: 0 {_col0} 1 {value} keys: - 0 (_col0 + _col4) (type: double) + 0 (_col0 + _col5) (type: double) 1 UDFToDouble(key) (type: double) Stage: Stage-6 @@ -78,10 +78,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 + _col4) is not null (type: boolean) + predicate: (_col0 + _col5) is not null (type: boolean) Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: @@ -90,12 +90,12 @@ STAGE PLANS: 0 {_col0} 1 {value} keys: - 0 (_col0 + _col4) (type: double) + 0 (_col0 + _col5) (type: double) 1 UDFToDouble(key) (type: double) - outputColumnNames: _col0, _col9 + outputColumnNames: _col0, _col11 Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col9 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col11 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join20.q.out 
b/ql/src/test/results/clientpositive/auto_join20.q.out index 6cdfb96..0ef4ef0 100644 --- a/ql/src/test/results/clientpositive/auto_join20.q.out +++ b/ql/src/test/results/clientpositive/auto_join20.q.out @@ -94,10 +94,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -270,10 +270,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join21.q.out b/ql/src/test/results/clientpositive/auto_join21.q.out index 5d6c03a..c28c4b7 100644 --- a/ql/src/test/results/clientpositive/auto_join21.q.out +++ b/ql/src/test/results/clientpositive/auto_join21.q.out @@ -80,10 +80,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join22.q.out b/ql/src/test/results/clientpositive/auto_join22.q.out index 9a85118..416cfd9 100644 --- a/ql/src/test/results/clientpositive/auto_join22.q.out +++ b/ql/src/test/results/clientpositive/auto_join22.q.out @@ -82,10 +82,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col2 (type: string) - outputColumnNames: _col7 + outputColumnNames: _col8 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col7 (type: string) + expressions: _col8 (type: string) outputColumnNames: _col3 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE Group By Operator diff --git 
a/ql/src/test/results/clientpositive/auto_join23.q.out b/ql/src/test/results/clientpositive/auto_join23.q.out index a29d3bf..3e1eaae 100644 --- a/ql/src/test/results/clientpositive/auto_join23.q.out +++ b/ql/src/test/results/clientpositive/auto_join23.q.out @@ -51,10 +51,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join28.q.out b/ql/src/test/results/clientpositive/auto_join28.q.out index 6a439ec..3591b8d 100644 --- a/ql/src/test/results/clientpositive/auto_join28.q.out +++ b/ql/src/test/results/clientpositive/auto_join28.q.out @@ -80,10 +80,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -196,10 +196,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -312,10 +312,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -425,10 +425,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: 
string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join29.q.out b/ql/src/test/results/clientpositive/auto_join29.q.out index e2e0dcd..94124d6 100644 --- a/ql/src/test/results/clientpositive/auto_join29.q.out +++ b/ql/src/test/results/clientpositive/auto_join29.q.out @@ -80,10 +80,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -704,10 +704,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -1328,10 +1328,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -1961,10 +1961,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num 
rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -2588,10 +2588,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 6 Data size: 1322 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 6 Data size: 1322 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -2712,10 +2712,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -3336,10 +3336,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -3463,10 +3463,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 19 Data size: 3966 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, 
_col3, _col4, _col5 Statistics: Num rows: 19 Data size: 3966 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -3600,10 +3600,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 2 Data size: 440 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 2 Data size: 440 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join3.q.out b/ql/src/test/results/clientpositive/auto_join3.q.out index 78a7908..5e17506 100644 --- a/ql/src/test/results/clientpositive/auto_join3.q.out +++ b/ql/src/test/results/clientpositive/auto_join3.q.out @@ -85,10 +85,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col9 + outputColumnNames: _col0, _col11 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col9 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col11 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join32.q.out b/ql/src/test/results/clientpositive/auto_join32.q.out index 7fca969..bc2d56c 100644 --- a/ql/src/test/results/clientpositive/auto_join32.q.out +++ b/ql/src/test/results/clientpositive/auto_join32.q.out @@ -72,15 +72,15 @@ STAGE PLANS: keys: 0 name (type: string) 1 name (type: string) - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col7 (type: string) - outputColumnNames: _col0, _col7 + expressions: _col0 (type: string), _col8 (type: string) + outputColumnNames: _col0, _col8 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator - aggregations: count(DISTINCT _col7) - keys: _col0 (type: string), _col7 (type: string) + aggregations: count(DISTINCT _col8) + keys: _col0 (type: string), _col8 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -183,13 +183,13 @@ STAGE PLANS: keys: 0 name (type: string) 1 name (type: string) - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Select Operator - expressions: _col0 (type: string), _col7 (type: string) - outputColumnNames: _col0, _col7 + expressions: _col0 (type: string), _col8 (type: string) + outputColumnNames: _col0, _col8 Group By Operator - aggregations: count(DISTINCT _col7) - keys: _col0 (type: string), _col7 (type: string) + aggregations: count(DISTINCT _col8) + keys: _col0 (type: string), _col8 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Reduce Output Operator @@ -299,13 +299,13 @@ STAGE PLANS: keys: 0 name (type: string) 1 name (type: string) - outputColumnNames: _col0, _col7 + 
outputColumnNames: _col0, _col8 Select Operator - expressions: _col0 (type: string), _col7 (type: string) - outputColumnNames: _col0, _col7 + expressions: _col0 (type: string), _col8 (type: string) + outputColumnNames: _col0, _col8 Group By Operator - aggregations: count(DISTINCT _col7) - keys: _col0 (type: string), _col7 (type: string) + aggregations: count(DISTINCT _col8) + keys: _col0 (type: string), _col8 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join9.q.out b/ql/src/test/results/clientpositive/auto_join9.q.out index 609277a..bed42c1 100644 --- a/ql/src/test/results/clientpositive/auto_join9.q.out +++ b/ql/src/test/results/clientpositive/auto_join9.q.out @@ -61,10 +61,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col7 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col8 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out index 03cdcb5..aed67b6 100644 --- a/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out +++ b/ql/src/test/results/clientpositive/auto_join_reordering_values.q.out @@ -266,7 +266,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col2} {VALUE._col3} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col3, _col4, _col8 + outputColumnNames: _col0, _col3, _col4, _col9 Statistics: Num rows: 1 Data size: 39 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -277,7 +277,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3,_col4,_col8 + columns _col0,_col3,_col4,_col9 columns.types int,int,int,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -297,7 +297,7 @@ STAGE PLANS: Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 1 Data size: 39 Basic stats: COMPLETE Column stats: NONE tag: 0 - value expressions: _col3 (type: int), _col4 (type: int), _col8 (type: string) + value expressions: _col3 (type: int), _col4 (type: int), _col9 (type: string) auto parallelism: false TableScan alias: deal @@ -323,7 +323,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3,_col4,_col8 + columns _col0,_col3,_col4,_col9 columns.types int,int,int,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -332,7 +332,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col3,_col4,_col8 + columns _col0,_col3,_col4,_col9 columns.types int,int,int,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -390,9 +390,9 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col2} 
{VALUE._col3} {VALUE._col7} + 0 {VALUE._col2} {VALUE._col3} {VALUE._col8} 1 {KEY.reducesinkkey0} - outputColumnNames: _col3, _col4, _col8, _col14 + outputColumnNames: _col3, _col4, _col9, _col16 Statistics: Num rows: 1 Data size: 42 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -403,7 +403,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col3,_col4,_col8,_col14 + columns _col3,_col4,_col9,_col16 columns.types int,int,string,int escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -423,7 +423,7 @@ STAGE PLANS: Map-reduce partition columns: _col3 (type: int) Statistics: Num rows: 1 Data size: 42 Basic stats: COMPLETE Column stats: NONE tag: 0 - value expressions: _col4 (type: int), _col8 (type: string), _col14 (type: int) + value expressions: _col4 (type: int), _col9 (type: string), _col16 (type: int) auto parallelism: false TableScan alias: order_city @@ -449,7 +449,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col3,_col4,_col8,_col14 + columns _col3,_col4,_col9,_col16 columns.types int,int,string,int escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -458,7 +458,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col3,_col4,_col8,_col14 + columns _col3,_col4,_col9,_col16 columns.types int,int,string,int escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -516,9 +516,9 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col3} {VALUE._col7} {VALUE._col13} + 0 {VALUE._col3} {VALUE._col8} {VALUE._col15} 1 - outputColumnNames: _col4, _col8, _col14 + outputColumnNames: _col4, _col9, _col16 Statistics: Num rows: 1 Data size: 46 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -529,7 +529,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4,_col8,_col14 + columns _col4,_col9,_col16 columns.types int,string,int escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -549,7 +549,7 @@ STAGE PLANS: Map-reduce partition columns: _col4 (type: int) Statistics: Num rows: 1 Data size: 46 Basic stats: COMPLETE Column stats: NONE tag: 0 - value expressions: _col8 (type: string), _col14 (type: int) + value expressions: _col9 (type: string), _col16 (type: int) auto parallelism: false TableScan alias: user @@ -575,7 +575,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4,_col8,_col14 + columns _col4,_col9,_col16 columns.types int,string,int escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -584,7 +584,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4,_col8,_col14 + columns _col4,_col9,_col16 columns.types int,string,int escape.delim \ 
serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -642,12 +642,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col7} {VALUE._col13} + 0 {VALUE._col8} {VALUE._col15} 1 - outputColumnNames: _col8, _col14 + outputColumnNames: _col9, _col16 Statistics: Num rows: 55 Data size: 158 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col8 (type: string), _col14 (type: int) + expressions: _col9 (type: string), _col16 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 55 Data size: 158 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out b/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out index 7690041..011f7bf 100644 --- a/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out +++ b/ql/src/test/results/clientpositive/auto_smb_mapjoin_14.q.out @@ -1426,9 +1426,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Select Operator expressions: _col0 (type: int), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out b/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out index 37d31c3..d49549d 100644 --- a/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out +++ b/ql/src/test/results/clientpositive/auto_sortmerge_join_13.q.out @@ -98,9 +98,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Select Operator expressions: _col0 (type: int), _col2 (type: int) @@ -279,9 +279,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Select Operator expressions: _col0 (type: int), _col2 (type: int) @@ -460,9 +460,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Select Operator expressions: _col0 (type: int), _col2 (type: int) diff --git a/ql/src/test/results/clientpositive/avro_decimal.q.out b/ql/src/test/results/clientpositive/avro_decimal.q.out index 88268ce..921a418 100644 --- a/ql/src/test/results/clientpositive/avro_decimal.q.out +++ b/ql/src/test/results/clientpositive/avro_decimal.q.out @@ -106,9 +106,9 @@ Mary 4.33 Cluck 5.96 Tom -12.25 Mary 33.33 -Tom 19 -Beck 0 -Beck 79.9 +Tom 19.00 +Beck 0.00 +Beck 79.90 
PREHOOK: query: DROP TABLE IF EXISTS avro_dec1 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1 @@ -175,10 +175,10 @@ POSTHOOK: Input: default@avro_dec1 77.3 55.7 4.3 -6 +6.0 12.3 33.3 -19 +19.0 3.2 79.9 PREHOOK: query: DROP TABLE dec diff --git a/ql/src/test/results/clientpositive/avro_decimal_native.q.out b/ql/src/test/results/clientpositive/avro_decimal_native.q.out index c8ae0fb..60b4ccc 100644 --- a/ql/src/test/results/clientpositive/avro_decimal_native.q.out +++ b/ql/src/test/results/clientpositive/avro_decimal_native.q.out @@ -92,9 +92,9 @@ Mary 4.33 Cluck 5.96 Tom -12.25 Mary 33.33 -Tom 19 -Beck 0 -Beck 79.9 +Tom 19.00 +Beck 0.00 +Beck 79.90 PREHOOK: query: DROP TABLE IF EXISTS avro_dec1 PREHOOK: type: DROPTABLE POSTHOOK: query: DROP TABLE IF EXISTS avro_dec1 @@ -143,10 +143,10 @@ POSTHOOK: Input: default@avro_dec1 77.3 55.7 4.3 -6 +6.0 12.3 33.3 -19 +19.0 3.2 79.9 PREHOOK: query: DROP TABLE dec diff --git a/ql/src/test/results/clientpositive/bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/bucketmapjoin1.q.out index 1746aba..72a9173 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin1.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin1.q.out @@ -547,12 +547,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 0 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1048,12 +1048,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketmapjoin2.q.out b/ql/src/test/results/clientpositive/bucketmapjoin2.q.out index 729f3f4..3936d40 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin2.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin2.q.out @@ -267,12 +267,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Position of Big Table: 0 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -819,12 +819,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Position of Big Table: 1 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column 
stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1468,12 +1468,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Position of Big Table: 0 Statistics: Num rows: 31 Data size: 3368 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3368 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketmapjoin3.q.out b/ql/src/test/results/clientpositive/bucketmapjoin3.q.out index f63bb57..b9c219b 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin3.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin3.q.out @@ -298,12 +298,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Position of Big Table: 0 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -857,12 +857,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Position of Big Table: 1 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketmapjoin4.q.out b/ql/src/test/results/clientpositive/bucketmapjoin4.q.out index a58af3a..1210bed 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin4.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin4.q.out @@ -238,12 +238,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 0 Statistics: Num rows: 14 Data size: 1512 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 14 Data size: 1512 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -724,12 +724,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 14 Data size: 1512 Basic stats: COMPLETE Column 
stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 14 Data size: 1512 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out index 08ad318..0347457 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin5.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin5.q.out @@ -288,12 +288,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 60 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -835,12 +835,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 31 Data size: 3368 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3368 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketmapjoin7.q.out b/ql/src/test/results/clientpositive/bucketmapjoin7.q.out index 8cc001e..58dc678 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin7.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin7.q.out @@ -231,12 +231,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Position of Big Table: 0 Statistics: Num rows: 378 Data size: 1514 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col7 (type: string) + expressions: _col0 (type: int), _col8 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 378 Data size: 1514 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out index fa2692e..8181498 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative.q.out @@ -235,11 +235,11 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 0 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 22 Data size: 2310 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git 
a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out index 7810b12..c2abc1c 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative2.q.out @@ -289,12 +289,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 0 Statistics: Num rows: 31 Data size: 3368 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3368 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out index b2abb45..c87b857 100644 --- a/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out +++ b/ql/src/test/results/clientpositive/bucketmapjoin_negative3.q.out @@ -252,12 +252,12 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -451,12 +451,12 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -640,11 +640,11 @@ STAGE PLANS: keys: 0 (key + key) (type: double) 1 UDFToDouble(key) (type: double) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 12 Data size: 2420 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 12 Data size: 2420 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -831,11 +831,11 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 
Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1022,11 +1022,11 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1213,11 +1213,11 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1404,11 +1404,11 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1595,11 +1595,11 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1786,11 +1786,11 @@ STAGE PLANS: keys: 0 key (type: string), value (type: string) 1 key (type: string), value (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 6 Data size: 
1320 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 1320 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out index 97cab5b..f6a1c5c 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_2.q.out @@ -118,9 +118,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false @@ -275,9 +275,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator key expressions: _col0 (type: int) @@ -347,9 +347,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator key expressions: _col0 (type: int) @@ -386,9 +386,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator key expressions: _col0 (type: int) @@ -533,9 +533,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out index 4ff5c24..950cd98 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_4.q.out @@ -94,9 +94,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false diff --git 
a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out index 0cd45d4..81ad83f 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_5.q.out @@ -120,9 +120,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator key expressions: _col0 (type: int) @@ -192,9 +192,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator key expressions: _col0 (type: int) @@ -231,9 +231,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 Reduce Output Operator key expressions: _col0 (type: int) diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out index 6e7e240..e6976db 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_6.q.out @@ -96,9 +96,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -234,9 +234,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -398,9 +398,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col8) (type: string) + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -470,9 +470,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select 
Operator - expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col8) (type: string) + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -509,9 +509,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col8) (type: string) + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -600,9 +600,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col8) (type: string) + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -672,9 +672,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col8) (type: string) + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -711,9 +711,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col8) (type: string) + expressions: _col1 (type: int), _col0 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -782,9 +782,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -938,9 +938,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -1130,9 +1130,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator 
- expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -1202,9 +1202,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) @@ -1241,9 +1241,9 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col8 + outputColumnNames: _col0, _col1, _col2, _col9 Select Operator - expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col8) (type: string) + expressions: _col0 (type: int), _col1 (type: int), concat(_col2, _col9) (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col0 (type: int), _col1 (type: int) diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_7.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_7.q.out index 802da21..99c157d 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_7.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_7.q.out @@ -96,9 +96,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out index aa29b70..e28acfd 100644 --- a/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out +++ b/ql/src/test/results/clientpositive/bucketsortoptimize_insert_8.q.out @@ -94,9 +94,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5, _col6 + outputColumnNames: _col0, _col1, _col6, _col7 Select Operator - expressions: _col0 (type: int), _col5 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), _col6 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -226,9 +226,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5, _col6 + outputColumnNames: _col0, _col1, _col6, _col7 Select Operator - expressions: _col5 (type: int), _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col6 (type: int), _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/char_pad_convert.q.out b/ql/src/test/results/clientpositive/char_pad_convert.q.out index 26102e4..63568af 100644 --- a/ql/src/test/results/clientpositive/char_pad_convert.q.out +++ b/ql/src/test/results/clientpositive/char_pad_convert.q.out 
@@ -144,7 +144,7 @@ select lpad(f, 4, ' '), POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k #### A masked pattern was here #### -74.7 42 zzzzzTRUE 20 ddd45.4 yard du +74.7 42 zzzzzTRUE 20 dd45.40 yard du 26.4 37 zzzzzTRUE 20 dd29.62 history 96.9 18 zzzzFALSE 20 dd27.32 history 13.0 34 zzzzFALSE 20 dd23.91 topolog @@ -190,7 +190,7 @@ POSTHOOK: query: select rpad(f, 4, ' '), POSTHOOK: type: QUERY POSTHOOK: Input: default@over1k #### A masked pattern was here #### -74.7 42 TRUEzzzzz 20 45.4ddd yard du +74.7 42 TRUEzzzzz 20 45.40dd yard du 26.4 37 TRUEzzzzz 20 29.62dd history 96.9 18 FALSEzzzz 20 27.32dd history 13.0 34 FALSEzzzz 20 23.91dd topolog diff --git a/ql/src/test/results/clientpositive/cluster.q.out b/ql/src/test/results/clientpositive/cluster.q.out index aae499b..62b37c3 100644 --- a/ql/src/test/results/clientpositive/cluster.q.out +++ b/ql/src/test/results/clientpositive/cluster.q.out @@ -591,10 +591,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col1, _col5 + outputColumnNames: _col1, _col6 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col5 (type: string) + expressions: _col1 (type: string), _col6 (type: string) outputColumnNames: _col1, _col3 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -686,10 +686,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col1, _col5 + outputColumnNames: _col1, _col6 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col5 (type: string) + expressions: _col1 (type: string), _col6 (type: string) outputColumnNames: _col1, _col3 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/column_access_stats.q.out b/ql/src/test/results/clientpositive/column_access_stats.q.out index 644cd2d..b7da2b8 100644 --- a/ql/src/test/results/clientpositive/column_access_stats.q.out +++ b/ql/src/test/results/clientpositive/column_access_stats.q.out @@ -553,10 +553,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), '3' (type: string), _col4 (type: string), '3' (type: string) + expressions: _col0 (type: string), '3' (type: string), _col5 (type: string), '3' (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/constprog2.q.out b/ql/src/test/results/clientpositive/constprog2.q.out index a4a4cf4..6a89efe 100644 --- a/ql/src/test/results/clientpositive/constprog2.q.out +++ b/ql/src/test/results/clientpositive/constprog2.q.out @@ -42,10 +42,10 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col0} - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 799 Data size: 1599 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: '86' (type: string), 87.0 (type: double), _col5 (type: string) + expressions: '86' (type: string), 87.0 (type: double), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 799 Data 
size: 1599 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/correlationoptimizer1.q.out b/ql/src/test/results/clientpositive/correlationoptimizer1.q.out index e41f8e3..a42edd7 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer1.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer1.q.out @@ -1085,15 +1085,15 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string) - outputColumnNames: _col4 + expressions: _col5 (type: string) + outputColumnNames: _col5 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: string) + keys: _col5 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE @@ -1231,15 +1231,15 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string) - outputColumnNames: _col4 + expressions: _col5 (type: string) + outputColumnNames: _col5 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: string) + keys: _col5 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE @@ -1380,15 +1380,15 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) - outputColumnNames: _col0, _col5 + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col0 (type: string), _col5 (type: string) + keys: _col0 (type: string), _col6 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE @@ -1506,15 +1506,15 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) - outputColumnNames: _col0, _col5 + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col0 (type: string), _col5 (type: string) + keys: _col0 (type: string), _col6 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE @@ -1641,15 +1641,15 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column 
stats: NONE Select Operator - expressions: _col4 (type: string) - outputColumnNames: _col4 + expressions: _col5 (type: string) + outputColumnNames: _col5 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: string) + keys: _col5 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE @@ -1788,17 +1788,17 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col4 (type: string) - outputColumnNames: _col4 + expressions: _col5 (type: string) + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Mux Operator Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: string) + keys: _col5 (type: string) mode: complete outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/correlationoptimizer4.q.out b/ql/src/test/results/clientpositive/correlationoptimizer4.q.out index 31bd28b..80496d6 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer4.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer4.q.out @@ -118,15 +118,15 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} 2 - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 8 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 8 Data size: 37 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 37 Basic stats: COMPLETE Column stats: NONE @@ -286,17 +286,17 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} 2 - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Mux Operator Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: complete outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -452,15 +452,15 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 8 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 8 Data size: 37 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 8 Data size: 37 Basic stats: COMPLETE Column stats: NONE @@ -906,15 +906,15 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} 2 - 
outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE @@ -1068,15 +1068,15 @@ STAGE PLANS: 0 1 2 {KEY.reducesinkkey0} - outputColumnNames: _col8 + outputColumnNames: _col10 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col8 (type: int) - outputColumnNames: _col8 + expressions: _col10 (type: int) + outputColumnNames: _col10 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col8 (type: int) + keys: _col10 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE @@ -1227,17 +1227,17 @@ STAGE PLANS: 0 1 2 {KEY.reducesinkkey0} - outputColumnNames: _col8 + outputColumnNames: _col10 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col8 (type: int) - outputColumnNames: _col8 + expressions: _col10 (type: int) + outputColumnNames: _col10 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Mux Operator Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col8 (type: int) + keys: _col10 (type: int) mode: complete outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -1369,15 +1369,15 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} 2 - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE @@ -1531,15 +1531,15 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} 2 - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE @@ -1689,15 +1689,15 @@ STAGE PLANS: 0 1 {KEY.reducesinkkey0} 2 - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 15 Data size: 66 Basic stats: 
COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col4 (type: int) + keys: _col5 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 66 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out index b187a85..ba74fa2 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer5.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer5.q.out @@ -149,10 +149,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -401,10 +401,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Mux Operator @@ -546,10 +546,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 29 Data size: 118 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/correlationoptimizer6.q.out b/ql/src/test/results/clientpositive/correlationoptimizer6.q.out index 1a7441e..762ca3f 100644 --- a/ql/src/test/results/clientpositive/correlationoptimizer6.q.out +++ b/ql/src/test/results/clientpositive/correlationoptimizer6.q.out @@ -811,10 +811,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 15 Data size: 1542 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -930,10 +930,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -961,10 +961,10 @@ STAGE 
PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1148,10 +1148,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 16 Data size: 1700 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 1700 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1267,10 +1267,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1312,10 +1312,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1909,10 +1909,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col8, _col9 + outputColumnNames: _col0, _col10, _col11 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col8 (type: string), _col9 (type: bigint) + expressions: _col0 (type: string), _col10 (type: string), _col11 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2073,10 +2073,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col8, _col9 + outputColumnNames: _col0, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col8 (type: string), _col9 (type: bigint) + expressions: _col0 (type: string), _col10 (type: string), _col11 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -2120,10 +2120,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 2 
{KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col8, _col9 + outputColumnNames: _col0, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col8 (type: string), _col9 (type: bigint) + expressions: _col0 (type: string), _col10 (type: string), _col11 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -2346,10 +2346,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2504,10 +2504,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -2551,10 +2551,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/cross_product_check_1.q.out index 9268ba9..bc7551c 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_1.q.out @@ -52,10 +52,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -117,7 +117,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -133,7 +133,7 @@ 
STAGE PLANS: Reduce Output Operator sort order: Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) TableScan alias: a Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE @@ -146,12 +146,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {VALUE._col5} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {VALUE._col6} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -284,10 +284,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -406,10 +406,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/cross_product_check_2.q.out index 994bca1..29e9c7a 100644 --- a/ql/src/test/results/clientpositive/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/cross_product_check_2.q.out @@ -63,10 +63,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -112,7 +112,7 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column 
stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col4} {_col5} + 0 {_col0} {_col1} {_col5} {_col6} 1 {key} {value} keys: 0 @@ -150,21 +150,21 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col1} {_col4} {_col5} + 0 {_col0} {_col1} {_col5} {_col6} 1 {key} {value} keys: 0 1 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -309,10 +309,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -446,10 +446,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/decimal_2.q.out b/ql/src/test/results/clientpositive/decimal_2.q.out index 934590c..759ecf4 100644 --- a/ql/src/test/results/clientpositive/decimal_2.q.out +++ b/ql/src/test/results/clientpositive/decimal_2.q.out @@ -264,7 +264,7 @@ POSTHOOK: query: select cast(0.99999999999999999999 as decimal(20,19)) from deci POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_2 #### A masked pattern was here #### -1 +1.0 PREHOOK: query: select cast('0.99999999999999999999' as decimal(20,20)) from decimal_2 PREHOOK: type: QUERY PREHOOK: Input: default@decimal_2 diff --git a/ql/src/test/results/clientpositive/decimal_3.q.out b/ql/src/test/results/clientpositive/decimal_3.q.out index 8e9a30a..acaae65 100644 --- a/ql/src/test/results/clientpositive/decimal_3.q.out +++ b/ql/src/test/results/clientpositive/decimal_3.q.out @@ -33,7 +33,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -42,7 +42,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0 0 +0.000000000000000000 0 0 0 0 0 0.01 0 @@ -53,8 +53,8 @@ NULL 0 0.33 0 0.333 0 1 1 
-1 1 -1 1 +1.0 1 +1.000000000000000000 1 1.12 1 1.122 1 2 2 @@ -62,14 +62,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.14 4 +3.140 4 10 10 20 20 100 100 -124 124 +124.00 124 125.2 125 200 200 -1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -78,14 +78,14 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key DESC, value DESC POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### -1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 200 200 125.2 125 -124 124 +124.00 124 100 100 20 20 10 10 -3.14 4 +3.140 4 3.14 3 3.14 3 3.14 3 @@ -93,8 +93,8 @@ POSTHOOK: Input: default@decimal_3 2 2 1.122 1 1.12 1 -1 1 -1 1 +1.000000000000000000 1 +1.0 1 1 1 0.333 0 0.33 0 @@ -105,7 +105,7 @@ POSTHOOK: Input: default@decimal_3 0.01 0 0 0 0 0 -0 0 +0.000000000000000000 0 -0.3 0 -0.33 0 -0.333 0 @@ -114,7 +114,7 @@ POSTHOOK: Input: default@decimal_3 -1.122 -11 -1255.49 -1255 -4400 4400 --1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 NULL 0 PREHOOK: query: SELECT * FROM DECIMAL_3 ORDER BY key, value PREHOOK: type: QUERY @@ -125,7 +125,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -134,7 +134,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0 0 +0.000000000000000000 0 0 0 0 0 0.01 0 @@ -145,8 +145,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1 1 -1 1 +1.0 1 +1.000000000000000000 1 1.12 1 1.122 1 2 2 @@ -154,14 +154,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.14 4 +3.140 4 10 10 20 20 100 100 -124 124 +124.00 124 125.2 125 200 200 -1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_3 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -171,7 +171,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL --1234567890.123456789 +-1234567890.1234567890 -4400 -1255.49 -1.122 @@ -179,7 +179,7 @@ NULL -0.333 -0.33 -0.3 -0 +0.000000000000000000 0.01 0.02 0.1 @@ -195,10 +195,10 @@ NULL 10 20 100 -124 +124.00 125.2 200 -1234567890.12345678 +1234567890.1234567800 PREHOOK: query: SELECT key, sum(value) FROM DECIMAL_3 GROUP BY key ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -208,7 +208,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### NULL 0 --1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -216,7 +216,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0 0 +0.000000000000000000 0 0.01 0 0.02 0 0.1 0 @@ -232,10 +232,10 @@ NULL 0 10 10 20 20 100 100 -124 124 +124.00 124 125.2 125 200 200 -1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 PREHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -244,23 +244,23 @@ POSTHOOK: query: SELECT value, sum(key) FROM DECIMAL_3 GROUP BY value ORDER BY v POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890 -1234567890.123456789 +-1234567890 -1234567890.1234567890 -1255 -1255.49 -11 -1.122 -1 -2.24 -0 0.33 -1 5.242 +0 0.330000000000000000 +1 5.242000000000000000 2 4 3 9.42 -4 3.14 +4 3.140 10 10 20 20 100 100 -124 124 +124 124.00 125 125.2 200 200 
4400 -4400 -1234567890 1234567890.12345678 +1234567890 1234567890.1234567800 PREHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) ORDER BY a.key, a.value, b.value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -269,7 +269,7 @@ POSTHOOK: query: SELECT * FROM DECIMAL_3 a JOIN DECIMAL_3 b ON (a.key = b.key) O POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_3 #### A masked pattern was here #### --1234567890.123456789 -1234567890 -1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 -1234567890.1234567890 -1234567890 -4400 4400 -4400 4400 -1255.49 -1255 -1255.49 -1255 -1.122 -11 -1.122 -11 @@ -280,11 +280,7 @@ POSTHOOK: Input: default@decimal_3 -0.333 0 -0.333 0 -0.33 0 -0.33 0 -0.3 0 -0.3 0 -0 0 0 0 -0 0 0 0 -0 0 0 0 -0 0 0 0 -0 0 0 0 +0.000000000000000000 0 0.000000000000000000 0 0 0 0 0 0 0 0 0 0 0 0 0 @@ -297,14 +293,8 @@ POSTHOOK: Input: default@decimal_3 0.33 0 0.33 0 0.333 0 0.333 0 1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 -1 1 1 1 +1.0 1 1.0 1 +1.000000000000000000 1 1.000000000000000000 1 1.12 1 1.12 1 1.122 1 1.122 1 2 2 2 2 @@ -320,20 +310,14 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 3.14 3 -3.14 3 3.14 4 -3.14 3 3.14 4 -3.14 3 3.14 4 -3.14 4 3.14 3 -3.14 4 3.14 3 -3.14 4 3.14 3 -3.14 4 3.14 4 +3.140 4 3.140 4 10 10 10 10 20 20 20 20 100 100 100 100 -124 124 124 124 +124.00 124 124.00 124 125.2 125 125.2 125 200 200 200 200 -1234567890.12345678 1234567890 1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 1234567890.1234567800 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.14 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -345,7 +329,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.14 4 +3.140 4 PREHOOK: query: SELECT * FROM DECIMAL_3 WHERE key=3.140 ORDER BY key, value PREHOOK: type: QUERY PREHOOK: Input: default@decimal_3 @@ -357,7 +341,7 @@ POSTHOOK: Input: default@decimal_3 3.14 3 3.14 3 3.14 3 -3.14 4 +3.140 4 PREHOOK: query: DROP TABLE DECIMAL_3 PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_3 diff --git a/ql/src/test/results/clientpositive/decimal_4.q.out b/ql/src/test/results/clientpositive/decimal_4.q.out index 50662af..a31d27a 100644 --- a/ql/src/test/results/clientpositive/decimal_4.q.out +++ b/ql/src/test/results/clientpositive/decimal_4.q.out @@ -57,7 +57,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_1 #### A masked pattern was here #### NULL 0 --1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -66,7 +66,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0 0 +0.0000000000000000000000000 0 0 0 0 0 0.01 0 @@ -78,7 +78,7 @@ NULL 0 0.333 0 0.9999999999999999999999999 1 1 1 -1 1 +1.0 1 1.12 1 1.122 1 2 2 @@ -86,14 +86,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.14 4 +3.140 4 10 10 20 20 100 100 -124 124 +124.00 124 125.2 125 200 200 -1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 PREHOOK: query: SELECT * FROM DECIMAL_4_2 ORDER BY key PREHOOK: type: QUERY PREHOOK: Input: default@decimal_4_2 @@ -103,7 +103,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_4_2 #### A masked pattern was here #### NULL NULL --1234567890.123456789 -3703703670.370370367 +-1234567890.1234567890 -3703703670.3703703670 -4400 -13200 -1255.49 -3766.47 -1.122 -3.366 @@ -112,7 +112,7 @@ NULL NULL -0.333 -0.999 -0.33 -0.99 -0.3 -0.9 -0 0 +0.0000000000000000000000000 0.0000000000000000000000000 0 0 0 0 0.01 0.03 @@ -124,7 
+124,7 @@ NULL NULL 0.333 0.999 0.9999999999999999999999999 2.9999999999999999999999997 1 3 -1 3 +1.0 3.0 1.12 3.36 1.122 3.366 2 6 @@ -132,14 +132,14 @@ NULL NULL 3.14 9.42 3.14 9.42 3.14 9.42 -3.14 9.42 +3.140 9.420 10 30 20 60 100 300 -124 372 +124.00 372.00 125.2 375.6 200 600 -1234567890.12345678 3703703670.37037034 +1234567890.1234567800 3703703670.3703703400 PREHOOK: query: DROP TABLE DECIMAL_4_1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@decimal_4_1 diff --git a/ql/src/test/results/clientpositive/decimal_5.q.out b/ql/src/test/results/clientpositive/decimal_5.q.out index 0f24b8a..6df5097 100644 --- a/ql/src/test/results/clientpositive/decimal_5.q.out +++ b/ql/src/test/results/clientpositive/decimal_5.q.out @@ -43,7 +43,7 @@ NULL -0.333 -0.33 -0.3 -0 +0.00000 0 0 0.01 @@ -54,8 +54,8 @@ NULL 0.33 0.333 1 -1 -1 +1.0 +1.00000 1.12 1.122 2 @@ -63,11 +63,11 @@ NULL 3.14 3.14 3.14 -3.14 +3.140 10 20 100 -124 +124.00 125.2 200 PREHOOK: query: SELECT DISTINCT key FROM DECIMAL_5 ORDER BY key @@ -86,7 +86,7 @@ NULL -0.333 -0.33 -0.3 -0 +0.00000 0.01 0.02 0.1 @@ -102,7 +102,7 @@ NULL 10 20 100 -124 +124.00 125.2 200 PREHOOK: query: SELECT cast(key as decimal) FROM DECIMAL_5 @@ -161,7 +161,7 @@ POSTHOOK: Input: default@decimal_5 #### A masked pattern was here #### NULL NULL -0 +0.000 0 100 10 @@ -180,7 +180,7 @@ NULL -0.3 -0.33 -0.333 -1 +1.0 2 3.14 -1.12 @@ -188,13 +188,13 @@ NULL -1.122 1.12 1.122 -124 +124.00 125.2 NULL 3.14 3.14 -3.14 -1 +3.140 +1.000 NULL NULL PREHOOK: query: DROP TABLE DECIMAL_5 diff --git a/ql/src/test/results/clientpositive/decimal_6.q.out b/ql/src/test/results/clientpositive/decimal_6.q.out index c0cad1f..720966f 100644 --- a/ql/src/test/results/clientpositive/decimal_6.q.out +++ b/ql/src/test/results/clientpositive/decimal_6.q.out @@ -91,16 +91,16 @@ NULL -0.333 -0.3 -0.3 -0 -0 +0.00000 +0.0000 0 0 0.333 0.333 -1 -1 -1 -1 +1.0 +1.0 +1.0000 +1.00000 1.12 1.12 1.122 @@ -111,14 +111,14 @@ NULL 3.14 3.14 3.14 -3.14 -3.14 +3.140 +3.140 10 10 10.7343 10.73433 -124 -124 +124.00 +124.00 125.2 125.2 23232.23435 diff --git a/ql/src/test/results/clientpositive/decimal_precision.q.out b/ql/src/test/results/clientpositive/decimal_precision.q.out index f3f2cbc..94c63cb 100644 --- a/ql/src/test/results/clientpositive/decimal_precision.q.out +++ b/ql/src/test/results/clientpositive/decimal_precision.q.out @@ -76,13 +76,13 @@ NULL NULL NULL NULL +0.0000000000 +0.0000000000 +0.0000000000 +0.0000000000 0 -0 -0 -0 -0 -0.123456789 -0.123456789 +0.1234567890 +0.1234567890 1.2345678901 1.2345678901 1.2345678901 @@ -106,7 +106,7 @@ NULL 123456789.0123456 123456789.0123456789 1234567890.123456 -1234567890.123456789 +1234567890.1234567890 PREHOOK: query: SELECT dec, dec + 1, dec - 1 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -159,13 +159,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 +0.0000000000 1.0000000000 -1.0000000000 0 1 -1 -0 1 -1 -0 1 -1 -0 1 -1 -0 1 -1 -0.123456789 1.123456789 -0.876543211 -0.123456789 1.123456789 -0.876543211 +0.1234567890 1.1234567890 -0.8765432110 +0.1234567890 1.1234567890 -0.8765432110 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 1.2345678901 2.2345678901 0.2345678901 @@ -189,7 +189,7 @@ NULL NULL NULL 123456789.0123456 123456790.0123456 123456788.0123456 123456789.0123456789 123456790.0123456789 123456788.0123456789 
1234567890.123456 1234567891.123456 1234567889.123456 -1234567890.123456789 1234567891.123456789 1234567889.123456789 +1234567890.1234567890 1234567891.1234567890 1234567889.1234567890 PREHOOK: query: SELECT dec, dec * 2, dec / 3 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -242,13 +242,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL NULL +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 +0.0000000000 0.0000000000 0 0 0 0 -0 0 0 -0 0 0 -0 0 0 -0 0 0 -0.123456789 0.246913578 0.041152263 -0.123456789 0.246913578 0.041152263 +0.1234567890 0.2469135780 0.041152263 +0.1234567890 0.2469135780 0.041152263 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 1.2345678901 2.4691357802 0.411522630033 @@ -258,9 +258,9 @@ NULL NULL NULL 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 123.4567890123 246.9135780246 41.1522630041 -1234.5678901235 2469.135780247 411.522630041167 -1234.5678901235 2469.135780247 411.522630041167 -1234.5678901235 2469.135780247 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 +1234.5678901235 2469.1357802470 411.522630041167 12345.6789012346 24691.3578024692 4115.226300411533 12345.6789012346 24691.3578024692 4115.226300411533 123456.7890123456 246913.5780246912 41152.2630041152 @@ -272,7 +272,7 @@ NULL NULL NULL 123456789.0123456 246913578.0246912 41152263.0041152 123456789.0123456789 246913578.0246913578 41152263.0041152263 1234567890.123456 2469135780.246912 411522630.041152 -1234567890.123456789 2469135780.246913578 411522630.041152263 +1234567890.1234567890 2469135780.2469135780 411522630.041152263 PREHOOK: query: SELECT dec, dec / 9 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -325,13 +325,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 0 0 -0 0 -0 0 -0 0 -0 0 -0.123456789 0.013717421 -0.123456789 0.013717421 +0.1234567890 0.013717421 +0.1234567890 0.013717421 1.2345678901 0.137174210011 1.2345678901 0.137174210011 1.2345678901 0.137174210011 @@ -355,7 +355,7 @@ NULL NULL 123456789.0123456 13717421.001371733333 123456789.0123456789 13717421.0013717421 1234567890.123456 137174210.013717333333 -1234567890.123456789 137174210.013717421 +1234567890.1234567890 137174210.013717421 PREHOOK: query: SELECT dec, dec / 27 FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -408,13 +408,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 +0.0000000000 0 0 0 -0 0 -0 0 -0 0 -0 0 -0.123456789 0.0045724736667 -0.123456789 0.0045724736667 +0.1234567890 0.0045724736667 +0.1234567890 0.0045724736667 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 1.2345678901 0.0457247366704 @@ -438,7 +438,7 @@ NULL NULL 123456789.0123456 4572473.6671239111111 123456789.0123456789 4572473.6671239140333 1234567890.123456 45724736.6712391111111 -1234567890.123456789 45724736.6712391403333 +1234567890.1234567890 45724736.6712391403333 PREHOOK: query: SELECT dec, dec * dec FROM DECIMAL_PRECISION ORDER BY dec PREHOOK: type: QUERY PREHOOK: Input: default@decimal_precision @@ -491,13 +491,13 @@ NULL NULL NULL NULL NULL NULL NULL NULL +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 +0.0000000000 0.00000000000000000000 
+0.0000000000 0.00000000000000000000 0 0 -0 0 -0 0 -0 0 -0 0 -0.123456789 0.015241578750190521 -0.123456789 0.015241578750190521 +0.1234567890 0.01524157875019052100 +0.1234567890 0.01524157875019052100 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 1.2345678901 1.52415787526596567801 @@ -521,7 +521,7 @@ NULL NULL 123456789.0123456 15241578753238817.26870921383936 123456789.0123456789 15241578753238836.75019051998750190521 1234567890.123456 NULL -1234567890.123456789 NULL +1234567890.1234567890 NULL PREHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT avg(dec), sum(dec) FROM DECIMAL_PRECISION diff --git a/ql/src/test/results/clientpositive/decimal_trailing.q.out b/ql/src/test/results/clientpositive/decimal_trailing.q.out new file mode 100644 index 0000000..c6991fd --- /dev/null +++ b/ql/src/test/results/clientpositive/decimal_trailing.q.out @@ -0,0 +1,80 @@ +PREHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING +PREHOOK: type: DROPTABLE +POSTHOOK: query: DROP TABLE IF EXISTS DECIMAL_TRAILING +POSTHOOK: type: DROPTABLE +PREHOOK: query: CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@DECIMAL_TRAILING +POSTHOOK: query: CREATE TABLE DECIMAL_TRAILING ( + id int, + a decimal(10,4), + b decimal(15,8) + ) +ROW FORMAT DELIMITED + FIELDS TERMINATED BY ',' +STORED AS TEXTFILE +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@DECIMAL_TRAILING +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@decimal_trailing +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/kv10.txt' INTO TABLE DECIMAL_TRAILING +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@decimal_trailing +PREHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id +PREHOOK: type: QUERY +PREHOOK: Input: default@decimal_trailing +#### A masked pattern was here #### +POSTHOOK: query: SELECT * FROM DECIMAL_TRAILING ORDER BY id +POSTHOOK: type: QUERY +POSTHOOK: Input: default@decimal_trailing +#### A masked pattern was here #### +0 0 0 +1 0 0 +2 NULL NULL +3 1.0000 1.00000000 +4 10.0000 10.00000000 +5 100.0000 100.00000000 +6 1000.0000 1000.00000000 +7 10000.0000 10000.00000000 +8 100000.0000 100000.00000000 +9 NULL 1000000.00000000 +10 NULL NULL +11 NULL NULL +12 NULL NULL +13 NULL NULL +14 NULL NULL +15 NULL NULL +16 NULL NULL +17 NULL NULL +18 1.0000 1.00000000 +19 10.000 10.0000000 +20 100.00 100.000000 +21 1000.0 1000.00000 +22 100000 10000.0000 +23 0.0000 0.00000000 +24 0.000 0.0000000 +25 0.00 0.000000 +26 0.0 0.00000 +27 0 0.00000 +28 12313.2000 134134.31252500 +29 99999.9990 134134.31242553 +PREHOOK: query: DROP TABLE DECIMAL_TRAILING +PREHOOK: type: DROPTABLE +PREHOOK: Input: default@decimal_trailing +PREHOOK: Output: default@decimal_trailing +POSTHOOK: query: DROP TABLE DECIMAL_TRAILING +POSTHOOK: type: DROPTABLE +POSTHOOK: Input: default@decimal_trailing +POSTHOOK: Output: default@decimal_trailing diff --git a/ql/src/test/results/clientpositive/decimal_udf.q.out b/ql/src/test/results/clientpositive/decimal_udf.q.out index 1d5fee9..c5c2031 100644 --- a/ql/src/test/results/clientpositive/decimal_udf.q.out +++ 
b/ql/src/test/results/clientpositive/decimal_udf.q.out @@ -57,7 +57,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0 +0.0000000000 0 200 20 @@ -76,7 +76,7 @@ NULL -0.6 -0.66 -0.666 -2 +2.0 4 6.28 -2.24 @@ -84,15 +84,15 @@ NULL -2.244 2.24 2.244 -248 +248.00 250.4 -2510.98 6.28 6.28 -6.28 -2 --2469135780.246913578 -2469135780.24691356 +6.280 +2.0000000000 +-2469135780.2469135780 +2469135780.2469135600 PREHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + value FROM DECIMAL_UDF @@ -124,7 +124,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL -0 +0.0000000000 0 200 20 @@ -143,7 +143,7 @@ NULL -0.3 -0.33 -0.333 -2 +2.0 4 6.14 -2.12 @@ -151,15 +151,15 @@ NULL -12.122 2.12 2.122 -248 +248.00 250.2 -2510.49 6.14 6.14 -7.14 -2 --2469135780.123456789 -2469135780.12345678 +7.140 +2.0000000000 +-2469135780.1234567890 +2469135780.1234567800 PREHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key + (value/2) FROM DECIMAL_UDF @@ -327,42 +327,42 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 0 NULL +0.0000000000 0 0 0 0 +0.0 +0.00 0 0 0 0 +0.0 +0.00 +0.0 +0.00 +0.000 +0.0 +0.00 +0.000 +0.0 0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 -0 +0.00 +0.00 +0.00 +0.000 +0.00 +0.000 +0.00 +0.0 +0.00 +0.00 +0.00 +0.000 +0.0000000000 +0.0000000000 +0.0000000000 PREHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - value FROM DECIMAL_UDF @@ -394,7 +394,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -8800 NULL -0 +0.0000000000 0 0 0 @@ -413,7 +413,7 @@ NULL -0.3 -0.33 -0.333 -0 +0.0 0 0.14 -0.12 @@ -421,15 +421,15 @@ NULL 9.878 0.12 0.122 -0 +0.00 0.2 -0.49 0.14 0.14 --0.86 -0 --0.123456789 -0.12345678 +-0.860 +0.0000000000 +-0.1234567890 +0.1234567800 PREHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key - (value/2) FROM DECIMAL_UDF @@ -597,7 +597,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 19360000 NULL -0 +0.00000000000000000000 0 10000 100 @@ -616,7 +616,7 @@ NULL 0.09 0.1089 0.110889 -1 +1.00 4 9.8596 1.2544 @@ -624,13 +624,13 @@ NULL 1.258884 1.2544 1.258884 -15376 +15376.0000 15675.04 1576255.1401 9.8596 9.8596 -9.8596 -1 +9.859600 +1.00000000000000000000 NULL NULL PREHOOK: query: EXPLAIN SELECT key * value FROM DECIMAL_UDF @@ -664,26 +664,26 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -19360000 NULL -0 +0.0000000000 0 10000 100 1 -0 -0 +0.0 +0.00 40000 400 4 0 -0 -0 -0 -0 -0 -0 -0 -0 -1 +0.0 +0.00 +0.0 +0.00 +0.000 +0.0 +0.00 +0.000 +1.0 4 9.42 1.12 @@ -691,15 +691,15 @@ NULL 12.342 1.12 1.122 -15376 -15650 +15376.00 +15650.0 1575639.95 9.42 9.42 -12.56 -1 -1524157875171467887.50190521 -1524157875171467876.3907942 +12.560 +1.0000000000 +1524157875171467887.5019052100 +1524157875171467876.3907942000 PREHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN SELECT key * (value/2) FROM DECIMAL_UDF @@ -1023,7 +1023,7 @@ POSTHOOK: Input: default@decimal_udf 0.785 1 1.0000000001 -1.00000000009999999271 +1.000000000099999992710 PREHOOK: query: EXPLAIN SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 PREHOOK: type: QUERY POSTHOOK: query: EXPLAIN 
SELECT key / (value/2) FROM DECIMAL_UDF WHERE value is not null and value <> 0 @@ -1180,7 +1180,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0 +0.0000000000 0 100 10 @@ -1199,7 +1199,7 @@ NULL 0.3 0.33 0.333 -1 +1.0 2 3.14 1.12 @@ -1207,15 +1207,15 @@ NULL 1.122 1.12 1.122 -124 +124.00 125.2 1255.49 3.14 3.14 -3.14 -1 -1234567890.123456789 -1234567890.12345678 +3.140 +1.0000000000 +1234567890.1234567890 +1234567890.1234567800 PREHOOK: query: -- avg EXPLAIN SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DECIMAL_UDF GROUP BY value ORDER BY value PREHOOK: type: QUERY @@ -1304,23 +1304,23 @@ POSTHOOK: query: SELECT value, sum(key) / count(key), avg(key), sum(key) FROM DE POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.123456789 +-1234567890 -1234567890.123456789 -1234567890.123456789 -1234567890.1234567890 -1255 -1255.49 -1255.49 -1255.49 -11 -1.122 -1.122 -1.122 -1 -1.12 -1.12 -2.24 -0 0.02538461538461538461538 0.02538461538462 0.33 -1 1.0484 1.0484 5.242 +0 0.02538461538461538461538 0.02538461538462 0.3300000000 +1 1.0484 1.0484 5.2420000000 2 2 2 4 3 3.14 3.14 9.42 -4 3.14 3.14 3.14 +4 3.14 3.14 3.140 10 10 10 10 20 20 20 20 100 100 100 100 -124 124 124 124 +124 124 124 124.00 125 125.2 125.2 125.2 200 200 200 200 4400 -4400 -4400 -4400 -1234567890 1234567890.12345678 1234567890.12345678 1234567890.12345678 +1234567890 1234567890.12345678 1234567890.12345678 1234567890.1234567800 PREHOOK: query: -- negative EXPLAIN SELECT -key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1354,7 +1354,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### 4400 NULL -0 +0.0000000000 0 -100 -10 @@ -1373,7 +1373,7 @@ NULL 0.3 0.33 0.333 --1 +-1.0 -2 -3.14 1.12 @@ -1381,15 +1381,15 @@ NULL 1.122 -1.12 -1.122 --124 +-124.00 -125.2 1255.49 -3.14 -3.14 --3.14 --1 -1234567890.123456789 --1234567890.12345678 +-3.140 +-1.0000000000 +1234567890.1234567890 +-1234567890.1234567800 PREHOOK: query: -- positive EXPLAIN SELECT +key FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1423,7 +1423,7 @@ POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -4400 NULL -0 +0.0000000000 0 100 10 @@ -1442,7 +1442,7 @@ NULL -0.3 -0.33 -0.333 -1 +1.0 2 3.14 -1.12 @@ -1450,15 +1450,15 @@ NULL -1.122 1.12 1.122 -124 +124.00 125.2 -1255.49 3.14 3.14 -3.14 -1 --1234567890.123456789 -1234567890.12345678 +3.140 +1.0000000000 +-1234567890.1234567890 +1234567890.1234567800 PREHOOK: query: -- ceiling EXPlAIN SELECT CEIL(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -1628,42 +1628,42 @@ POSTHOOK: query: SELECT ROUND(key, 2) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --4400 +-4400.00 NULL -0 -0 -100 -10 -1 -0.1 +0.00 +0.00 +100.00 +10.00 +1.00 +0.10 0.01 -200 -20 -2 -0 -0.2 +200.00 +20.00 +2.00 +0.00 +0.20 0.02 -0.3 +0.30 0.33 0.33 --0.3 +-0.30 -0.33 -0.33 -1 -2 +1.00 +2.00 3.14 -1.12 -1.12 -1.12 1.12 1.12 -124 -125.2 +124.00 +125.20 -1255.49 3.14 3.14 3.14 -1 +1.00 -1234567890.12 1234567890.12 PREHOOK: query: -- power @@ -1772,38 +1772,38 @@ NULL NULL 1 1 -0 -0 -0 +0.0 +0.00 +0.000 1 1 0 NULL +0.0 +0.00 +0.10 +0.010 +0.0010 +0.10 +0.010 +0.0010 +0.0 0 -0 -0.1 -0.01 -0.001 -0.1 -0.01 -0.001 -0 -0 -1 +1.00 -0.12 -0.12 -0.122 0.44 0.439 -1 -1 +1.00 +1.0 -626.745 -1 -1 -1 -0 +1.00 +1.00 +1.000 +0.0000000000 -617283944.0617283945 -1 +1.0000000000 PREHOOK: query: -- stddev, var 
EXPLAIN SELECT value, stddev(key), variance(key) FROM DECIMAL_UDF GROUP BY value PREHOOK: type: QUERY @@ -2095,7 +2095,7 @@ POSTHOOK: query: SELECT MIN(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### --1234567890.123456789 +-1234567890.1234567890 PREHOOK: query: -- max EXPLAIN SELECT MAX(key) FROM DECIMAL_UDF PREHOOK: type: QUERY @@ -2158,7 +2158,7 @@ POSTHOOK: query: SELECT MAX(key) FROM DECIMAL_UDF POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_udf #### A masked pattern was here #### -1234567890.12345678 +1234567890.1234567800 PREHOOK: query: -- count EXPLAIN SELECT COUNT(key) FROM DECIMAL_UDF PREHOOK: type: QUERY diff --git a/ql/src/test/results/clientpositive/explain_logical.q.out b/ql/src/test/results/clientpositive/explain_logical.q.out index e803b51..862422a 100644 --- a/ql/src/test/results/clientpositive/explain_logical.q.out +++ b/ql/src/test/results/clientpositive/explain_logical.q.out @@ -375,10 +375,10 @@ s1 condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator (SEL_8) - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator (FS_9) @@ -407,7 +407,7 @@ s2 condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE PREHOOK: query: -- With views diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out new file mode 100644 index 0000000..ef63e74 --- /dev/null +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_full.q.out @@ -0,0 +1,1007 @@ +PREHOOK: query: create table if not exists ext_loc ( + state string, + locid int, + zip int, + year string +) row format delimited fields terminated by '|' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ext_loc +POSTHOOK: query: create table if not exists ext_loc ( + state string, + locid int, + zip int, + year string +) row format delimited fields terminated by '|' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ext_loc +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@ext_loc +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_full.txt' OVERWRITE INTO TABLE ext_loc +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@ext_loc +PREHOOK: query: create table if not exists loc_orc_1d ( + state string, + locid int, + zip int +) partitioned by(year string) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@loc_orc_1d +POSTHOOK: query: create table if not exists loc_orc_1d ( + state string, + locid int, + zip int +) partitioned by(year string) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@loc_orc_1d +PREHOOK: query: insert 
overwrite table loc_orc_1d partition(year) select * from ext_loc +PREHOOK: type: QUERY +PREHOOK: Input: default@ext_loc +PREHOOK: Output: default@loc_orc_1d +POSTHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ext_loc +POSTHOOK: Output: default@loc_orc_1d@year=2000 +POSTHOOK: Output: default@loc_orc_1d@year=2001 +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] +PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_1d +PREHOOK: Input: default@loc_orc_1d@year=2000 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d@year=2000 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_1d +PREHOOK: Input: default@loc_orc_1d@year=2001 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d@year=2001 +#### A masked pattern was here #### +PREHOOK: query: describe formatted loc_orc_1d.state PARTITION(year='2001') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@loc_orc_1d +POSTHOOK: query: describe formatted loc_orc_1d.state PARTITION(year='2001') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@loc_orc_1d +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +state string 0 3 0.75 2 from deserializer +PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state from loc_orc_1d +PREHOOK: type: QUERY +POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state from loc_orc_1d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_1d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_1d + Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: state (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column 
stats: COMPLETE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 2 + partition_columns year + partition_columns.types string + rawDataSize 184 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 342 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 4 + partition_columns year + partition_columns.types string + rawDataSize 368 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 364 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + 
serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d + Truncated Path -> Alias: + /loc_orc_1d/year=2000 [loc_orc_1d] + /loc_orc_1d/year=2001 [loc_orc_1d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state,locid from loc_orc_1d +PREHOOK: type: QUERY +POSTHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state,locid from loc_orc_1d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_1d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + TOK_SELEXPR + TOK_TABLE_OR_COL + locid + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_1d + Statistics: Num rows: 6 Data size: 552 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:int + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 2 + partition_columns year + partition_columns.types string + rawDataSize 184 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 342 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year 
+ partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 4 + partition_columns year + partition_columns.types string + rawDataSize 368 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 364 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d + Truncated Path -> Alias: + /loc_orc_1d/year=2000 [loc_orc_1d] + /loc_orc_1d/year=2001 [loc_orc_1d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: create table if not exists loc_orc_2d ( + state string, + locid int +) partitioned by(zip int, year string) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@loc_orc_2d +POSTHOOK: query: create table if not exists loc_orc_2d ( + state string, + locid int +) partitioned by(zip int, year string) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@loc_orc_2d +PREHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +PREHOOK: type: QUERY +PREHOOK: Input: default@ext_loc +PREHOOK: Output: default@loc_orc_2d +POSTHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ext_loc +POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000 +POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 +POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000 +POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001 +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).state SIMPLE 
[(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_2d +PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2000 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2000') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2000 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_2d +PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2000 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2000') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2000 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_2d +PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_2d +PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2001 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2001') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2001 +#### A masked pattern was here #### +PREHOOK: query: explain extended select state from loc_orc_2d +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select state from loc_orc_2d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_2d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_2d + Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: state (type: string) + 
outputColumnNames: _col0 + Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 510 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 89 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 260 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 267 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 257 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + 
serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d + Truncated Path -> Alias: + /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain extended select 
state,locid from loc_orc_2d +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select state,locid from loc_orc_2d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_2d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + TOK_SELEXPR + TOK_TABLE_OR_COL + locid + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_2d + Statistics: Num rows: 6 Data size: 532 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 6 Data size: 534 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:int + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 89 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 260 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here 
#### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 267 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 257 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int 
+#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d + Truncated Path -> Alias: + /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out new file mode 100644 index 0000000..cbe210b --- /dev/null +++ b/ql/src/test/results/clientpositive/extrapolate_part_stats_partial.q.out @@ -0,0 +1,2396 @@ +PREHOOK: query: create table if not exists ext_loc ( + state string, + locid int, + zip int, + year string +) row format delimited fields terminated by '|' stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@ext_loc +POSTHOOK: query: create table if not exists ext_loc ( + state string, + locid int, + zip int, + year string +) row format delimited fields terminated by '|' stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@ext_loc +PREHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc +PREHOOK: type: LOAD +#### A masked pattern was here #### +PREHOOK: Output: default@ext_loc +POSTHOOK: query: LOAD DATA LOCAL INPATH '../../data/files/extrapolate_stats_partial.txt' OVERWRITE INTO TABLE ext_loc +POSTHOOK: type: LOAD +#### A masked pattern was here #### +POSTHOOK: Output: default@ext_loc +PREHOOK: query: create table if not exists loc_orc_1d ( + state string, + locid int, + zip int +) partitioned by(year string) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@loc_orc_1d +POSTHOOK: query: create table if not exists loc_orc_1d ( + state string, + locid int, + zip int +) partitioned by(year string) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@loc_orc_1d +PREHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +PREHOOK: type: QUERY +PREHOOK: Input: default@ext_loc +PREHOOK: Output: default@loc_orc_1d +POSTHOOK: query: insert overwrite table loc_orc_1d partition(year) select * from ext_loc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ext_loc +POSTHOOK: Output: default@loc_orc_1d@year=2000 +POSTHOOK: Output: default@loc_orc_1d@year=2001 +POSTHOOK: Output: default@loc_orc_1d@year=2002 +POSTHOOK: Output: default@loc_orc_1d@year=2003 +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2000).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] 
+POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2001).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2002).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_1d PARTITION(year=2003).zip SIMPLE [(ext_loc)ext_loc.FieldSchema(name:zip, type:int, comment:null), ] +PREHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_1d +PREHOOK: Input: default@loc_orc_1d@year=2001 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_1d partition(year='2001') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d@year=2001 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_1d +PREHOOK: Input: default@loc_orc_1d@year=2002 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_1d partition(year='2002') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d@year=2002 +#### A masked pattern was here #### +PREHOOK: query: describe formatted loc_orc_1d.state PARTITION(year='2001') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@loc_orc_1d +POSTHOOK: query: describe formatted loc_orc_1d.state PARTITION(year='2001') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@loc_orc_1d +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +state string 0 3 0.75 2 from deserializer +PREHOOK: query: describe formatted loc_orc_1d.state PARTITION(year='2002') +PREHOOK: type: DESCTABLE +PREHOOK: Input: default@loc_orc_1d +POSTHOOK: query: describe formatted loc_orc_1d.state PARTITION(year='2002') +POSTHOOK: type: DESCTABLE +POSTHOOK: Input: default@loc_orc_1d +# col_name data_type min max num_nulls distinct_count avg_col_len max_col_len num_trues num_falses comment + +state string 0 6 3.0 3 from deserializer +PREHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state from loc_orc_1d +PREHOOK: type: QUERY +POSTHOOK: query: -- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state from loc_orc_1d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_1d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map 
Operator Tree: + TableScan + alias: loc_orc_1d + Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL + GatherStats: false + Select Operator + expressions: state (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 20 Data size: 1780 Basic stats: COMPLETE Column stats: PARTIAL +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types string + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 2 + partition_columns year + partition_columns.types string + rawDataSize 184 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 342 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 4 + partition_columns year + partition_columns.types string + rawDataSize 368 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 364 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + 
bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 6 + partition_columns year + partition_columns.types string + rawDataSize 570 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 383 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 8 + partition_columns year + partition_columns.types string + rawDataSize 744 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 390 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d + Truncated Path -> Alias: + /loc_orc_1d/year=2000 [loc_orc_1d] + /loc_orc_1d/year=2001 [loc_orc_1d] 
+ /loc_orc_1d/year=2002 [loc_orc_1d] + /loc_orc_1d/year=2003 [loc_orc_1d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state,locid from loc_orc_1d +PREHOOK: type: QUERY +POSTHOOK: query: -- column statistics for __HIVE_DEFAULT_PARTITION__ is not supported yet. Hence colStatState reports PARTIAL +-- basicStatState: COMPLETE colStatState: PARTIAL +explain extended select state,locid from loc_orc_1d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_1d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + TOK_SELEXPR + TOK_TABLE_OR_COL + locid + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_1d + Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: PARTIAL + GatherStats: false + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 20 Data size: 1860 Basic stats: COMPLETE Column stats: PARTIAL +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:int + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 2 + partition_columns year + partition_columns.types string + rawDataSize 184 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 342 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here 
#### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 4 + partition_columns year + partition_columns.types string + rawDataSize 368 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 364 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 6 + partition_columns year + partition_columns.types string + rawDataSize 570 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 383 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 8 + partition_columns year + 
partition_columns.types string + rawDataSize 744 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 390 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d + Truncated Path -> Alias: + /loc_orc_1d/year=2000 [loc_orc_1d] + /loc_orc_1d/year=2001 [loc_orc_1d] + /loc_orc_1d/year=2002 [loc_orc_1d] + /loc_orc_1d/year=2003 [loc_orc_1d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_1d +PREHOOK: Input: default@loc_orc_1d@year=2000 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_1d partition(year='2000') compute statistics for columns state +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d@year=2000 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_1d +PREHOOK: Input: default@loc_orc_1d@year=2003 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_1d partition(year='2003') compute statistics for columns state +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_1d +POSTHOOK: Input: default@loc_orc_1d@year=2003 +#### A masked pattern was here #### +PREHOOK: query: explain extended select state from loc_orc_1d +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select state from loc_orc_1d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_1d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_1d + Statistics: Num rows: 20 Data size: 1866 Basic stats: COMPLETE Column stats: COMPLETE + GatherStats: false + Select Operator + expressions: state (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 20 Data size: 1740 Basic stats: COMPLETE Column stats: COMPLETE +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types string + escape.delim \ + hive.serialization.extend.nesting.levels true 
+ serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 2 + partition_columns year + partition_columns.types string + rawDataSize 184 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 342 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 4 + partition_columns year + partition_columns.types string + rawDataSize 368 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 364 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns 
state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 6 + partition_columns year + partition_columns.types string + rawDataSize 570 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 383 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 8 + partition_columns year + partition_columns.types string + rawDataSize 744 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 390 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d + Truncated Path -> Alias: + /loc_orc_1d/year=2000 [loc_orc_1d] + /loc_orc_1d/year=2001 [loc_orc_1d] + /loc_orc_1d/year=2002 [loc_orc_1d] + /loc_orc_1d/year=2003 [loc_orc_1d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain extended select state,locid from loc_orc_1d +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select state,locid from loc_orc_1d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_1d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + TOK_SELEXPR + TOK_TABLE_OR_COL + locid + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_1d + Statistics: Num rows: 20 Data size: 1866 Basic stats: 
COMPLETE Column stats: PARTIAL + GatherStats: false + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 20 Data size: 1820 Basic stats: COMPLETE Column stats: PARTIAL +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:int + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 2 + partition_columns year + partition_columns.types string + rawDataSize 184 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 342 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 4 + partition_columns year + partition_columns.types string + rawDataSize 368 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 364 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types 
string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 6 + partition_columns year + partition_columns.types string + rawDataSize 570 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 383 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + numFiles 1 + numRows 8 + partition_columns year + partition_columns.types string + rawDataSize 744 + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 390 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid,zip + columns.comments + columns.types string:int:int +#### A masked pattern was here #### + name default.loc_orc_1d + partition_columns year + partition_columns.types string + serialization.ddl struct loc_orc_1d { string state, i32 locid, i32 zip} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_1d + name: default.loc_orc_1d + Truncated Path -> Alias: + /loc_orc_1d/year=2000 [loc_orc_1d] + /loc_orc_1d/year=2001 [loc_orc_1d] + /loc_orc_1d/year=2002 [loc_orc_1d] + /loc_orc_1d/year=2003 [loc_orc_1d] + 
+ Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: create table if not exists loc_orc_2d ( + state string, + locid int +) partitioned by(zip int, year string) stored as orc +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@loc_orc_2d +POSTHOOK: query: create table if not exists loc_orc_2d ( + state string, + locid int +) partitioned by(zip int, year string) stored as orc +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@loc_orc_2d +PREHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +PREHOOK: type: QUERY +PREHOOK: Input: default@ext_loc +PREHOOK: Output: default@loc_orc_2d +POSTHOOK: query: insert overwrite table loc_orc_2d partition(zip, year) select * from ext_loc +POSTHOOK: type: QUERY +POSTHOOK: Input: default@ext_loc +POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2001 +POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2002 +POSTHOOK: Output: default@loc_orc_2d@zip=43201/year=2003 +POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2000 +POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2001 +POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2002 +POSTHOOK: Output: default@loc_orc_2d@zip=94086/year=2003 +POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2000 +POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2001 +POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2002 +POSTHOOK: Output: default@loc_orc_2d@zip=94087/year=2003 +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=43201,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94086,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, 
comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2000).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2001).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2002).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).locid SIMPLE [(ext_loc)ext_loc.FieldSchema(name:locid, type:int, comment:null), ] +POSTHOOK: Lineage: loc_orc_2d PARTITION(zip=94087,year=2003).state SIMPLE [(ext_loc)ext_loc.FieldSchema(name:state, type:string, comment:null), ] +PREHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_2d +PREHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_2d partition(zip=94086, year='2001') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d@zip=94086/year=2001 +#### A masked pattern was here #### +PREHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid +PREHOOK: type: QUERY +PREHOOK: Input: default@loc_orc_2d +PREHOOK: Input: default@loc_orc_2d@zip=94087/year=2002 +#### A masked pattern was here #### +POSTHOOK: query: analyze table loc_orc_2d partition(zip=94087, year='2002') compute statistics for columns state,locid +POSTHOOK: type: QUERY +POSTHOOK: Input: default@loc_orc_2d +POSTHOOK: Input: default@loc_orc_2d@zip=94087/year=2002 +#### A masked pattern was here #### +PREHOOK: query: explain extended select state from loc_orc_2d +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select state from loc_orc_2d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_2d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_2d + Statistics: Num rows: 20 Data size: 1788 Basic stats: COMPLETE Column stats: PARTIAL + GatherStats: false + Select Operator + expressions: state (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 20 Data size: 1760 Basic stats: COMPLETE Column stats: PARTIAL +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0 + columns.types string + 
escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 43201 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 90 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 264 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + zip 43201 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 2 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 182 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 278 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + zip 43201 + properties: + 
COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 267 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 280 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 89 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 260 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 2 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 176 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 257 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 91 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 269 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 2 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 180 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 278 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base 
file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 273 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 277 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 264 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 271 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d + Truncated Path -> Alias: + /loc_orc_2d/zip=43201/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=43201/year=2002 [loc_orc_2d] + /loc_orc_2d/zip=43201/year=2003 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2002 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2003 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2002 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2003 [loc_orc_2d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: explain extended select state,locid from loc_orc_2d +PREHOOK: type: QUERY +POSTHOOK: query: explain extended select state,locid from loc_orc_2d +POSTHOOK: type: QUERY +ABSTRACT SYNTAX TREE: + +TOK_QUERY + TOK_FROM + TOK_TABREF + TOK_TABNAME + loc_orc_2d + TOK_INSERT + TOK_DESTINATION + TOK_DIR + TOK_TMP_FILE + TOK_SELECT + TOK_SELEXPR + TOK_TABLE_OR_COL + state + TOK_SELEXPR + TOK_TABLE_OR_COL + locid + + +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: loc_orc_2d + Statistics: Num rows: 20 Data size: 1788 Basic stats: 
COMPLETE Column stats: PARTIAL + GatherStats: false + Select Operator + expressions: state (type: string), locid (type: int) + outputColumnNames: _col0, _col1 + Statistics: Num rows: 20 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL + File Output Operator + compressed: false + GlobalTableId: 0 +#### A masked pattern was here #### + NumFilesPerFileSink: 1 + Statistics: Num rows: 20 Data size: 1840 Basic stats: COMPLETE Column stats: PARTIAL +#### A masked pattern was here #### + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + properties: + columns _col0,_col1 + columns.types string:int + escape.delim \ + hive.serialization.extend.nesting.levels true + serialization.format 1 + serialization.lib org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + TotalFiles: 1 + GatherStats: false + MultiFileSpray: false + Path -> Alias: +#### A masked pattern was here #### + Path -> Partition: +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 43201 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 90 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 264 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + zip 43201 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 2 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 182 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 278 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types 
string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + zip 43201 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 267 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 280 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 89 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 260 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: 
org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 2 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 176 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 257 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 91 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 269 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + zip 94086 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 2 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 180 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 278 +#### A masked pattern was here #### + serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2000 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2000 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2001 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2001 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 1 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 88 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 247 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: 
org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2002 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2002 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 273 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 277 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d +#### A masked pattern was here #### + Partition + base file name: year=2003 + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + partition values: + year 2003 + zip 94087 + properties: + COLUMN_STATS_ACCURATE true + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + numFiles 1 + numRows 3 + partition_columns zip/year + partition_columns.types int:string + rawDataSize 264 + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde + totalSize 271 +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + + input format: org.apache.hadoop.hive.ql.io.orc.OrcInputFormat + output format: org.apache.hadoop.hive.ql.io.orc.OrcOutputFormat + properties: + bucket_count -1 + columns state,locid + columns.comments + columns.types string:int +#### A masked pattern was here #### + name default.loc_orc_2d + partition_columns zip/year + partition_columns.types int:string + serialization.ddl struct loc_orc_2d { string state, i32 locid} + serialization.format 1 + serialization.lib org.apache.hadoop.hive.ql.io.orc.OrcSerde +#### A masked pattern was here #### + serde: org.apache.hadoop.hive.ql.io.orc.OrcSerde + name: default.loc_orc_2d + name: default.loc_orc_2d + Truncated Path -> Alias: + /loc_orc_2d/zip=43201/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=43201/year=2002 [loc_orc_2d] + /loc_orc_2d/zip=43201/year=2003 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2002 [loc_orc_2d] + /loc_orc_2d/zip=94086/year=2003 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2000 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2001 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2002 [loc_orc_2d] + /loc_orc_2d/zip=94087/year=2003 
[loc_orc_2d] + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + diff --git a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out index 3deb166..c540c4a 100644 --- a/ql/src/test/results/clientpositive/filter_join_breaktask.q.out +++ b/ql/src/test/results/clientpositive/filter_join_breaktask.q.out @@ -228,7 +228,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col6 + outputColumnNames: _col0, _col7 Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -239,7 +239,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col6 + columns _col0,_col7 columns.types int,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -254,9 +254,9 @@ STAGE PLANS: TableScan GatherStats: false Reduce Output Operator - key expressions: _col6 (type: string) + key expressions: _col7 (type: string) sort order: + - Map-reduce partition columns: _col6 (type: string) + Map-reduce partition columns: _col7 (type: string) Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE tag: 0 value expressions: _col0 (type: int) @@ -285,7 +285,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col6 + columns _col0,_col7 columns.types int,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -294,7 +294,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col6 + columns _col0,_col7 columns.types int,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -355,10 +355,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col11 + outputColumnNames: _col0, _col13 Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col11 (type: string) + expressions: _col0 (type: int), _col13 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/index_auto_self_join.q.out b/ql/src/test/results/clientpositive/index_auto_self_join.q.out index b7c40bd..1c0e564 100644 --- a/ql/src/test/results/clientpositive/index_auto_self_join.q.out +++ b/ql/src/test/results/clientpositive/index_auto_self_join.q.out @@ -47,10 +47,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 1 Data size: 220 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 220 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -198,10 +198,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col4 
+ outputColumnNames: _col0, _col5 Statistics: Num rows: 1 Data size: 220 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 220 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out index b6962b6..689caf4 100644 --- a/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out +++ b/ql/src/test/results/clientpositive/infer_bucket_sort_map_operators.q.out @@ -416,9 +416,9 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false @@ -571,13 +571,13 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col5 + outputColumnNames: _col6 Select Operator - expressions: _col5 (type: string) - outputColumnNames: _col5 + expressions: _col6 (type: string) + outputColumnNames: _col6 Group By Operator aggregations: count() - keys: _col5 (type: string) + keys: _col6 (type: string) mode: hash outputColumnNames: _col0, _col1 Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/innerjoin.q.out b/ql/src/test/results/clientpositive/innerjoin.q.out index 72ef8e5..0cfc098 100644 --- a/ql/src/test/results/clientpositive/innerjoin.q.out +++ b/ql/src/test/results/clientpositive/innerjoin.q.out @@ -57,10 +57,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/input23.q.out b/ql/src/test/results/clientpositive/input23.q.out index 090d5b5..2df3126 100644 --- a/ql/src/test/results/clientpositive/input23.q.out +++ b/ql/src/test/results/clientpositive/input23.q.out @@ -134,10 +134,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col3, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col6 (type: string), _col7 (type: string), '2008-04-08' (type: string), '14' (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col7 (type: string), _col8 (type: string), '2008-04-08' (type: string), '14' (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/join1.q.out 
b/ql/src/test/results/clientpositive/join1.q.out index 1097e1c..a971a60 100644 --- a/ql/src/test/results/clientpositive/join1.q.out +++ b/ql/src/test/results/clientpositive/join1.q.out @@ -53,10 +53,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join14.q.out b/ql/src/test/results/clientpositive/join14.q.out index 2ebe4b4..6de16f0 100644 --- a/ql/src/test/results/clientpositive/join14.q.out +++ b/ql/src/test/results/clientpositive/join14.q.out @@ -57,10 +57,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join15.q.out b/ql/src/test/results/clientpositive/join15.q.out index 2caf4d5..b0510d1 100644 --- a/ql/src/test/results/clientpositive/join15.q.out +++ b/ql/src/test/results/clientpositive/join15.q.out @@ -44,10 +44,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join17.q.out b/ql/src/test/results/clientpositive/join17.q.out index b090b09..a74aba9 100644 --- a/ql/src/test/results/clientpositive/join17.q.out +++ b/ql/src/test/results/clientpositive/join17.q.out @@ -150,10 +150,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col4) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col5) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join2.q.out b/ql/src/test/results/clientpositive/join2.q.out index dea8751..5280b08 100644 --- 
a/ql/src/test/results/clientpositive/join2.q.out +++ b/ql/src/test/results/clientpositive/join2.q.out @@ -53,10 +53,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 + _col4) is not null (type: boolean) + predicate: (_col0 + _col5) is not null (type: boolean) Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -70,9 +70,9 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: (_col0 + _col4) (type: double) + key expressions: (_col0 + _col5) (type: double) sort order: + - Map-reduce partition columns: (_col0 + _col4) (type: double) + Map-reduce partition columns: (_col0 + _col5) (type: double) Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) TableScan @@ -94,10 +94,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col1} - outputColumnNames: _col0, _col9 + outputColumnNames: _col0, _col11 Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col9 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col11 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join20.q.out b/ql/src/test/results/clientpositive/join20.q.out index a064345..f63db94 100644 --- a/ql/src/test/results/clientpositive/join20.q.out +++ b/ql/src/test/results/clientpositive/join20.q.out @@ -61,10 +61,10 @@ STAGE PLANS: 0 1 2 {(KEY.reducesinkkey0 < 20)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -722,10 +722,10 @@ STAGE PLANS: 0 1 2 {(KEY.reducesinkkey0 < 20)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join21.q.out b/ql/src/test/results/clientpositive/join21.q.out index dbe3a2f..2494b56 100644 --- a/ql/src/test/results/clientpositive/join21.q.out +++ 
b/ql/src/test/results/clientpositive/join21.q.out @@ -56,10 +56,10 @@ STAGE PLANS: 0 {(KEY.reducesinkkey0 < 10)} 1 2 {(KEY.reducesinkkey0 < 10)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join22.q.out b/ql/src/test/results/clientpositive/join22.q.out index f7f0e34..0916179 100644 --- a/ql/src/test/results/clientpositive/join22.q.out +++ b/ql/src/test/results/clientpositive/join22.q.out @@ -84,10 +84,10 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col2} - outputColumnNames: _col7 + outputColumnNames: _col8 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col7 (type: string) + expressions: _col8 (type: string) outputColumnNames: _col0 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join23.q.out b/ql/src/test/results/clientpositive/join23.q.out index ce081a4..6f36347 100644 --- a/ql/src/test/results/clientpositive/join23.q.out +++ b/ql/src/test/results/clientpositive/join23.q.out @@ -41,10 +41,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join25.q.out b/ql/src/test/results/clientpositive/join25.q.out index b7b7d6d..5984f55 100644 --- a/ql/src/test/results/clientpositive/join25.q.out +++ b/ql/src/test/results/clientpositive/join25.q.out @@ -72,10 +72,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join26.q.out b/ql/src/test/results/clientpositive/join26.q.out index 9f589d2..6a118a8 100644 --- a/ql/src/test/results/clientpositive/join26.q.out +++ b/ql/src/test/results/clientpositive/join26.q.out @@ -185,11 +185,11 @@ STAGE PLANS: 0 key (type: string) 1 
key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Position of Big Table: 2 Statistics: Num rows: 33 Data size: 6613 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 33 Data size: 6613 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join27.q.out b/ql/src/test/results/clientpositive/join27.q.out index a396391..72dbdaf 100644 --- a/ql/src/test/results/clientpositive/join27.q.out +++ b/ql/src/test/results/clientpositive/join27.q.out @@ -72,10 +72,10 @@ STAGE PLANS: keys: 0 value (type: string) 1 value (type: string) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join3.q.out b/ql/src/test/results/clientpositive/join3.q.out index 5454b6a..0700051 100644 --- a/ql/src/test/results/clientpositive/join3.q.out +++ b/ql/src/test/results/clientpositive/join3.q.out @@ -66,10 +66,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 2 {VALUE._col0} - outputColumnNames: _col0, _col9 + outputColumnNames: _col0, _col11 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col9 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col11 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join32.q.out b/ql/src/test/results/clientpositive/join32.q.out index aa44362..918262e 100644 --- a/ql/src/test/results/clientpositive/join32.q.out +++ b/ql/src/test/results/clientpositive/join32.q.out @@ -190,7 +190,7 @@ STAGE PLANS: Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col5} + 0 {_col0} {_col6} 1 keys: 0 _col1 (type: string) @@ -217,23 +217,23 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col5} + 0 {_col0} {_col6} 1 {value} keys: 0 _col1 (type: string) 1 value (type: string) - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Position of Big Table: 0 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column 
stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join32_lessSize.q.out b/ql/src/test/results/clientpositive/join32_lessSize.q.out index 18c0d72..bf00d82 100644 --- a/ql/src/test/results/clientpositive/join32_lessSize.q.out +++ b/ql/src/test/results/clientpositive/join32_lessSize.q.out @@ -161,7 +161,7 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -173,7 +173,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col5 + columns _col0,_col1,_col6 columns.types string,string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -341,7 +341,7 @@ STAGE PLANS: Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col5} + 0 {_col0} {_col6} 1 keys: 0 _col1 (type: string) @@ -357,16 +357,16 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col5} + 0 {_col0} {_col6} 1 {value} keys: 0 _col1 (type: string) 1 value (type: string) - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Position of Big Table: 0 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -406,7 +406,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col5 + columns _col0,_col1,_col6 columns.types string,string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -415,7 +415,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col5 + columns _col0,_col1,_col6 columns.types string,string,string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -758,7 +758,7 @@ STAGE PLANS: keys: 0 value (type: string) 1 value (type: string) - outputColumnNames: _col4 + outputColumnNames: _col5 Position of Big Table: 0 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -770,7 +770,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -896,11 +896,11 @@ STAGE PLANS: predicate: key is not null (type: boolean) HashTable Sink Operator condition expressions: - 0 {_col4} + 0 {_col5} 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) Position of Big Table: 0 @@ -913,11 +913,11 @@ STAGE PLANS: 
predicate: key is not null (type: boolean) HashTable Sink Operator condition expressions: - 0 {_col4} + 0 {_col5} 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) Position of Big Table: 0 @@ -932,17 +932,17 @@ STAGE PLANS: Inner Join 0 to 1 Inner Join 0 to 2 condition expressions: - 0 {_col4} + 0 {_col5} 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col4, _col9, _col13 + outputColumnNames: _col5, _col11, _col16 Position of Big Table: 0 Select Operator - expressions: _col4 (type: string), _col13 (type: string), _col9 (type: string) + expressions: _col5 (type: string), _col16 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -985,7 +985,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -994,7 +994,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1140,7 +1140,7 @@ STAGE PLANS: 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) Position of Big Table: 1 @@ -1157,7 +1157,7 @@ STAGE PLANS: 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) Position of Big Table: 1 @@ -1176,17 +1176,17 @@ STAGE PLANS: Inner Join 0 to 1 Inner Join 0 to 2 condition expressions: - 0 {_col4} + 0 {_col5} 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col4, _col9, _col13 + outputColumnNames: _col5, _col11, _col16 Position of Big Table: 1 Select Operator - expressions: _col4 (type: string), _col13 (type: string), _col9 (type: string) + expressions: _col5 (type: string), _col16 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -1229,7 +1229,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1238,7 +1238,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1353,7 +1353,7 @@ STAGE PLANS: 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) Position of Big Table: 2 @@ -1370,7 +1370,7 @@ STAGE PLANS: 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) Position of Big Table: 2 @@ -1389,17 +1389,17 @@ STAGE PLANS: Inner Join 0 to 1 Inner Join 0 to 2 
condition expressions: - 0 {_col4} + 0 {_col5} 1 {value} 2 {value} keys: - 0 _col4 (type: string) + 0 _col5 (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col4, _col9, _col13 + outputColumnNames: _col5, _col11, _col16 Position of Big Table: 2 Select Operator - expressions: _col4 (type: string), _col13 (type: string), _col9 (type: string) + expressions: _col5 (type: string), _col16 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2 File Output Operator compressed: false @@ -1442,7 +1442,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1451,7 +1451,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1553,9 +1553,9 @@ STAGE PLANS: TableScan GatherStats: false Reduce Output Operator - key expressions: _col4 (type: string) + key expressions: _col5 (type: string) sort order: + - Map-reduce partition columns: _col4 (type: string) + Map-reduce partition columns: _col5 (type: string) Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE tag: 0 auto parallelism: false @@ -1600,7 +1600,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1609,7 +1609,7 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col4 + columns _col5 columns.types string escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -1716,10 +1716,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} 2 {VALUE._col0} - outputColumnNames: _col4, _col9, _col13 + outputColumnNames: _col5, _col11, _col16 Statistics: Num rows: 68 Data size: 7031 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col13 (type: string), _col9 (type: string) + expressions: _col5 (type: string), _col16 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 68 Data size: 7031 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join33.q.out b/ql/src/test/results/clientpositive/join33.q.out index aa44362..918262e 100644 --- a/ql/src/test/results/clientpositive/join33.q.out +++ b/ql/src/test/results/clientpositive/join33.q.out @@ -190,7 +190,7 @@ STAGE PLANS: Statistics: Num rows: 29 Data size: 2906 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col5} + 0 {_col0} {_col6} 1 keys: 0 _col1 (type: string) @@ -217,23 +217,23 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE 
Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col5} + 0 {_col0} {_col6} 1 {value} keys: 0 _col1 (type: string) 1 value (type: string) - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Position of Big Table: 0 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join36.q.out b/ql/src/test/results/clientpositive/join36.q.out index df8acea..03b1377 100644 --- a/ql/src/test/results/clientpositive/join36.q.out +++ b/ql/src/test/results/clientpositive/join36.q.out @@ -112,10 +112,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 170 Data size: 817 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col6 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 170 Data size: 817 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join37.q.out b/ql/src/test/results/clientpositive/join37.q.out index dc92ddd..1b76016 100644 --- a/ql/src/test/results/clientpositive/join37.q.out +++ b/ql/src/test/results/clientpositive/join37.q.out @@ -72,10 +72,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join38.q.out b/ql/src/test/results/clientpositive/join38.q.out index 44dca4e..e025503 100644 --- a/ql/src/test/results/clientpositive/join38.q.out +++ b/ql/src/test/results/clientpositive/join38.q.out @@ -94,15 +94,15 @@ STAGE PLANS: keys: 0 '111' (type: string) 1 '111' (type: string) - outputColumnNames: _col1, _col9 + outputColumnNames: _col1, _col10 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col9 (type: string) - outputColumnNames: _col1, _col9 + expressions: _col1 (type: string), _col10 (type: string) + outputColumnNames: _col1, _col10 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count(1) - keys: _col1 (type: string), _col9 (type: string) + keys: _col1 (type: string), _col10 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 15 Data size: 1598 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/join39.q.out b/ql/src/test/results/clientpositive/join39.q.out index a82a6fe..1942567 100644 --- 
a/ql/src/test/results/clientpositive/join39.q.out +++ b/ql/src/test/results/clientpositive/join39.q.out @@ -73,10 +73,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join40.q.out b/ql/src/test/results/clientpositive/join40.q.out index 69f6cbe..29ad289 100644 --- a/ql/src/test/results/clientpositive/join40.q.out +++ b/ql/src/test/results/clientpositive/join40.q.out @@ -44,10 +44,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -684,10 +684,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1805,10 +1805,10 @@ STAGE PLANS: 0 1 2 {(KEY.reducesinkkey0 < 20)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2466,10 +2466,10 @@ STAGE PLANS: 0 1 2 {(KEY.reducesinkkey0 < 20)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, 
_col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -3119,10 +3119,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join41.q.out b/ql/src/test/results/clientpositive/join41.q.out index b7bfd08..cfb41fd 100644 --- a/ql/src/test/results/clientpositive/join41.q.out +++ b/ql/src/test/results/clientpositive/join41.q.out @@ -47,10 +47,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -122,10 +122,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 23 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join9.q.out b/ql/src/test/results/clientpositive/join9.q.out index 4ece610..4bab9de 100644 --- a/ql/src/test/results/clientpositive/join9.q.out +++ b/ql/src/test/results/clientpositive/join9.q.out @@ -212,10 +212,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col7 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col8 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_alt_syntax.q.out b/ql/src/test/results/clientpositive/join_alt_syntax.q.out index 801e32e..590e4d1 100644 --- a/ql/src/test/results/clientpositive/join_alt_syntax.q.out +++ b/ql/src/test/results/clientpositive/join_alt_syntax.q.out @@ -76,10 +76,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col1} 1 {VALUE._col1} - outputColumnNames: _col1, _col12 + 
outputColumnNames: _col1, _col13 Statistics: Num rows: 34 Data size: 3490 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col12 (type: string) + expressions: _col1 (type: string), _col13 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 34 Data size: 3490 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -154,13 +154,13 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} - outputColumnNames: _col1, _col12, _col23 + outputColumnNames: _col1, _col13, _col25 Statistics: Num rows: 35 Data size: 3601 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = _col12) and (_col12 = _col23)) (type: boolean) + predicate: ((_col1 = _col13) and (_col13 = _col25)) (type: boolean) Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col12 (type: string), _col23 (type: string) + expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -239,13 +239,13 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} - outputColumnNames: _col1, _col11, _col13 + outputColumnNames: _col1, _col12, _col14 Statistics: Num rows: 35 Data size: 3601 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = _col11) and (_col11 = _col13)) (type: boolean) + predicate: ((_col1 = _col12) and (_col12 = _col14)) (type: boolean) Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col11 (type: string), _col13 (type: string) + expressions: _col1 (type: string), _col12 (type: string), _col14 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 8 Data size: 823 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -304,10 +304,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col11, _col12 + outputColumnNames: _col0, _col1, _col12, _col13 Statistics: Num rows: 33 Data size: 3490 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and _col12 is not null) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -332,25 +332,25 @@ STAGE PLANS: Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 8 Data size: 846 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col11 (type: int) + value expressions: _col0 (type: int), _col1 (type: string), _col12 (type: int) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col11} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col12} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col11, _col12, _col23 + 
outputColumnNames: _col0, _col1, _col12, _col13, _col25 Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and (_col23 = _col12)) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and (_col25 = _col13)) (type: boolean) Statistics: Num rows: 4 Data size: 423 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col12 (type: string), _col23 (type: string) + expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 423 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -416,7 +416,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col11, _col12 + outputColumnNames: _col0, _col1, _col12, _col13 Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -441,19 +441,19 @@ STAGE PLANS: Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col11 (type: int) + value expressions: _col0 (type: int), _col1 (type: string), _col12 (type: int) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col11} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col12} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col11, _col12, _col23 + outputColumnNames: _col0, _col1, _col12, _col13, _col25 Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -483,21 +483,21 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string), _col11 (type: int), _col12 (type: string), _col23 (type: string) + value expressions: _col1 (type: string), _col12 (type: int), _col13 (type: string), _col25 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col10} {VALUE._col11} {VALUE._col22} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col11} {VALUE._col12} {VALUE._col24} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col11, _col12, _col23, _col33, _col34 + outputColumnNames: _col0, _col1, _col12, _col13, _col25, _col36, _col37 Statistics: Num rows: 18 Data size: 1980 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col12 = _col23) and (_col0 = _col33)) and (_col0 = _col11)) (type: boolean) + predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col12 (type: string), _col23 (type: string), _col34 (type: string) + expressions: _col1 (type: string), _col13 (type: string), _col25 
(type: string), _col37 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -563,7 +563,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col11, _col12 + outputColumnNames: _col0, _col1, _col12, _col13 Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -588,19 +588,19 @@ STAGE PLANS: Statistics: Num rows: 16 Data size: 1637 Basic stats: COMPLETE Column stats: NONE TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 8 Data size: 930 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col11 (type: int) + value expressions: _col0 (type: int), _col1 (type: string), _col12 (type: int) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col11} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col12} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col11, _col12, _col23 + outputColumnNames: _col0, _col1, _col12, _col13, _col25 Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -630,21 +630,21 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 17 Data size: 1800 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string), _col11 (type: int), _col12 (type: string), _col23 (type: string) + value expressions: _col1 (type: string), _col12 (type: int), _col13 (type: string), _col25 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col10} {VALUE._col11} {VALUE._col22} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col11} {VALUE._col12} {VALUE._col24} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col11, _col12, _col23, _col33, _col34 + outputColumnNames: _col0, _col1, _col12, _col13, _col25, _col36, _col37 Statistics: Num rows: 18 Data size: 1980 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col12 = _col23) and (_col0 = _col33)) and (_col0 = _col11)) (type: boolean) + predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col12 (type: string), _col23 (type: string), _col34 (type: string) + expressions: _col1 (type: string), _col13 (type: string), _col25 (type: string), _col37 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 220 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out index 581de53..22eb697 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_1.q.out @@ -99,10 +99,10 @@ STAGE PLANS: 0 
{VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -178,10 +178,10 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 
(type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -238,10 +238,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and _col12 is not null) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -267,22 +267,22 @@ STAGE PLANS: value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 
(type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -339,7 +339,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, 
_col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -365,22 +365,22 @@ STAGE PLANS: value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: 
string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out index 94d238f..70153ad 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_2.q.out @@ -113,10 +113,10 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: 
string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -182,7 +182,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -208,19 +208,19 @@ STAGE PLANS: value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} 
{VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -250,18 +250,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col10} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col21} {VALUE._col22} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, 
_col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out index 1ce5e17..8232795 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_3.q.out @@ -101,13 +101,13 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: 
((_col1 = _col12) and (_col12 = _col23)) (type: boolean) + predicate: ((_col1 = _col13) and (_col13 = _col25)) (type: boolean) Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -185,13 +185,13 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 = _col1) and (_col23 = _col12)) (type: boolean) + predicate: ((_col13 = _col1) and (_col25 = _col13)) (type: boolean) Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), 
_col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -250,10 +250,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and _col12 is not null) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -279,25 +279,25 @@ STAGE PLANS: value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 
{VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and (_col23 = _col12)) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and (_col25 = _col13)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -356,7 +356,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: 
NONE File Output Operator compressed: false @@ -382,25 +382,25 @@ STAGE PLANS: value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col23 = _col12) (type: boolean) + predicate: (_col25 = _col13) (type: boolean) Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: 
int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out index e2f0682..803597a 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_4.q.out @@ -115,13 +115,13 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 = _col23) and (_col1 = _col34)) (type: boolean) + predicate: ((_col13 = _col25) and (_col1 = _col37)) (type: boolean) Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), 
_col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -189,7 +189,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -215,19 +215,19 @@ STAGE PLANS: value expressions: p_partkey (type: int), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 
{VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -257,21 +257,21 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col10} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col21} {VALUE._col22} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, 
_col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 3 Data size: 2302 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col12 = _col23) and (_col0 = _col33)) and (_col0 = _col11)) (type: boolean) + predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out index 62eab2a..00f63b0 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual1.q.out @@ -155,10 +155,10 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, 
_col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -234,10 +234,10 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), 
_col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -294,10 +294,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and _col12 is not null) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -323,22 +323,22 @@ STAGE PLANS: value expressions: p3_partkey (type: int), p3_mfgr (type: string), p3_brand (type: string), p3_type (type: string), p3_size (type: int), p3_container (type: string), p3_retailprice (type: double), p3_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 
{VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -395,7 +395,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -421,22 +421,22 @@ STAGE PLANS: value expressions: p3_partkey (type: int), p3_mfgr (type: string), p3_brand (type: string), p3_type (type: string), p3_size (type: int), p3_container 
(type: string), p3_retailprice (type: double), p3_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 
(type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out index 754bc40..a1a12d6 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual2.q.out @@ -169,10 +169,10 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, 
_col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -238,7 +238,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -264,19 +264,19 @@ STAGE PLANS: value expressions: p3_partkey (type: int), p3_mfgr (type: string), p3_brand (type: string), p3_type (type: string), p3_size (type: int), p3_container (type: string), p3_retailprice (type: double), p3_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, 
_col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -306,18 +306,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col10} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col21} {VALUE._col22} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 
(type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out index 804795e..ef24620 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual3.q.out @@ -157,13 +157,13 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col1 = _col12) and (_col12 = _col23)) (type: boolean) + predicate: ((_col1 = _col13) and (_col13 = _col25)) (type: boolean) Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 
(type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -241,13 +241,13 @@ STAGE PLANS: 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 6 Data size: 4186 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 = _col1) and (_col23 = _col12)) (type: boolean) + predicate: ((_col13 = _col1) and (_col25 = _col13)) (type: boolean) Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 
(type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 1 Data size: 697 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -306,10 +306,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and _col12 is not null) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and _col13 is not null) (type: boolean) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -335,25 +335,25 @@ STAGE PLANS: value expressions: p3_partkey (type: int), p3_mfgr (type: string), p3_brand (type: string), p3_type (type: string), p3_size (type: int), p3_container (type: string), p3_retailprice (type: double), p3_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 1 Data size: 698 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 
1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 1 Data size: 767 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col11 + _col0) = _col0) and (_col23 = _col12)) (type: boolean) + predicate: (((_col12 + _col0) = _col0) and (_col25 = _col13)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -412,7 +412,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} 1 {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -438,25 +438,25 @@ STAGE PLANS: value expressions: p3_partkey (type: int), p3_mfgr (type: string), p3_brand (type: string), p3_type (type: string), p3_size (type: int), p3_container (type: string), p3_retailprice (type: double), p3_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) 
sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 5 Data size: 3490 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 5 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col23 = _col12) (type: boolean) + predicate: (_col25 = _col13) (type: boolean) Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), 1 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 
(type: string), _col31 (type: double), _col32 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26 Statistics: Num rows: 2 Data size: 1535 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out index 8589651..1e25778 100644 --- a/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out +++ b/ql/src/test/results/clientpositive/join_cond_pushdown_unqual4.q.out @@ -171,13 +171,13 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 2 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} 3 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 9 Data size: 6279 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col12 = _col23) and (_col1 = _col34)) (type: boolean) + predicate: ((_col13 = _col25) and (_col1 = _col37)) (type: boolean) Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), 
_col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -245,7 +245,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -271,19 +271,19 @@ STAGE PLANS: value expressions: p3_partkey (type: int), p3_mfgr (type: string), p3_brand (type: string), p3_type (type: string), p3_size (type: int), p3_container (type: string), p3_retailprice (type: double), p3_comment (type: string) TableScan Reduce Output Operator - key expressions: _col12 (type: string) + key expressions: _col13 (type: string) sort order: + - Map-reduce partition columns: _col12 (type: string) + Map-reduce partition columns: _col13 (type: string) Statistics: Num rows: 2 Data size: 1395 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col11} {KEY.reducesinkkey0} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col8} {VALUE._col12} {KEY.reducesinkkey0} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, 
_col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32 Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -313,21 +313,21 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: int) Statistics: Num rows: 2 Data size: 1534 Basic stats: COMPLETE Column stats: NONE - value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string) + value expressions: _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col10} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col21} {VALUE._col22} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} {VALUE._col11} {VALUE._col12} {VALUE._col13} {VALUE._col14} {VALUE._col15} {VALUE._col16} {VALUE._col17} {VALUE._col18} {VALUE._col19} {VALUE._col23} {VALUE._col24} {VALUE._col25} {VALUE._col26} {VALUE._col27} {VALUE._col28} {VALUE._col29} {VALUE._col30} {VALUE._col31} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col33, _col34, _col35, _col36, _col37, _col38, _col39, _col40, _col41 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col36, _col37, _col38, _col39, _col40, _col41, _col42, _col43, _col44 Statistics: Num rows: 3 Data size: 2093 Basic stats: COMPLETE Column 
stats: NONE Filter Operator - predicate: (((_col12 = _col23) and (_col0 = _col33)) and (_col0 = _col11)) (type: boolean) + predicate: (((_col13 = _col25) and (_col0 = _col36)) and (_col0 = _col12)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string), _col22 (type: int), _col23 (type: string), _col24 (type: string), _col25 (type: string), _col26 (type: string), _col27 (type: int), _col28 (type: string), _col29 (type: double), _col30 (type: string), _col33 (type: int), _col34 (type: string), _col35 (type: string), _col36 (type: string), _col37 (type: string), _col38 (type: int), _col39 (type: string), _col40 (type: double), _col41 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string), _col24 (type: int), _col25 (type: string), _col26 (type: string), _col27 (type: string), _col28 (type: string), _col29 (type: int), _col30 (type: string), _col31 (type: double), _col32 (type: string), _col36 (type: int), _col37 (type: string), _col38 (type: string), _col39 (type: string), _col40 (type: string), _col41 (type: int), _col42 (type: string), _col43 (type: double), _col44 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20, _col21, _col22, _col23, _col24, _col25, _col26, _col27, _col28, _col29, _col30, _col31, _col32, _col33, _col34, _col35 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_filters_overlap.q.out b/ql/src/test/results/clientpositive/join_filters_overlap.q.out index 3dae4e1..5ec5e28 100644 --- a/ql/src/test/results/clientpositive/join_filters_overlap.q.out +++ b/ql/src/test/results/clientpositive/join_filters_overlap.q.out @@ -206,10 +206,10 @@ STAGE PLANS: 0 {(VALUE._col0 = 50)} {(VALUE._col0 = 60)} 1 2 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 6 Data size: 39 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 6 Data size: 39 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -457,10 +457,10 @@ STAGE PLANS: 0 1 {(VALUE._col0 = 50)} {(VALUE._col0 = 60)} 2 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 
Statistics: Num rows: 6 Data size: 39 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 6 Data size: 39 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -722,10 +722,10 @@ STAGE PLANS: 0 1 {(VALUE._col0 = 50)} {(VALUE._col0 > 10)} {(VALUE._col0 = 60)} {(VALUE._col0 > 20)} 2 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 6 Data size: 39 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 6 Data size: 39 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1017,10 +1017,10 @@ STAGE PLANS: 1 {(VALUE._col0 = 50)} {(VALUE._col0 = 60)} 2 3 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 9 Data size: 59 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int), _col12 (type: int), _col13 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int), _col15 (type: int), _col16 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 9 Data size: 59 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1306,10 +1306,10 @@ STAGE PLANS: 1 2 3 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 9 Data size: 59 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int), _col12 (type: int), _col13 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int), _col15 (type: int), _col16 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 9 Data size: 59 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_hive_626.q.out b/ql/src/test/results/clientpositive/join_hive_626.q.out index 52b6c78..13a6f94 100644 --- a/ql/src/test/results/clientpositive/join_hive_626.q.out +++ b/ql/src/test/results/clientpositive/join_hive_626.q.out @@ -104,7 +104,7 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col0} {VALUE._col3} - outputColumnNames: _col1, _col8, _col12 + outputColumnNames: _col1, _col9, _col13 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -118,11 +118,11 @@ STAGE PLANS: Map 
Operator Tree: TableScan Reduce Output Operator - key expressions: _col8 (type: int) + key expressions: _col9 (type: int) sort order: + - Map-reduce partition columns: _col8 (type: int) + Map-reduce partition columns: _col9 (type: int) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col1 (type: string), _col12 (type: string) + value expressions: _col1 (type: string), _col13 (type: string) TableScan alias: hive_count Statistics: Num rows: 0 Data size: 5 Basic stats: PARTIAL Column stats: NONE @@ -140,12 +140,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col1} {VALUE._col11} + 0 {VALUE._col1} {VALUE._col12} 1 {VALUE._col0} - outputColumnNames: _col1, _col12, _col20 + outputColumnNames: _col1, _col13, _col22 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col12 (type: string), _col20 (type: int) + expressions: _col1 (type: string), _col13 (type: string), _col22 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_map_ppr.q.out b/ql/src/test/results/clientpositive/join_map_ppr.q.out index 58f1898..e4867d8 100644 --- a/ql/src/test/results/clientpositive/join_map_ppr.q.out +++ b/ql/src/test/results/clientpositive/join_map_ppr.q.out @@ -187,11 +187,11 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Position of Big Table: 2 Statistics: Num rows: 33 Data size: 6613 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 33 Data size: 6613 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -806,11 +806,11 @@ STAGE PLANS: 0 UDFToDouble(key) (type: double) 1 UDFToDouble(key) (type: double) 2 UDFToDouble(key) (type: double) - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Position of Big Table: 2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_merging.q.out b/ql/src/test/results/clientpositive/join_merging.q.out index 6d11ff8..61d1e17 100644 --- a/ql/src/test/results/clientpositive/join_merging.q.out +++ b/ql/src/test/results/clientpositive/join_merging.q.out @@ -82,10 +82,10 @@ STAGE PLANS: 0 {VALUE._col4} 1 {VALUE._col4} 2 - outputColumnNames: _col5, _col16 + outputColumnNames: _col5, _col17 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col5 (type: int), _col16 (type: int) + expressions: _col5 (type: int), _col17 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -158,13 +158,13 @@ STAGE PLANS: 0 {VALUE._col4} 1 {VALUE._col4} 2 - outputColumnNames: _col5, _col16 + 
outputColumnNames: _col5, _col17 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator - predicate: (_col5 > (_col16 + 10)) (type: boolean) + predicate: (_col5 > (_col17 + 10)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col5 (type: int), _col16 (type: int) + expressions: _col5 (type: int), _col17 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_nullsafe.q.out b/ql/src/test/results/clientpositive/join_nullsafe.q.out index 10c4e2e..d56a04d 100644 --- a/ql/src/test/results/clientpositive/join_nullsafe.q.out +++ b/ql/src/test/results/clientpositive/join_nullsafe.q.out @@ -58,10 +58,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} {KEY.reducesinkkey0} nullSafes: [true] - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 28 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -154,10 +154,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -233,10 +233,10 @@ STAGE PLANS: 1 {VALUE._col0} {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} {VALUE._col0} nullSafes: [true] - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -345,10 +345,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} {KEY.reducesinkkey1} nullSafes: [true, false] - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), 
_col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 4 Data size: 37 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -421,10 +421,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} {KEY.reducesinkkey1} nullSafes: [true, true] - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 6 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1546,10 +1546,10 @@ STAGE PLANS: 0 {VALUE._col0} 1 {VALUE._col0} nullSafes: [true] - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: null (type: void), _col1 (type: int), _col4 (type: int), null (type: void) + expressions: null (type: void), _col1 (type: int), _col5 (type: int), null (type: void) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_rc.q.out b/ql/src/test/results/clientpositive/join_rc.q.out index d0c4462..5a1c511 100644 --- a/ql/src/test/results/clientpositive/join_rc.q.out +++ b/ql/src/test/results/clientpositive/join_rc.q.out @@ -80,10 +80,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2646 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_reorder.q.out b/ql/src/test/results/clientpositive/join_reorder.q.out index c74dbf7..4be2f50 100644 --- a/ql/src/test/results/clientpositive/join_reorder.q.out +++ b/ql/src/test/results/clientpositive/join_reorder.q.out @@ -91,10 +91,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -156,10 +156,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: 
string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -251,7 +251,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 0 Data size: 33 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false @@ -269,7 +269,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 33 Basic stats: PARTIAL Column stats: NONE - value expressions: _col0 (type: string), _col4 (type: string) + value expressions: _col0 (type: string), _col5 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE @@ -283,12 +283,12 @@ STAGE PLANS: condition map: Right Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col9 + outputColumnNames: _col0, _col1, _col5, _col11 Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col1 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col1 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -348,7 +348,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 0 Data size: 33 Basic stats: PARTIAL Column stats: NONE File Output Operator compressed: false @@ -366,7 +366,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 33 Basic stats: PARTIAL Column stats: NONE - value expressions: _col0 (type: string), _col4 (type: string) + value expressions: _col0 (type: string), _col5 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE @@ -380,12 +380,12 @@ STAGE PLANS: condition map: Right Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col9 + outputColumnNames: _col0, _col1, _col5, _col11 Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col1 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col1 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 36 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -500,10 +500,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4, _col8 + outputColumnNames: _col0, _col5, _col10 Statistics: Num rows: 0 Data size: 66 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: 
string), _col4 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 66 Basic stats: PARTIAL Column stats: NONE File Output Operator @@ -574,10 +574,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} 2 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4, _col8 + outputColumnNames: _col0, _col5, _col10 Statistics: Num rows: 0 Data size: 66 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 66 Basic stats: PARTIAL Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_reorder2.q.out b/ql/src/test/results/clientpositive/join_reorder2.q.out index 3a0abe1..9278ab5 100644 --- a/ql/src/test/results/clientpositive/join_reorder2.q.out +++ b/ql/src/test/results/clientpositive/join_reorder2.q.out @@ -141,10 +141,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -235,7 +235,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -253,7 +253,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE @@ -271,9 +271,9 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} {VALUE._col4} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} {VALUE._col5} 1 {VALUE._col0} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -303,18 +303,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: (_col0 + 1) (type: double) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value 
expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {VALUE._col5} {VALUE._col8} {VALUE._col9} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {VALUE._col6} {VALUE._col10} {VALUE._col11} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_reorder3.q.out b/ql/src/test/results/clientpositive/join_reorder3.q.out index 0192acf..761b8b3 100644 --- a/ql/src/test/results/clientpositive/join_reorder3.q.out +++ b/ql/src/test/results/clientpositive/join_reorder3.q.out @@ -141,10 +141,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -235,7 +235,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -253,7 +253,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE @@ -271,9 +271,9 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} 
{VALUE._col4} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} {VALUE._col5} 1 {VALUE._col0} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -303,18 +303,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: (_col0 + 1) (type: double) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {VALUE._col5} {VALUE._col8} {VALUE._col9} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {VALUE._col6} {VALUE._col10} {VALUE._col11} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_reorder4.q.out b/ql/src/test/results/clientpositive/join_reorder4.q.out index cc15fa9..5eabec3 100644 --- a/ql/src/test/results/clientpositive/join_reorder4.q.out +++ b/ql/src/test/results/clientpositive/join_reorder4.q.out @@ -120,10 +120,10 @@ STAGE PLANS: 0 key1 (type: string) 1 key2 (type: string) 2 key3 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -229,10 +229,10 @@ STAGE PLANS: 0 key1 (type: string) 1 key2 (type: string) 2 key3 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), 
_col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -338,10 +338,10 @@ STAGE PLANS: 0 key1 (type: string) 1 key2 (type: string) 2 key3 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_star.q.out b/ql/src/test/results/clientpositive/join_star.q.out index 7f1fcb2..94c3a8c 100644 --- a/ql/src/test/results/clientpositive/join_star.q.out +++ b/ql/src/test/results/clientpositive/join_star.q.out @@ -176,10 +176,10 @@ STAGE PLANS: keys: 0 d1 (type: int) 1 f1 (type: int) - outputColumnNames: _col0, _col1, _col7 + outputColumnNames: _col0, _col1, _col8 Statistics: Num rows: 4 Data size: 53 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col7 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col8 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 4 Data size: 53 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -261,7 +261,7 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} + 0 {_col0} {_col1} {_col8} 1 {f4} keys: 0 _col3 (type: int) @@ -285,21 +285,21 @@ STAGE PLANS: keys: 0 d1 (type: int) 1 f1 (type: int) - outputColumnNames: _col0, _col1, _col3, _col7 + outputColumnNames: _col0, _col1, _col3, _col8 Statistics: Num rows: 2 Data size: 35 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col1} {_col7} + 0 {_col0} {_col1} {_col8} 1 {f4} keys: 0 _col3 (type: int) 1 f3 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11 + outputColumnNames: _col0, _col1, _col8, _col13 Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col7 (type: int), _col11 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col8 (type: int), _col13 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 38 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -383,10 +383,10 @@ STAGE PLANS: Statistics: Num rows: 1 Data size: 8 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} + 0 {_col0} {_col1} {_col8} 1 {f4} keys: - 0 _col7 (type: int) + 0 _col8 (type: int) 1 f3 (type: int) Stage: Stage-4 @@ -407,21 +407,21 @@ STAGE PLANS: keys: 0 d1 (type: int) 1 f1 (type: int) - outputColumnNames: _col0, _col1, _col7 + outputColumnNames: _col0, _col1, _col8 Statistics: Num rows: 4 Data size: 53 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 
condition expressions: - 0 {_col0} {_col1} {_col7} + 0 {_col0} {_col1} {_col8} 1 {f4} keys: - 0 _col7 (type: int) + 0 _col8 (type: int) 1 f3 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11 + outputColumnNames: _col0, _col1, _col8, _col13 Statistics: Num rows: 4 Data size: 58 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col7 (type: int), _col11 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col8 (type: int), _col13 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 4 Data size: 58 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -499,10 +499,10 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} + 0 {_col0} {_col1} {_col8} 1 {f4} keys: - 0 _col7 (type: int) + 0 _col8 (type: int) 1 f3 (type: int) Stage: Stage-4 @@ -520,21 +520,21 @@ STAGE PLANS: keys: 0 d1 (type: int) 1 f1 (type: int) - outputColumnNames: _col0, _col1, _col7 + outputColumnNames: _col0, _col1, _col8 Statistics: Num rows: 8 Data size: 107 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Outer Join0 to 1 condition expressions: - 0 {_col0} {_col1} {_col7} + 0 {_col0} {_col1} {_col8} 1 {f4} keys: - 0 _col7 (type: int) + 0 _col8 (type: int) 1 f3 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11 + outputColumnNames: _col0, _col1, _col8, _col13 Statistics: Num rows: 8 Data size: 117 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col7 (type: int), _col11 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col8 (type: int), _col13 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 8 Data size: 117 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -643,10 +643,10 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col3} {_col7} + 0 {_col0} {_col1} {_col3} {_col8} 1 {f4} keys: - 0 _col7 (type: int) + 0 _col8 (type: int) 1 f3 (type: int) dim3 TableScan @@ -654,7 +654,7 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} + 0 {_col0} {_col1} {_col8} {_col13} 1 {f6} keys: 0 _col3 (type: int) @@ -665,11 +665,11 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} 1 {f8} 2 {f12} keys: - 0 _col15 (type: int) + 0 _col18 (type: int) 1 f7 (type: int) 2 f11 (type: int) dim5 @@ -678,10 +678,10 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} {_col19} {_col23} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} {_col23} {_col28} 1 {f10} keys: - 0 _col19 (type: int) + 0 _col23 (type: int) 1 f9 (type: int) dim6 TableScan @@ -689,11 +689,11 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} 1 {f8} 2 {f12} keys: - 0 
_col15 (type: int) + 0 _col18 (type: int) 1 f7 (type: int) 2 f11 (type: int) dim7 @@ -702,10 +702,10 @@ STAGE PLANS: Statistics: Num rows: 2 Data size: 16 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} {_col19} {_col23} {_col27} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} {_col23} {_col28} {_col33} 1 {f14} keys: - 0 _col23 (type: int) + 0 _col28 (type: int) 1 f13 (type: int) Stage: Stage-12 @@ -723,68 +723,68 @@ STAGE PLANS: keys: 0 d1 (type: int) 1 f1 (type: int) - outputColumnNames: _col0, _col1, _col3, _col7 + outputColumnNames: _col0, _col1, _col3, _col8 Statistics: Num rows: 6 Data size: 107 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Outer Join0 to 1 condition expressions: - 0 {_col0} {_col1} {_col3} {_col7} + 0 {_col0} {_col1} {_col3} {_col8} 1 {f4} keys: - 0 _col7 (type: int) + 0 _col8 (type: int) 1 f3 (type: int) - outputColumnNames: _col0, _col1, _col3, _col7, _col11 + outputColumnNames: _col0, _col1, _col3, _col8, _col13 Statistics: Num rows: 6 Data size: 117 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Outer Join0 to 1 condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} + 0 {_col0} {_col1} {_col8} {_col13} 1 {f6} keys: 0 _col3 (type: int) 1 f5 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11, _col15 + outputColumnNames: _col0, _col1, _col8, _col13, _col18 Statistics: Num rows: 6 Data size: 128 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Outer Join0 to 1 Left Outer Join0 to 2 condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} 1 {f8} 2 {f12} keys: - 0 _col15 (type: int) + 0 _col18 (type: int) 1 f7 (type: int) 2 f11 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11, _col15, _col19, _col23 + outputColumnNames: _col0, _col1, _col8, _col13, _col18, _col23, _col28 Statistics: Num rows: 13 Data size: 281 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Outer Join0 to 1 condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} {_col19} {_col23} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} {_col23} {_col28} 1 {f10} keys: - 0 _col19 (type: int) + 0 _col23 (type: int) 1 f9 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11, _col15, _col19, _col23, _col27 + outputColumnNames: _col0, _col1, _col8, _col13, _col18, _col23, _col28, _col33 Statistics: Num rows: 14 Data size: 309 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Left Outer Join0 to 1 condition expressions: - 0 {_col0} {_col1} {_col7} {_col11} {_col15} {_col19} {_col23} {_col27} + 0 {_col0} {_col1} {_col8} {_col13} {_col18} {_col23} {_col28} {_col33} 1 {f14} keys: - 0 _col23 (type: int) + 0 _col28 (type: int) 1 f13 (type: int) - outputColumnNames: _col0, _col1, _col7, _col11, _col15, _col19, _col23, _col27, _col31 + outputColumnNames: _col0, _col1, _col8, _col13, _col18, _col23, _col28, _col33, _col38 Statistics: Num rows: 15 Data size: 339 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col7 (type: int), _col11 (type: int), _col15 (type: int), _col19 (type: int), _col27 (type: int), _col23 (type: int), _col31 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col8 (type: int), _col13 (type: int), _col18 (type: int), _col23 (type: int), _col33 (type: int), _col28 (type: int), _col38 (type: int) 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 15 Data size: 339 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_thrift.q.out b/ql/src/test/results/clientpositive/join_thrift.q.out index 67a6966..bbedfa9 100644 --- a/ql/src/test/results/clientpositive/join_thrift.q.out +++ b/ql/src/test/results/clientpositive/join_thrift.q.out @@ -60,10 +60,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col3} - outputColumnNames: _col0, _col12 + outputColumnNames: _col0, _col13 Statistics: Num rows: 221 Data size: 885 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col12 (type: array>) + expressions: _col0 (type: int), _col13 (type: array>) outputColumnNames: _col0, _col1 Statistics: Num rows: 221 Data size: 885 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_vc.q.out b/ql/src/test/results/clientpositive/join_vc.q.out index 1a47f66..b716880 100644 --- a/ql/src/test/results/clientpositive/join_vc.q.out +++ b/ql/src/test/results/clientpositive/join_vc.q.out @@ -46,7 +46,7 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col0} - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -72,9 +72,9 @@ STAGE PLANS: value expressions: key (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint) TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Reduce Operator Tree: Join Operator @@ -83,10 +83,10 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col1} - outputColumnNames: _col8, _col9, _col10 + outputColumnNames: _col10, _col11, _col12 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col10 (type: bigint), _col8 (type: string), _col9 (type: string) + expressions: _col12 (type: bigint), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/join_view.q.out b/ql/src/test/results/clientpositive/join_view.q.out index 12256b1..2f4b8c4 100644 --- a/ql/src/test/results/clientpositive/join_view.q.out +++ b/ql/src/test/results/clientpositive/join_view.q.out @@ -53,10 +53,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col1, _col5 + outputColumnNames: _col1, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col5 (type: int), _col7 (type: string) + expressions: _col1 (type: string), _col6 (type: int), _col8 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/lateral_view.q.out b/ql/src/test/results/clientpositive/lateral_view.q.out index 6c9d578..9ccf8e2 100644 --- a/ql/src/test/results/clientpositive/lateral_view.q.out +++ 
b/ql/src/test/results/clientpositive/lateral_view.q.out @@ -38,10 +38,10 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: int) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -57,10 +57,10 @@ STAGE PLANS: Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: int) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -134,10 +134,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int) + expressions: _col5 (type: int) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -158,10 +158,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int) + expressions: _col5 (type: int) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -201,19 +201,19 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Forward Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int), _col5 (type: string) + expressions: _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -234,10 +234,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE function name: 
explode Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int), _col5 (type: string) + expressions: _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -258,19 +258,19 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Forward Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int) - outputColumnNames: _col4 + expressions: _col5 (type: int) + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int), _col5 (type: string) + expressions: _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -291,10 +291,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: int), _col5 (type: string) + expressions: _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -334,17 +334,17 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Forward Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col5 (type: int) + expressions: _col6 (type: int) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -358,17 +358,17 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Select Operator - expressions: _col4 (type: array) + expressions: _col5 (type: array) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE UDTF Operator Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 0 Data size: 23248 
Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col5 (type: int) + expressions: _col6 (type: int) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -389,17 +389,17 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4 + outputColumnNames: _col5 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Forward Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col5 (type: int) + expressions: _col6 (type: int) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -413,17 +413,17 @@ STAGE PLANS: output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe Select Operator - expressions: _col4 (type: array) + expressions: _col5 (type: array) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE UDTF Operator Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col5 (type: int) + expressions: _col6 (type: int) outputColumnNames: _col0 Statistics: Num rows: 0 Data size: 23248 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -521,10 +521,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 500 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col3 + outputColumnNames: _col4 Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: int) + expressions: _col4 (type: int) outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -545,10 +545,10 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col3 + outputColumnNames: _col4 Statistics: Num rows: 1000 Data size: 28000 Basic stats: COMPLETE Column stats: COMPLETE Select Operator - expressions: _col3 (type: int) + expressions: _col4 (type: int) outputColumnNames: _col0 Statistics: Num rows: 1000 Data size: 0 Basic stats: PARTIAL Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/lateral_view_cp.q.out b/ql/src/test/results/clientpositive/lateral_view_cp.q.out index 22032fd..e3574c7 100644 --- a/ql/src/test/results/clientpositive/lateral_view_cp.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_cp.q.out @@ -67,10 +67,10 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col0} - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col5 (type: array) + expressions: _col6 
(type: array) outputColumnNames: _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Lateral View Forward diff --git a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out index d51b2de..8141d5c 100644 --- a/ql/src/test/results/clientpositive/lateral_view_noalias.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_noalias.q.out @@ -20,10 +20,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: int) + expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -44,10 +44,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: int) + expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -157,10 +157,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: int) + expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -178,10 +178,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: int) + expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -258,10 +258,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: int) + expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -279,10 +279,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: explode Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 
11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: int) + expressions: _col5 (type: string), _col6 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/lateral_view_outer.q.out b/ql/src/test/results/clientpositive/lateral_view_outer.q.out index b36c115..e39b0f8 100644 --- a/ql/src/test/results/clientpositive/lateral_view_outer.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_outer.q.out @@ -24,10 +24,10 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Limit @@ -49,10 +49,10 @@ STAGE PLANS: function name: explode outer lateral view: true Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Limit @@ -116,10 +116,10 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: int) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Limit @@ -141,10 +141,10 @@ STAGE PLANS: function name: explode outer lateral view: true Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: int) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Limit @@ -213,10 +213,10 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 500 Data size: 5610 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 1000 Data size: 11220 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: array), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: array), _col5 (type: string) 
outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 11220 Basic stats: COMPLETE Column stats: NONE Limit @@ -238,10 +238,10 @@ STAGE PLANS: function name: explode outer lateral view: true Lateral View Join Operator - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 1000 Data size: 11220 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: array), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: array), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1000 Data size: 11220 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/lateral_view_ppd.q.out b/ql/src/test/results/clientpositive/lateral_view_ppd.q.out index 1aaf70a..d306c1a 100644 --- a/ql/src/test/results/clientpositive/lateral_view_ppd.q.out +++ b/ql/src/test/results/clientpositive/lateral_view_ppd.q.out @@ -23,10 +23,10 @@ STAGE PLANS: outputColumnNames: value Statistics: Num rows: 28 Data size: 2855 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -44,10 +44,10 @@ STAGE PLANS: Statistics: Num rows: 28 Data size: 2855 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -106,10 +106,10 @@ STAGE PLANS: outputColumnNames: value Statistics: Num rows: 28 Data size: 2855 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 42 Data size: 4282 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 42 Data size: 4282 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -130,10 +130,10 @@ STAGE PLANS: predicate: (col = 1) (type: boolean) Statistics: Num rows: 14 Data size: 1427 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 42 Data size: 4282 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 42 Data size: 4282 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -183,10 +183,10 @@ STAGE PLANS: outputColumnNames: value Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col6 + outputColumnNames: 
_col1, _col7 Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col6 (type: int) + expressions: _col1 (type: string), _col7 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Limit @@ -207,10 +207,10 @@ STAGE PLANS: Statistics: Num rows: 58 Data size: 5812 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col1, _col6 + outputColumnNames: _col1, _col7 Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col6 (type: int) + expressions: _col1 (type: string), _col7 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 116 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Limit @@ -277,19 +277,19 @@ STAGE PLANS: outputColumnNames: value Statistics: Num rows: 28 Data size: 2855 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Lateral View Forward Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) - outputColumnNames: _col1, _col4 + expressions: _col1 (type: string), _col5 (type: int) + outputColumnNames: _col1, _col5 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4, _col5 + outputColumnNames: _col1, _col5, _col6 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -307,10 +307,10 @@ STAGE PLANS: Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col1, _col4, _col5 + outputColumnNames: _col1, _col5, _col6 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -328,19 +328,19 @@ STAGE PLANS: Statistics: Num rows: 28 Data size: 2855 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Lateral View Forward Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) - outputColumnNames: _col1, _col4 + expressions: _col1 (type: string), _col5 (type: int) + outputColumnNames: _col1, _col5 Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4, _col5 + outputColumnNames: _col1, _col5, _col6 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE 
Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -358,10 +358,10 @@ STAGE PLANS: Statistics: Num rows: 56 Data size: 5710 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col1, _col4, _col5 + outputColumnNames: _col1, _col5, _col6 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 112 Data size: 11420 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -440,10 +440,10 @@ STAGE PLANS: outputColumnNames: value Statistics: Num rows: 28 Data size: 2855 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 37 Data size: 3772 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 37 Data size: 3772 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -464,10 +464,10 @@ STAGE PLANS: predicate: (col > 1) (type: boolean) Statistics: Num rows: 9 Data size: 917 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col1, _col4 + outputColumnNames: _col1, _col5 Statistics: Num rows: 37 Data size: 3772 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col1 (type: string), _col4 (type: int) + expressions: _col1 (type: string), _col5 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 37 Data size: 3772 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out index cc4211c..a848bb0 100644 --- a/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out +++ b/ql/src/test/results/clientpositive/limit_pushdown_negative.q.out @@ -43,10 +43,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/lineage1.q.out b/ql/src/test/results/clientpositive/lineage1.q.out index 6ad932b..9469736 100644 --- a/ql/src/test/results/clientpositive/lineage1.q.out +++ b/ql/src/test/results/clientpositive/lineage1.q.out @@ -72,10 +72,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: 
string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -198,10 +198,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/literal_decimal.q.out b/ql/src/test/results/clientpositive/literal_decimal.q.out index 2f2df6a..5d028b5 100644 --- a/ql/src/test/results/clientpositive/literal_decimal.q.out +++ b/ql/src/test/results/clientpositive/literal_decimal.q.out @@ -1,6 +1,6 @@ -PREHOOK: query: EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1 +PREHOOK: query: EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1 PREHOOK: type: QUERY -POSTHOOK: query: EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1 +POSTHOOK: query: EXPLAIN SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1 POSTHOOK: type: QUERY STAGE DEPENDENCIES: Stage-0 is a root stage @@ -14,20 +14,20 @@ STAGE PLANS: alias: src Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: (- 1) (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 (type: decimal(1,0)), 3.14 (type: decimal(3,2)), (- 3.14) (type: decimal(3,2)), 99999999999999999 (type: decimal(17,0)), 99999999999999999.9999999999999 (type: decimal(30,13)), 1E-99 (type: decimal(1,0)), 1E99 (type: decimal(1,0)) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 + expressions: (- 1) (type: decimal(1,0)), 0 (type: decimal(1,0)), 1 (type: decimal(1,0)), 3.14 (type: decimal(3,2)), (- 3.14) (type: decimal(3,2)), 99999999999999999 (type: decimal(17,0)), 99999999999999999.9999999999999 (type: decimal(30,13)), 1E99 (type: decimal(1,0)) + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Limit Number of rows: 1 Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE ListSink -PREHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1 +PREHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1 PREHOOK: type: QUERY PREHOOK: Input: default@src #### A masked pattern was here #### -POSTHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E-99BD, 1E99BD FROM src LIMIT 1 +POSTHOOK: query: SELECT -1BD, 0BD, 1BD, 3.14BD, -3.14BD, 99999999999999999BD, 99999999999999999.9999999999999BD, 1E99BD FROM src LIMIT 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### --1 0 1 3.14 
-3.14 99999999999999999 99999999999999999.9999999999999 0 NULL +-1 0 1 3.14 -3.14 99999999999999999 99999999999999999.9999999999999 NULL diff --git a/ql/src/test/results/clientpositive/louter_join_ppr.q.out b/ql/src/test/results/clientpositive/louter_join_ppr.q.out index fde3f9d..c126200 100644 --- a/ql/src/test/results/clientpositive/louter_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/louter_join_ppr.q.out @@ -291,14 +291,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 6 Data size: 1322 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((_col4 > 15) and (_col4 < 25)) (type: boolean) + predicate: ((_col5 > 15) and (_col5 < 25)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -759,14 +759,14 @@ STAGE PLANS: filter predicates: 0 {(VALUE._col1 = '2008-04-08')} 1 - outputColumnNames: _col0, _col1, _col6, _col7 + outputColumnNames: _col0, _col1, _col7, _col8 Statistics: Num rows: 13 Data size: 2644 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((_col6 > 15) and (_col6 < 25)) (type: boolean) + predicate: ((_col7 > 15) and (_col7 < 25)) (type: boolean) Statistics: Num rows: 1 Data size: 203 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col7 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col7 (type: string), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 203 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1226,14 +1226,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5, _col6 + outputColumnNames: _col0, _col1, _col5, _col6, _col7 Statistics: Num rows: 13 Data size: 2644 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: (((_col4 > 15) and (_col4 < 25)) and (_col6 = '2008-04-08')) (type: boolean) + predicate: (((_col5 > 15) and (_col5 < 25)) and (_col7 = '2008-04-08')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1599,14 +1599,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col6, _col7 + outputColumnNames: _col0, _col1, _col7, _col8 Statistics: Num rows: 6 Data size: 1322 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: 
((_col6 > 15) and (_col6 < 25)) (type: boolean) + predicate: ((_col7 > 15) and (_col7 < 25)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col7 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col7 (type: string), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/mapjoin1.q.out b/ql/src/test/results/clientpositive/mapjoin1.q.out index 77b9b3a..5b94e2c 100644 --- a/ql/src/test/results/clientpositive/mapjoin1.q.out +++ b/ql/src/test/results/clientpositive/mapjoin1.q.out @@ -75,10 +75,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Limit @@ -172,10 +172,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Limit @@ -275,10 +275,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: struct) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: struct) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Limit @@ -366,10 +366,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Limit @@ -461,10 +461,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 
_col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Limit @@ -559,10 +559,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: struct) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: struct) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out b/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out index 8fa3020..1213f41 100644 --- a/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_filter_on_outerjoin.q.out @@ -134,10 +134,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 19 Data size: 3966 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 19 Data size: 3966 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -298,10 +298,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 19 Data size: 3966 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 19 Data size: 3966 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out b/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out index b346394..7fc32b5 100644 --- a/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_memcheck.q.out @@ -74,10 +74,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: 
string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out b/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out index ab3d8c9..ac9d498 100644 --- a/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_subquery2.q.out @@ -145,10 +145,10 @@ STAGE PLANS: keys: 0 id (type: int) 1 id (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col5 (type: int), _col4 (type: string), _col0 (type: int), _col1 (type: string) + expressions: _col6 (type: int), _col5 (type: string), _col0 (type: int), _col1 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Map Join Operator diff --git a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out index 6c47a0b..c261388 100644 --- a/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out +++ b/ql/src/test/results/clientpositive/mapjoin_test_outer.q.out @@ -304,10 +304,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 19 Data size: 88 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 19 Data size: 88 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -1137,10 +1137,10 @@ STAGE PLANS: 0 key (type: string) 1 key (type: string) 2 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 19 Data size: 88 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 19 Data size: 88 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/mergejoins.q.out b/ql/src/test/results/clientpositive/mergejoins.q.out index f552990..e297ffb 100644 --- a/ql/src/test/results/clientpositive/mergejoins.q.out +++ b/ql/src/test/results/clientpositive/mergejoins.q.out @@ -110,7 +110,7 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: 
NONE Column stats: NONE File Output Operator compressed: false @@ -128,7 +128,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: int) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int), _col12 (type: int), _col13 (type: int) + value expressions: _col0 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int), _col15 (type: int), _col16 (type: int) TableScan alias: e Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -146,12 +146,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} {VALUE._col4} {VALUE._col7} {VALUE._col8} {VALUE._col11} {VALUE._col12} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} {VALUE._col5} {VALUE._col9} {VALUE._col10} {VALUE._col14} {VALUE._col15} 1 {VALUE._col0} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13, _col16, _col17 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16, _col20, _col21 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col4 (type: int), _col5 (type: int), _col8 (type: int), _col9 (type: int), _col12 (type: int), _col13 (type: int), _col16 (type: int), _col17 (type: int) + expressions: _col0 (type: int), _col1 (type: int), _col5 (type: int), _col6 (type: int), _col10 (type: int), _col11 (type: int), _col15 (type: int), _col16 (type: int), _col20 (type: int), _col21 (type: int) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -222,10 +222,10 @@ STAGE PLANS: 0 1 {(KEY.reducesinkkey0 < 10)} 2 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/mergejoins_mixed.q.out b/ql/src/test/results/clientpositive/mergejoins_mixed.q.out index 4c6329f..045fc7a 100644 --- a/ql/src/test/results/clientpositive/mergejoins_mixed.q.out +++ b/ql/src/test/results/clientpositive/mergejoins_mixed.q.out @@ -73,10 +73,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: 
string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -154,10 +154,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -235,10 +235,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -316,10 +316,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -389,7 +389,7 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator 
compressed: false @@ -403,11 +403,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col10 (type: string), _col11 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -422,12 +422,12 @@ STAGE PLANS: condition map: Left Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} {VALUE._col7} {VALUE._col8} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col12 (type: string), _col13 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col15 (type: string), _col16 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -495,7 +495,7 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -509,11 +509,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col10 (type: string), _col11 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -528,12 +528,12 @@ STAGE PLANS: condition map: Right Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} {VALUE._col7} {VALUE._col8} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 
(type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col12 (type: string), _col13 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col15 (type: string), _col16 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -601,7 +601,7 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -615,11 +615,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col10 (type: string), _col11 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -634,12 +634,12 @@ STAGE PLANS: condition map: Outer Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} {VALUE._col7} {VALUE._col8} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col12 (type: string), _col13 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col15 (type: string), _col16 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -705,7 +705,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -719,11 +719,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + value 
expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -738,9 +738,9 @@ STAGE PLANS: condition map: Left Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -767,18 +767,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) Reduce Operator Tree: Join Operator condition map: Right Outer Join0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col3} {VALUE._col4} {VALUE._col7} {VALUE._col8} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col4} {VALUE._col5} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -842,7 +842,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -856,11 +856,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -875,9 +875,9 @@ STAGE PLANS: condition map: Left Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: 
_col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -904,18 +904,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) Reduce Operator Tree: Join Operator condition map: Outer Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col3} {VALUE._col4} {VALUE._col7} {VALUE._col8} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col4} {VALUE._col5} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -979,7 +979,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -993,11 +993,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -1012,9 +1012,9 @@ STAGE PLANS: condition map: Right Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -1041,18 +1041,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col1 (type: string), _col5 (type: 
string), _col6 (type: string), _col10 (type: string), _col11 (type: string) Reduce Operator Tree: Join Operator condition map: Left Outer Join0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col3} {VALUE._col4} {VALUE._col7} {VALUE._col8} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col4} {VALUE._col5} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1116,7 +1116,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -1130,11 +1130,11 @@ STAGE PLANS: Map Operator Tree: TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -1149,9 +1149,9 @@ STAGE PLANS: condition map: Right Outer Join0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -1178,18 +1178,18 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col0 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + value expressions: _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) Reduce Operator Tree: Join Operator condition map: Outer Join 0 to 1 condition expressions: - 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col3} {VALUE._col4} {VALUE._col7} {VALUE._col8} + 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col4} {VALUE._col5} {VALUE._col9} {VALUE._col10} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, 
_col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1254,7 +1254,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -1286,24 +1286,24 @@ STAGE PLANS: value expressions: value (type: string) TableScan Reduce Output Operator - key expressions: _col5 (type: string) + key expressions: _col6 (type: string) sort order: + - Map-reduce partition columns: _col5 (type: string) + Map-reduce partition columns: _col6 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) Reduce Operator Tree: Join Operator condition map: Left Outer Join0 to 1 Left Outer Join1 to 2 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {KEY.reducesinkkey0} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/multiMapJoin1.q.out b/ql/src/test/results/clientpositive/multiMapJoin1.q.out index 3ee8c23..07be9d8 100644 --- a/ql/src/test/results/clientpositive/multiMapJoin1.q.out +++ b/ql/src/test/results/clientpositive/multiMapJoin1.q.out @@ -885,9 +885,9 @@ STAGE PLANS: keys: 0 key1 (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 File Output Operator compressed: false @@ -1382,9 +1382,9 @@ STAGE 
PLANS: keys: 0 key1 (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 File Output Operator compressed: false @@ -1428,10 +1428,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1706,10 +1706,10 @@ STAGE PLANS: keys: 0 key1 (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE Map Join Operator @@ -2010,10 +2010,10 @@ STAGE PLANS: keys: 0 key1 (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE Map Join Operator @@ -2367,9 +2367,9 @@ STAGE PLANS: keys: 0 key1 (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 File Output Operator compressed: false @@ -2864,9 +2864,9 @@ STAGE PLANS: keys: 0 key1 (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 
(type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 File Output Operator compressed: false @@ -2910,10 +2910,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col2, _col5 + outputColumnNames: _col0, _col1, _col2, _col6 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col2 (type: string), _col2 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col2 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 687 Data size: 9924 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out index 83df255..7d84614 100644 --- a/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out +++ b/ql/src/test/results/clientpositive/multi_insert_lateral_view.q.out @@ -84,10 +84,10 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) + expressions: _col0 (type: string), _col5 (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -106,10 +106,10 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) + expressions: _col0 (type: string), _col5 (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -127,10 +127,10 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) + expressions: _col0 (type: string), _col5 (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -149,10 +149,10 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) + expressions: _col0 (type: string), _col5 (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -283,9 +283,9 @@ POSTHOOK: Input: 
default@src_10 POSTHOOK: Output: default@src_lv1 POSTHOOK: Output: default@src_lv2 POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv1.value SIMPLE [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv1.value SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv2.value SIMPLE [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv2.value SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from src_lv1 PREHOOK: type: QUERY PREHOOK: Input: default@src_lv1 @@ -384,14 +384,14 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(_col4) + aggregations: sum(_col5) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 @@ -410,14 +410,14 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(_col4) + aggregations: sum(_col5) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 @@ -435,14 +435,14 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(_col4) + aggregations: sum(_col5) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 @@ -461,14 +461,14 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + 
outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(_col4) + aggregations: sum(_col5) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 @@ -570,9 +570,9 @@ POSTHOOK: Input: default@src_10 POSTHOOK: Output: default@src_lv1 POSTHOOK: Output: default@src_lv2 POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from src_lv1 PREHOOK: type: QUERY PREHOOK: Input: default@src_lv1 @@ -655,14 +655,14 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(_col4) + aggregations: sum(_col5) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 @@ -681,14 +681,14 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(_col4) + aggregations: sum(_col5) keys: _col0 (type: string) mode: hash outputColumnNames: _col0, _col1 @@ -846,7 +846,7 @@ POSTHOOK: Output: default@src_lv1 POSTHOOK: Output: default@src_lv2 POSTHOOK: Output: default@src_lv3 POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ] POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] @@ -946,15 +946,15 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE 
Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: double), _col0 (type: string) - outputColumnNames: _col4, _col0 + expressions: _col5 (type: double), _col0 (type: string) + outputColumnNames: _col5, _col0 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(DISTINCT _col0) - keys: _col4 (type: double), _col0 (type: string) + keys: _col5 (type: double), _col0 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -971,15 +971,15 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: double), _col0 (type: string) - outputColumnNames: _col4, _col0 + expressions: _col5 (type: double), _col0 (type: string) + outputColumnNames: _col5, _col0 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(DISTINCT _col0) - keys: _col4 (type: double), _col0 (type: string) + keys: _col5 (type: double), _col0 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -995,15 +995,15 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: double), _col0 (type: string) - outputColumnNames: _col4, _col0 + expressions: _col5 (type: double), _col0 (type: string) + outputColumnNames: _col5, _col0 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(DISTINCT _col0) - keys: _col4 (type: double), _col0 (type: string) + keys: _col5 (type: double), _col0 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -1021,15 +1021,15 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: double), _col0 (type: string) - outputColumnNames: _col4, _col0 + expressions: _col5 (type: double), _col0 (type: string) + outputColumnNames: _col5, _col0 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: sum(DISTINCT _col0) - keys: _col4 (type: double), _col0 (type: string) + keys: _col5 (type: double), _col0 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -1190,9 +1190,9 @@ POSTHOOK: Input: default@src_10 POSTHOOK: Output: default@src_lv1 POSTHOOK: Output: default@src_lv2 POSTHOOK: Output: default@src_lv3 -POSTHOOK: 
Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv1.key SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv1.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv2.key SIMPLE [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv2.value SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ] POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] @@ -1321,15 +1321,15 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(DISTINCT _col4) - keys: _col0 (type: string), _col4 (type: double) + aggregations: sum(DISTINCT _col5) + keys: _col0 (type: string), _col5 (type: double) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -1346,15 +1346,15 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(DISTINCT _col4) - keys: _col0 (type: string), _col4 (type: double) + aggregations: sum(DISTINCT _col5) + keys: _col0 (type: string), _col5 (type: double) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -1370,15 +1370,15 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(DISTINCT _col4) - keys: _col0 (type: string), _col4 (type: double) + aggregations: sum(DISTINCT _col5) + keys: _col0 (type: string), _col5 (type: double) mode: hash outputColumnNames: _col0, _col1, _col2 
Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -1396,15 +1396,15 @@ STAGE PLANS: Statistics: Num rows: 10 Data size: 104 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: double) - outputColumnNames: _col0, _col4 + expressions: _col0 (type: string), _col5 (type: double) + outputColumnNames: _col0, _col5 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(DISTINCT _col4) - keys: _col0 (type: string), _col4 (type: double) + aggregations: sum(DISTINCT _col5) + keys: _col0 (type: string), _col5 (type: double) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 20 Data size: 208 Basic stats: COMPLETE Column stats: NONE @@ -1606,9 +1606,9 @@ POSTHOOK: Output: default@src_lv2 POSTHOOK: Output: default@src_lv3 POSTHOOK: Output: default@src_lv4 POSTHOOK: Lineage: src_lv1.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv1.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv2.key SCRIPT [(src_10)src_10.FieldSchema(name:key, type:string, comment:null), ] -POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: src_lv2.value EXPRESSION [(src_10)src_10.FieldSchema(name:ROW__ID, type:struct, comment:), ] POSTHOOK: Lineage: src_lv3.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ] POSTHOOK: Lineage: src_lv3.value EXPRESSION [(src_10)src_10.null, ] POSTHOOK: Lineage: src_lv4.key SIMPLE [(src_10)src_10.FieldSchema(name:value, type:string, comment:null), ] diff --git a/ql/src/test/results/clientpositive/multi_join_union.q.out b/ql/src/test/results/clientpositive/multi_join_union.q.out index 62e955e..3e52390 100644 --- a/ql/src/test/results/clientpositive/multi_join_union.q.out +++ b/ql/src/test/results/clientpositive/multi_join_union.q.out @@ -88,10 +88,10 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col4} {_col5} + 0 {_col0} {_col1} {_col5} {_col6} 1 {_col0} keys: - 0 _col5 (type: string) + 0 _col6 (type: string) 1 _col1 (type: string) c-subquery2:a-subquery2:src14 TableScan @@ -112,10 +112,10 @@ STAGE PLANS: Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col0} {_col1} {_col4} {_col5} + 0 {_col0} {_col1} {_col5} {_col6} 1 {_col0} keys: - 0 _col5 (type: string) + 0 _col6 (type: string) 1 _col1 (type: string) Stage: Stage-6 @@ -136,21 +136,21 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col1} {_col4} {_col5} + 0 {_col0} {_col1} {_col5} {_col6} 1 {_col0} {_col1} keys: - 0 _col5 (type: string) + 0 _col6 
(type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/no_hooks.q.out b/ql/src/test/results/clientpositive/no_hooks.q.out index 5c3abd5..16c74b5 100644 --- a/ql/src/test/results/clientpositive/no_hooks.q.out +++ b/ql/src/test/results/clientpositive/no_hooks.q.out @@ -38,10 +38,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/nonmr_fetch.q.out b/ql/src/test/results/clientpositive/nonmr_fetch.q.out index 27bd3d4..2d7496b 100644 --- a/ql/src/test/results/clientpositive/nonmr_fetch.q.out +++ b/ql/src/test/results/clientpositive/nonmr_fetch.q.out @@ -1121,10 +1121,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/optional_outer.q.out b/ql/src/test/results/clientpositive/optional_outer.q.out index c0daa31..d87a126 100644 --- a/ql/src/test/results/clientpositive/optional_outer.q.out +++ b/ql/src/test/results/clientpositive/optional_outer.q.out @@ -35,10 +35,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ 
-92,10 +92,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -149,10 +149,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -206,10 +206,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -263,10 +263,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -320,10 +320,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/orc_ppd_decimal.q.out b/ql/src/test/results/clientpositive/orc_ppd_decimal.q.out index 0c11ea8..6ddff10 100644 --- 
a/ql/src/test/results/clientpositive/orc_ppd_decimal.q.out +++ b/ql/src/test/results/clientpositive/orc_ppd_decimal.q.out @@ -254,6 +254,42 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@newtypesorc #### A masked pattern was here #### 81475875500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +-252951929000 +PREHOOK: query: select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal) +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where d<=cast('11.22' as decimal) +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +-252951929000 +PREHOOK: query: select sum(hash(*)) from newtypesorc where d<=11.22BD +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where d<=11.22BD +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +81475875500 +PREHOOK: query: select sum(hash(*)) from newtypesorc where d<=11.22BD +PREHOOK: type: QUERY +PREHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +POSTHOOK: query: select sum(hash(*)) from newtypesorc where d<=11.22BD +POSTHOOK: type: QUERY +POSTHOOK: Input: default@newtypesorc +#### A masked pattern was here #### +81475875500 PREHOOK: query: select sum(hash(*)) from newtypesorc where d<=12 PREHOOK: type: QUERY PREHOOK: Input: default@newtypesorc diff --git a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out index f25b442..bc8d242 100644 --- a/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out +++ b/ql/src/test/results/clientpositive/orc_predicate_pushdown.q.out @@ -259,7 +259,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_pred #### A masked pattern was here #### -124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.4 yard duty +124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.40 yard duty PREHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1 PREHOOK: type: QUERY PREHOOK: Input: default@orc_pred @@ -268,7 +268,7 @@ POSTHOOK: query: SELECT * FROM orc_pred WHERE t>2 limit 1 POSTHOOK: type: QUERY POSTHOOK: Input: default@orc_pred #### A masked pattern was here #### -124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.4 yard duty +124 336 65664 4294967435 74.72 42.47 true bob davidson 2013-03-01 09:11:58.703302 45.40 yard duty PREHOOK: query: SELECT SUM(HASH(t)) FROM orc_pred WHERE t IS NOT NULL AND t < 0 diff --git a/ql/src/test/results/clientpositive/outer_join_ppr.q.out b/ql/src/test/results/clientpositive/outer_join_ppr.q.out index 1951b0f..925d806 100644 --- a/ql/src/test/results/clientpositive/outer_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/outer_join_ppr.q.out @@ -382,14 +382,14 @@ STAGE PLANS: filter predicates: 0 1 {(VALUE._col1 = '2008-04-08')} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 
Statistics: Num rows: 127 Data size: 25572 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((((_col4 > 15) and (_col4 < 25)) and (_col0 > 10)) and (_col0 < 20)) (type: boolean) + predicate: ((((_col5 > 15) and (_col5 < 25)) and (_col0 > 10)) and (_col0 < 20)) (type: boolean) Statistics: Num rows: 1 Data size: 201 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 201 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -841,14 +841,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5, _col6 + outputColumnNames: _col0, _col1, _col5, _col6, _col7 Statistics: Num rows: 127 Data size: 25572 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: (((((_col4 > 15) and (_col4 < 25)) and (_col6 = '2008-04-08')) and (_col0 > 10)) and (_col0 < 20)) (type: boolean) + predicate: (((((_col5 > 15) and (_col5 < 25)) and (_col7 = '2008-04-08')) and (_col0 > 10)) and (_col0 < 20)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/parquet_decimal.q.out b/ql/src/test/results/clientpositive/parquet_decimal.q.out index cd87b92..5767c57 100644 --- a/ql/src/test/results/clientpositive/parquet_decimal.q.out +++ b/ql/src/test/results/clientpositive/parquet_decimal.q.out @@ -63,9 +63,9 @@ Mary 4.33 Cluck 5.96 Tom -12.25 Mary 33.33 -Tom 19 -Beck 0 -Beck 79.9 +Tom 19.00 +Beck 0.00 +Beck 79.90 PREHOOK: query: TRUNCATE TABLE parq_dec PREHOOK: type: TRUNCATETABLE PREHOOK: Output: default@parq_dec @@ -140,12 +140,12 @@ POSTHOOK: Input: default@parq_dec1 77.3 55.7 4.3 -6 +6.0 12.3 33.3 0.2 3.2 -8 +8.0 PREHOOK: query: DROP TABLE dec PREHOOK: type: DROPTABLE PREHOOK: Input: default@dec diff --git a/ql/src/test/results/clientpositive/parquet_decimal1.q.out b/ql/src/test/results/clientpositive/parquet_decimal1.q.out index bd146f8..0f71b1e 100644 --- a/ql/src/test/results/clientpositive/parquet_decimal1.q.out +++ b/ql/src/test/results/clientpositive/parquet_decimal1.q.out @@ -28,7 +28,7 @@ POSTHOOK: query: SELECT * FROM dec_comp POSTHOOK: type: QUERY POSTHOOK: Input: default@dec_comp #### A masked pattern was here #### -[3.14,6.28,7.3] {"k1":92.77,"k2":29.39} {"i":5,"d":9.03} +[3.14,6.28,7.30] {"k1":92.77,"k2":29.39} {"i":5,"d":9.03} [12.4,1.33,0.34] {"k2":2.79,"k4":29.09} {"i":11,"d":0.03} PREHOOK: query: DROP TABLE IF EXISTS parq_dec_comp PREHOOK: type: DROPTABLE @@ -72,8 +72,8 @@ POSTHOOK: query: SELECT * FROM parq_dec_comp POSTHOOK: type: QUERY POSTHOOK: Input: default@parq_dec_comp #### A masked pattern was here #### -[3.14,6.28,7.3] {"k2":29.39,"k1":92.77} {"i":5,"d":9.03} -[12.4,1.33,0.34] {"k4":29.09,"k2":2.79} {"i":11,"d":0.03} +[3.14,6.28,7.30] {"k2":29.39,"k1":92.77} 
{"i":5,"d":9.03} +[12.40,1.33,0.34] {"k4":29.09,"k2":2.79} {"i":11,"d":0.03} PREHOOK: query: DROP TABLE dec_comp PREHOOK: type: DROPTABLE PREHOOK: Input: default@dec_comp diff --git a/ql/src/test/results/clientpositive/parquet_join.q.out b/ql/src/test/results/clientpositive/parquet_join.q.out new file mode 100644 index 0000000..f3caae7 --- /dev/null +++ b/ql/src/test/results/clientpositive/parquet_join.q.out @@ -0,0 +1,323 @@ +PREHOOK: query: drop table if exists staging +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists staging +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists parquet_jointable1 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists parquet_jointable1 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists parquet_jointable2 +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists parquet_jointable2 +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists parquet_jointable1_bucketed_sorted +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists parquet_jointable1_bucketed_sorted +POSTHOOK: type: DROPTABLE +PREHOOK: query: drop table if exists parquet_jointable2_bucketed_sorted +PREHOOK: type: DROPTABLE +POSTHOOK: query: drop table if exists parquet_jointable2_bucketed_sorted +POSTHOOK: type: DROPTABLE +PREHOOK: query: create table staging (key int, value string) stored as textfile +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@staging +POSTHOOK: query: create table staging (key int, value string) stored as textfile +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@staging +PREHOOK: query: insert into table staging select distinct key, value from src order by key limit 2 +PREHOOK: type: QUERY +PREHOOK: Input: default@src +PREHOOK: Output: default@staging +POSTHOOK: query: insert into table staging select distinct key, value from src order by key limit 2 +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +POSTHOOK: Output: default@staging +POSTHOOK: Lineage: staging.key EXPRESSION [(src)src.FieldSchema(name:key, type:string, comment:default), ] +POSTHOOK: Lineage: staging.value SIMPLE [(src)src.FieldSchema(name:value, type:string, comment:default), ] +PREHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@staging +POSTHOOK: query: create table parquet_jointable1 stored as parquet as select * from staging +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@staging +POSTHOOK: Output: default@parquet_jointable1 +PREHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging +PREHOOK: type: CREATETABLE_AS_SELECT +PREHOOK: Input: default@staging +POSTHOOK: query: create table parquet_jointable2 stored as parquet as select key,key+1,concat(value,"value") as myvalue from staging +POSTHOOK: type: CREATETABLE_AS_SELECT +POSTHOOK: Input: default@staging +POSTHOOK: Output: default@parquet_jointable2 +PREHOOK: query: -- MR join + +explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- MR join + +explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + 
TableScan + alias: p2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + value expressions: myvalue (type: string) + TableScan + alias: p1 + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + Reduce Output Operator + key expressions: key (type: int) + sort order: + + Map-reduce partition columns: key (type: int) + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + Reduce Operator Tree: + Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 {VALUE._col1} + outputColumnNames: _col7 + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col7 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_jointable1 +PREHOOK: Input: default@parquet_jointable2 +#### A masked pattern was here #### +POSTHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_jointable1 +POSTHOOK: Input: default@parquet_jointable2 +#### A masked pattern was here #### +val_0value +val_10value +PREHOOK: query: -- The two tables involved in the join have differing number of columns(table1-2,table2-3). In case of Map and SMB join, +-- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table +-- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table + +-- Map join + +explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: type: QUERY +POSTHOOK: query: -- The two tables involved in the join have differing number of columns(table1-2,table2-3). 
In case of Map and SMB join, +-- when the second table is loaded, the column indices in hive.io.file.readcolumn.ids refer to columns of both the first and the second table +-- and hence the parquet schema/types passed to ParquetInputSplit should contain only the column indexes belonging to second/current table + +-- Map join + +explain select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-4 is a root stage + Stage-3 depends on stages: Stage-4 + Stage-0 depends on stages: Stage-3 + +STAGE PLANS: + Stage: Stage-4 + Map Reduce Local Work + Alias -> Map Local Tables: + p1 + Fetch Operator + limit: -1 + Alias -> Map Local Operator Tree: + p1 + TableScan + alias: p1 + Statistics: Num rows: 2 Data size: 4 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + HashTable Sink Operator + condition expressions: + 0 + 1 {myvalue} + keys: + 0 key (type: int) + 1 key (type: int) + + Stage: Stage-3 + Map Reduce + Map Operator Tree: + TableScan + alias: p2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 + 1 {myvalue} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col7 + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + Select Operator + expressions: _col7 (type: string) + outputColumnNames: _col0 + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + File Output Operator + compressed: false + Statistics: Num rows: 1 Data size: 2 Basic stats: COMPLETE Column stats: NONE + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + Local Work: + Map Reduce Local Work + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_jointable1 +PREHOOK: Input: default@parquet_jointable2 +#### A masked pattern was here #### +POSTHOOK: query: select p2.myvalue from parquet_jointable1 p1 join parquet_jointable2 p2 on p1.key=p2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_jointable1 +POSTHOOK: Input: default@parquet_jointable2 +#### A masked pattern was here #### +val_0value +val_10value +PREHOOK: query: -- SMB join + +create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@parquet_jointable1_bucketed_sorted +POSTHOOK: query: -- SMB join + +create table parquet_jointable1_bucketed_sorted (key int,value string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted +PREHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key +PREHOOK: type: 
QUERY +PREHOOK: Input: default@staging +PREHOOK: Output: default@parquet_jointable1_bucketed_sorted +POSTHOOK: query: insert overwrite table parquet_jointable1_bucketed_sorted select key,concat(value,"value1") as value from staging cluster by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@staging +POSTHOOK: Output: default@parquet_jointable1_bucketed_sorted +POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_jointable1_bucketed_sorted.value EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: create table parquet_jointable2_bucketed_sorted (key int,value1 string, value2 string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +PREHOOK: type: CREATETABLE +PREHOOK: Output: database:default +PREHOOK: Output: default@parquet_jointable2_bucketed_sorted +POSTHOOK: query: create table parquet_jointable2_bucketed_sorted (key int,value1 string, value2 string) clustered by (key) sorted by (key ASC) INTO 1 BUCKETS stored as parquet +POSTHOOK: type: CREATETABLE +POSTHOOK: Output: database:default +POSTHOOK: Output: default@parquet_jointable2_bucketed_sorted +PREHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key +PREHOOK: type: QUERY +PREHOOK: Input: default@staging +PREHOOK: Output: default@parquet_jointable2_bucketed_sorted +POSTHOOK: query: insert overwrite table parquet_jointable2_bucketed_sorted select key,concat(value,"value2-1") as value1,concat(value,"value2-2") as value2 from staging cluster by key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@staging +POSTHOOK: Output: default@parquet_jointable2_bucketed_sorted +POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.key SIMPLE [(staging)staging.FieldSchema(name:key, type:int, comment:null), ] +POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value1 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +POSTHOOK: Lineage: parquet_jointable2_bucketed_sorted.value2 EXPRESSION [(staging)staging.FieldSchema(name:value, type:string, comment:null), ] +PREHOOK: query: explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key +PREHOOK: type: QUERY +POSTHOOK: query: explain select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key +POSTHOOK: type: QUERY +STAGE DEPENDENCIES: + Stage-1 is a root stage + Stage-0 depends on stages: Stage-1 + +STAGE PLANS: + Stage: Stage-1 + Map Reduce + Map Operator Tree: + TableScan + alias: p2 + Statistics: Num rows: 2 Data size: 6 Basic stats: COMPLETE Column stats: NONE + Filter Operator + predicate: key is not null (type: boolean) + Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE + Sorted Merge Bucket Map Join Operator + condition map: + Inner Join 0 to 1 + condition expressions: + 0 {value} + 1 {value2} + keys: + 0 key (type: int) + 1 key (type: int) + outputColumnNames: _col1, _col7 + Select Operator + expressions: _col1 (type: string), _col7 (type: string) + outputColumnNames: _col0, _col1 + File Output Operator + compressed: false + table: + input format: org.apache.hadoop.mapred.TextInputFormat + output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat + serde: 
org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe + + Stage: Stage-0 + Fetch Operator + limit: -1 + Processor Tree: + ListSink + +PREHOOK: query: select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key +PREHOOK: type: QUERY +PREHOOK: Input: default@parquet_jointable1_bucketed_sorted +PREHOOK: Input: default@parquet_jointable2_bucketed_sorted +#### A masked pattern was here #### +POSTHOOK: query: select p1.value,p2.value2 from parquet_jointable1_bucketed_sorted p1 join parquet_jointable2_bucketed_sorted p2 on p1.key=p2.key +POSTHOOK: type: QUERY +POSTHOOK: Input: default@parquet_jointable1_bucketed_sorted +POSTHOOK: Input: default@parquet_jointable2_bucketed_sorted +#### A masked pattern was here #### +val_0value1 val_0value2-2 +val_10value1 val_10value2-2 diff --git a/ql/src/test/results/clientpositive/parquet_types.q.out b/ql/src/test/results/clientpositive/parquet_types.q.out index 3acb052..803a826 100644 --- a/ql/src/test/results/clientpositive/parquet_types.q.out +++ b/ql/src/test/results/clientpositive/parquet_types.q.out @@ -13,7 +13,9 @@ PREHOOK: query: CREATE TABLE parquet_types_staging ( cfloat float, cdouble double, cstring1 string, - t timestamp + t timestamp, + cchar char(5), + cvarchar varchar(10) ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' PREHOOK: type: CREATETABLE @@ -26,7 +28,9 @@ POSTHOOK: query: CREATE TABLE parquet_types_staging ( cfloat float, cdouble double, cstring1 string, - t timestamp + t timestamp, + cchar char(5), + cvarchar varchar(10) ) ROW FORMAT DELIMITED FIELDS TERMINATED BY '|' POSTHOOK: type: CREATETABLE @@ -39,7 +43,9 @@ PREHOOK: query: CREATE TABLE parquet_types ( cfloat float, cdouble double, cstring1 string, - t timestamp + t timestamp, + cchar char(5), + cvarchar varchar(10) ) STORED AS PARQUET PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -51,7 +57,9 @@ POSTHOOK: query: CREATE TABLE parquet_types ( cfloat float, cdouble double, cstring1 string, - t timestamp + t timestamp, + cchar char(5), + cvarchar varchar(10) ) STORED AS PARQUET POSTHOOK: type: CREATETABLE POSTHOOK: Output: database:default @@ -72,12 +80,14 @@ POSTHOOK: query: INSERT OVERWRITE TABLE parquet_types SELECT * FROM parquet_type POSTHOOK: type: QUERY POSTHOOK: Input: default@parquet_types_staging POSTHOOK: Output: default@parquet_types +POSTHOOK: Lineage: parquet_types.cchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cchar, type:char(5), comment:null), ] POSTHOOK: Lineage: parquet_types.cdouble SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cdouble, type:double, comment:null), ] POSTHOOK: Lineage: parquet_types.cfloat SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cfloat, type:float, comment:null), ] POSTHOOK: Lineage: parquet_types.cint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cint, type:int, comment:null), ] POSTHOOK: Lineage: parquet_types.csmallint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:csmallint, type:smallint, comment:null), ] POSTHOOK: Lineage: parquet_types.cstring1 SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cstring1, type:string, comment:null), ] POSTHOOK: Lineage: parquet_types.ctinyint SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:ctinyint, type:tinyint, comment:null), ] +POSTHOOK: Lineage: parquet_types.cvarchar SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:cvarchar, 
type:varchar(10), comment:null), ] POSTHOOK: Lineage: parquet_types.t SIMPLE [(parquet_types_staging)parquet_types_staging.FieldSchema(name:t, type:timestamp, comment:null), ] PREHOOK: query: SELECT * FROM parquet_types PREHOOK: type: QUERY @@ -87,27 +97,27 @@ POSTHOOK: query: SELECT * FROM parquet_types POSTHOOK: type: QUERY POSTHOOK: Input: default@parquet_types #### A masked pattern was here #### -100 1 1 1.0 0.0 abc 2011-01-01 01:01:01.111111111 -101 2 2 1.1 0.3 def 2012-02-02 02:02:02.222222222 -102 3 3 1.2 0.6 ghi 2013-03-03 03:03:03.333333333 -103 1 4 1.3 0.9 jkl 2014-04-04 04:04:04.444444444 -104 2 5 1.4 1.2 mno 2015-05-05 05:05:05.555555555 -105 3 1 1.0 1.5 pqr 2016-06-06 06:06:06.666666666 -106 1 2 1.1 1.8 stu 2017-07-07 07:07:07.777777777 -107 2 3 1.2 2.1 vwx 2018-08-08 08:08:08.888888888 -108 3 4 1.3 2.4 yza 2019-09-09 09:09:09.999999999 -109 1 5 1.4 2.7 bcd 2020-10-10 10:10:10.101010101 -110 2 1 1.0 3.0 efg 2021-11-11 11:11:11.111111111 -111 3 2 1.1 3.3 hij 2022-12-12 12:12:12.121212121 -112 1 3 1.2 3.6 klm 2023-01-02 13:13:13.131313131 -113 2 4 1.3 3.9 nop 2024-02-02 14:14:14.141414141 -114 3 5 1.4 4.2 qrs 2025-03-03 15:15:15.151515151 -115 1 1 1.0 4.5 tuv 2026-04-04 16:16:16.161616161 -116 2 2 1.1 4.8 wxy 2027-05-05 17:17:17.171717171 -117 3 3 1.2 5.1 zab 2028-06-06 18:18:18.181818181 -118 1 4 1.3 5.4 cde 2029-07-07 19:19:19.191919191 -119 2 5 1.4 5.7 fgh 2030-08-08 20:20:20.202020202 -120 3 1 1.0 6.0 ijk 2031-09-09 21:21:21.212121212 +100 1 1 1.0 0.0 abc 2011-01-01 01:01:01.111111111 a a +101 2 2 1.1 0.3 def 2012-02-02 02:02:02.222222222 ab ab +102 3 3 1.2 0.6 ghi 2013-03-03 03:03:03.333333333 abc abc +103 1 4 1.3 0.9 jkl 2014-04-04 04:04:04.444444444 abcd abcd +104 2 5 1.4 1.2 mno 2015-05-05 05:05:05.555555555 abcde abcde +105 3 1 1.0 1.5 pqr 2016-06-06 06:06:06.666666666 abcde abcdef +106 1 2 1.1 1.8 stu 2017-07-07 07:07:07.777777777 abcde abcdefg +107 2 3 1.2 2.1 vwx 2018-08-08 08:08:08.888888888 bcdef abcdefgh +108 3 4 1.3 2.4 yza 2019-09-09 09:09:09.999999999 cdefg abcdefghij +109 1 5 1.4 2.7 bcd 2020-10-10 10:10:10.101010101 klmno abcdedef +110 2 1 1.0 3.0 efg 2021-11-11 11:11:11.111111111 pqrst abcdede +111 3 2 1.1 3.3 hij 2022-12-12 12:12:12.121212121 nopqr abcded +112 1 3 1.2 3.6 klm 2023-01-02 13:13:13.131313131 opqrs abcdd +113 2 4 1.3 3.9 nop 2024-02-02 14:14:14.141414141 pqrst abc +114 3 5 1.4 4.2 qrs 2025-03-03 15:15:15.151515151 qrstu b +115 1 1 1.0 4.5 tuv 2026-04-04 16:16:16.161616161 rstuv abcded +116 2 2 1.1 4.8 wxy 2027-05-05 17:17:17.171717171 stuvw abcded +117 3 3 1.2 5.1 zab 2028-06-06 18:18:18.181818181 tuvwx abcded +118 1 4 1.3 5.4 cde 2029-07-07 19:19:19.191919191 uvwzy abcdede +119 2 5 1.4 5.7 fgh 2030-08-08 20:20:20.202020202 vwxyz abcdede +120 3 1 1.0 6.0 ijk 2031-09-09 21:21:21.212121212 wxyza abcde PREHOOK: query: SELECT ctinyint, MAX(cint), MIN(csmallint), diff --git a/ql/src/test/results/clientpositive/pcr.q.out b/ql/src/test/results/clientpositive/pcr.q.out index 6155250..b09b33b 100644 --- a/ql/src/test/results/clientpositive/pcr.q.out +++ b/ql/src/test/results/clientpositive/pcr.q.out @@ -2784,10 +2784,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8 Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), 
_col5 (type: int), _col6 (type: string), _col7 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col6 (type: int), _col7 (type: string), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -3127,10 +3127,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8 Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col6 (type: int), _col7 (type: string), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 11 Data size: 88 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_join5.q.out b/ql/src/test/results/clientpositive/ppd_join5.q.out index 1461723..16861c4 100644 --- a/ql/src/test/results/clientpositive/ppd_join5.q.out +++ b/ql/src/test/results/clientpositive/ppd_join5.q.out @@ -82,7 +82,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -98,7 +98,7 @@ STAGE PLANS: Reduce Output Operator sort order: Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int) + value expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int) TableScan alias: c Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE @@ -114,12 +114,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col6} 1 {VALUE._col1} - outputColumnNames: _col0, _col1, _col5, _col9 + outputColumnNames: _col0, _col1, _col6, _col11 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int), _col9 (type: int) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -190,7 +190,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE File Output Operator compressed: false @@ -206,7 +206,7 @@ STAGE PLANS: Reduce Output Operator sort order: Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int) + value expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int) TableScan alias: c Statistics: 
Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE @@ -222,15 +222,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col5} + 0 {VALUE._col0} {VALUE._col1} {VALUE._col6} 1 {VALUE._col1} - outputColumnNames: _col0, _col1, _col5, _col9 + outputColumnNames: _col0, _col1, _col6, _col11 Statistics: Num rows: 1 Data size: 3 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col5 > 1) or (_col9 > 1)) (type: boolean) + predicate: ((_col6 > 1) or (_col11 > 1)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: int), _col9 (type: int) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: int), _col11 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_join_filter.q.out b/ql/src/test/results/clientpositive/ppd_join_filter.q.out index 15a2a7a..71ecf21 100644 --- a/ql/src/test/results/clientpositive/ppd_join_filter.q.out +++ b/ql/src/test/results/clientpositive/ppd_join_filter.q.out @@ -343,10 +343,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col2} {VALUE._col3} - outputColumnNames: _col0, _col7, _col8 + outputColumnNames: _col0, _col8, _col9 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col7 (type: double), _col8 (type: double) + expressions: _col0 (type: string), _col8 (type: double), _col9 (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -756,10 +756,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col2} {VALUE._col3} - outputColumnNames: _col0, _col7, _col8 + outputColumnNames: _col0, _col8, _col9 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col7 (type: double), _col8 (type: double) + expressions: _col0 (type: string), _col8 (type: double), _col9 (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1165,10 +1165,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col2} {VALUE._col3} - outputColumnNames: _col0, _col7, _col8 + outputColumnNames: _col0, _col8, _col9 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col7 (type: double), _col8 (type: double) + expressions: _col0 (type: string), _col8 (type: double), _col9 (type: double) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1578,10 +1578,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col2} {VALUE._col3} - outputColumnNames: _col0, _col7, _col8 + outputColumnNames: _col0, _col8, _col9 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col7 (type: double), _col8 (type: double) + expressions: _col0 (type: string), _col8 (type: double), _col9 (type: double) outputColumnNames: _col0, _col1, _col2 
Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out index 704c61a..4fa988d 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join1.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join1.q.out @@ -55,13 +55,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 661 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((_col0 > 10) and (_col0 < 20)) and (_col4 > 15)) and (_col4 < 25)) (type: boolean) + predicate: ((((_col0 > 10) and (_col0 < 20)) and (_col5 > 15)) and (_col5 < 25)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -161,13 +161,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 661 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((_col4 > 15) and (_col4 < 25)) (type: boolean) + predicate: ((_col5 > 15) and (_col5 < 25)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out index 6213a11..00eed04 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join2.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join2.q.out @@ -55,13 +55,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 661 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((_col0 > '10') and (_col0 < '20')) and (_col4 > '15')) and (_col4 < '25')) (type: boolean) + predicate: ((((_col0 > '10') and (_col0 < '20')) and (_col5 > '15')) and (_col5 < '25')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -281,13 +281,13 @@ STAGE PLANS: condition expressions: 0 
{KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 661 Basic stats: COMPLETE Column stats: NONE Filter Operator predicate: ((_col0 > '10') and (_col0 < '20')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out index aee7666..a898a7c 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join3.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join3.q.out @@ -49,13 +49,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((_col0 > '10') and (_col0 < '20')) and (_col4 > '15')) and (_col4 < '25')) (type: boolean) + predicate: ((((_col0 > '10') and (_col0 < '20')) and (_col5 > '15')) and (_col5 < '25')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -269,13 +269,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((_col4 > '15') and (_col4 < '25')) and (_col0 > '10')) and (_col0 < '20')) (type: boolean) + predicate: ((((_col5 > '15') and (_col5 < '25')) and (_col0 > '10')) and (_col0 < '20')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out index 3b1cd31..6943d97 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join4.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join4.q.out @@ -74,13 +74,13 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8 + outputColumnNames: _col0, _col1, _col5, _col6, _col10 Statistics: Num rows: 127 Data 
size: 12786 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((((_col0 > '10') and (_col0 < '20')) and (_col4 > '15')) and (_col4 < '25')) and (sqrt(_col8) <> 13)) (type: boolean) + predicate: (((((_col0 > '10') and (_col0 < '20')) and (_col5 > '15')) and (_col5 < '25')) and (sqrt(_col10) <> 13)) (type: boolean) Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -429,13 +429,13 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8 + outputColumnNames: _col0, _col1, _col5, _col6, _col10 Statistics: Num rows: 127 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: ((((_col4 > '15') and (_col4 < '25')) and (_col0 > '10')) and (_col0 < '20')) (type: boolean) + predicate: ((((_col5 > '15') and (_col5 < '25')) and (_col0 > '10')) and (_col0 < '20')) (type: boolean) Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 1 Data size: 100 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_outer_join5.q.out b/ql/src/test/results/clientpositive/ppd_outer_join5.q.out index 4bd7653..16d788b 100644 --- a/ql/src/test/results/clientpositive/ppd_outer_join5.q.out +++ b/ql/src/test/results/clientpositive/ppd_outer_join5.q.out @@ -86,10 +86,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 2 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7, _col11, _col12 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8, _col13, _col14 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: string), 20 (type: int), _col11 (type: string), _col12 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col6 (type: int), _col7 (type: string), _col8 (type: string), 20 (type: int), _col13 (type: string), _col14 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -161,10 +161,10 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} 2 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col1, _col2, _col6, _col7, _col10, _col11, _col12 + outputColumnNames: _col1, _col2, _col7, _col8, _col12, _col13, _col14 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - 
expressions: 20 (type: int), _col1 (type: string), _col2 (type: string), 20 (type: int), _col6 (type: string), _col7 (type: string), _col10 (type: int), _col11 (type: string), _col12 (type: string) + expressions: 20 (type: int), _col1 (type: string), _col2 (type: string), 20 (type: int), _col7 (type: string), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -236,10 +236,10 @@ STAGE PLANS: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} 2 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} - outputColumnNames: _col1, _col2, _col6, _col7, _col10, _col11, _col12 + outputColumnNames: _col1, _col2, _col7, _col8, _col12, _col13, _col14 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: 20 (type: int), _col1 (type: string), _col2 (type: string), 20 (type: int), _col6 (type: string), _col7 (type: string), _col10 (type: int), _col11 (type: string), _col12 (type: string) + expressions: 20 (type: int), _col1 (type: string), _col2 (type: string), 20 (type: int), _col7 (type: string), _col8 (type: string), _col12 (type: int), _col13 (type: string), _col14 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out index 1ec9b41..f51b5a3 100644 --- a/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out +++ b/ql/src/test/results/clientpositive/ppd_repeated_alias.q.out @@ -68,13 +68,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col5, _col6 + outputColumnNames: _col0, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator - predicate: (_col6 = 3) (type: boolean) + predicate: (_col7 = 3) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col5 (type: int), 3 (type: int) + expressions: _col0 (type: int), _col6 (type: int), 3 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -139,13 +139,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col5, _col6 + outputColumnNames: _col0, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator - predicate: (_col6 = 3) (type: boolean) + predicate: (_col7 = 3) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col5 (type: int), _col6 (type: int) + expressions: _col0 (type: int), _col6 (type: int), _col7 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -215,10 +215,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator 
- expressions: _col0 (type: int), _col5 (type: int), 3 (type: int) + expressions: _col0 (type: int), _col6 (type: int), 3 (type: int) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -275,10 +275,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col6 + outputColumnNames: _col0, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator - predicate: (_col6 = 2) (type: boolean) + predicate: (_col7 = 2) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: int) diff --git a/ql/src/test/results/clientpositive/ppd_udf_case.q.out b/ql/src/test/results/clientpositive/ppd_udf_case.q.out index ad68eb6..ad65f0e 100644 --- a/ql/src/test/results/clientpositive/ppd_udf_case.q.out +++ b/ql/src/test/results/clientpositive/ppd_udf_case.q.out @@ -64,13 +64,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} - outputColumnNames: _col0, _col1, _col2, _col3, _col6, _col7, _col8, _col9 + outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9, _col10 Statistics: Num rows: 15 Data size: 3085 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (((_col2 = '2008-04-08') and (_col8 = '2008-04-08')) and CASE (_col0) WHEN ('27') THEN (true) WHEN ('38') THEN (false) ELSE (null) END) (type: boolean) + predicate: (((_col2 = '2008-04-08') and (_col9 = '2008-04-08')) and CASE (_col0) WHEN ('27') THEN (true) WHEN ('38') THEN (false) ELSE (null) END) (type: boolean) Statistics: Num rows: 1 Data size: 205 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col6 (type: string), _col7 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col3 (type: string), _col7 (type: string), _col8 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col3, _col4, _col5, _col7 Statistics: Num rows: 1 Data size: 205 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -209,10 +209,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} - outputColumnNames: _col0, _col1, _col2, _col3, _col6, _col7, _col8, _col9 + outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8, _col9, _col10 Statistics: Num rows: 15 Data size: 3085 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 15 Data size: 3085 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_union_view.q.out b/ql/src/test/results/clientpositive/ppd_union_view.q.out index 405c912..0c24d3e 100644 --- a/ql/src/test/results/clientpositive/ppd_union_view.q.out +++ 
b/ql/src/test/results/clientpositive/ppd_union_view.q.out @@ -301,10 +301,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {KEY.reducesinkkey1} 1 {VALUE._col0} - outputColumnNames: _col1, _col2, _col5 + outputColumnNames: _col1, _col2, _col6 Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col5 (type: string), _col1 (type: string), _col2 (type: string) + expressions: _col6 (type: string), _col1 (type: string), _col2 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 15 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -470,10 +470,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col1, _col5 + outputColumnNames: _col1, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col5 (type: string), _col1 (type: string), '2011-10-15' (type: string) + expressions: _col6 (type: string), _col1 (type: string), '2011-10-15' (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ppd_vc.q.out b/ql/src/test/results/clientpositive/ppd_vc.q.out index 918e1a9..48d4dd4 100644 --- a/ql/src/test/results/clientpositive/ppd_vc.q.out +++ b/ql/src/test/results/clientpositive/ppd_vc.q.out @@ -688,10 +688,10 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} - outputColumnNames: _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: bigint) + expressions: _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: bigint) outputColumnNames: _col0, _col1, _col2, _col3, _col4 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/ptf.q.out b/ql/src/test/results/clientpositive/ptf.q.out index 25ad0a9..2182450 100644 --- a/ql/src/test/results/clientpositive/ptf.q.out +++ b/ql/src/test/results/clientpositive/ptf.q.out @@ -899,20 +899,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@part POSTHOOK: Output: default@part_4 POSTHOOK: Output: default@part_5 -POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), 
(part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), 
(part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), 
(part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), 
(part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), 
(part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT 
[(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, 
comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_4 PREHOOK: type: QUERY PREHOOK: Input: default@part_4 diff --git a/ql/src/test/results/clientpositive/quotedid_skew.q.out b/ql/src/test/results/clientpositive/quotedid_skew.q.out index 4ff30b5..95ca950 100644 --- a/ql/src/test/results/clientpositive/quotedid_skew.q.out +++ b/ql/src/test/results/clientpositive/quotedid_skew.q.out @@ -87,10 +87,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -164,10 +164,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/regex_col.q.out b/ql/src/test/results/clientpositive/regex_col.q.out index a1f671e..57daffb 100644 --- a/ql/src/test/results/clientpositive/regex_col.q.out +++ b/ql/src/test/results/clientpositive/regex_col.q.out @@ -138,10 +138,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col2, _col3, _col8, _col9 + outputColumnNames: _col2, _col3, _col9, _col10 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: string), _col3 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col2 (type: string), _col3 (type: string), _col9 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -206,10 +206,10 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey2} {KEY.reducesinkkey1} - outputColumnNames: _col8, _col9 + outputColumnNames: _col9, _col10 Statistics: Num rows: 2130 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col8 (type: string), 
_col9 (type: string) + expressions: _col9 (type: string), _col10 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 2130 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/router_join_ppr.q.out b/ql/src/test/results/clientpositive/router_join_ppr.q.out index 097c94b..9afda4d 100644 --- a/ql/src/test/results/clientpositive/router_join_ppr.q.out +++ b/ql/src/test/results/clientpositive/router_join_ppr.q.out @@ -390,14 +390,14 @@ STAGE PLANS: filter predicates: 0 1 {(VALUE._col1 = '2008-04-08')} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 13 Data size: 2644 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean) Statistics: Num rows: 1 Data size: 203 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 203 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -763,14 +763,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col6, _col7 + outputColumnNames: _col0, _col1, _col7, _col8 Statistics: Num rows: 6 Data size: 1322 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col7 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col7 (type: string), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1132,14 +1132,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 6 Data size: 1322 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: ((_col0 > 10) and (_col0 < 20)) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -1595,14 +1595,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col2, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col7, _col8 Statistics: Num rows: 13 Data size: 2644 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false predicate: (((_col0 > 10) and (_col0 < 20)) and (_col2 = '2008-04-08')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - 
expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string), _col7 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col7 (type: string), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/sample8.q.out b/ql/src/test/results/clientpositive/sample8.q.out index e0c0f9e..bc1c038 100644 --- a/ql/src/test/results/clientpositive/sample8.q.out +++ b/ql/src/test/results/clientpositive/sample8.q.out @@ -323,11 +323,11 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} {VALUE._col0} {VALUE._col1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col2, _col3, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col3, _col7, _col8 Statistics: Num rows: 7 Data size: 1542 Basic stats: COMPLETE Column stats: NONE Filter Operator isSamplingPred: false - predicate: ((((_col6 = _col0) and (_col7 = _col1)) and (_col2 = '2008-04-08')) and (_col3 = '11')) (type: boolean) + predicate: ((((_col7 = _col0) and (_col8 = _col1)) and (_col2 = '2008-04-08')) and (_col3 = '11')) (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string), '2008-04-08' (type: string), '11' (type: string) @@ -782,10 +782,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -865,13 +865,13 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = _col4) (type: boolean) + predicate: (_col0 = _col5) (type: boolean) Statistics: Num rows: 8 Data size: 1653 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 8 Data size: 1653 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/semijoin.q.out b/ql/src/test/results/clientpositive/semijoin.q.out index b2324af..68c6bf8 100644 --- a/ql/src/test/results/clientpositive/semijoin.q.out +++ b/ql/src/test/results/clientpositive/semijoin.q.out @@ -1248,10 +1248,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 24 Data size: 178 Basic stats: COMPLETE Column stats: NONE Select Operator - 
expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 24 Data size: 178 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/serde_regex.q.out b/ql/src/test/results/clientpositive/serde_regex.q.out index 65e7dec..60182b7 100644 --- a/ql/src/test/results/clientpositive/serde_regex.q.out +++ b/ql/src/test/results/clientpositive/serde_regex.q.out @@ -201,7 +201,7 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@serde_regex1 #### A masked pattern was here #### NULL 0 --1234567890.123456789 -1234567890 +-1234567890.1234567890 -1234567890 -4400 4400 -1255.49 -1255 -1.122 -11 @@ -210,7 +210,7 @@ NULL 0 -0.333 0 -0.33 0 -0.3 0 -0 0 +0.000000000000000000 0 0 0 0 0 0.01 0 @@ -221,8 +221,8 @@ NULL 0 0.33 0 0.333 0 1 1 -1 1 -1 1 +1.0 1 +1.000000000000000000 1 1.12 1 1.122 1 2 2 @@ -230,14 +230,14 @@ NULL 0 3.14 3 3.14 3 3.14 3 -3.14 4 +3.140 4 10 10 20 20 100 100 -124 124 +124.00 124 125.2 125 200 200 -1234567890.12345678 1234567890 +1234567890.1234567800 1234567890 PREHOOK: query: DROP TABLE serde_regex1 PREHOOK: type: DROPTABLE PREHOOK: Input: default@serde_regex1 diff --git a/ql/src/test/results/clientpositive/skewjoin.q.out b/ql/src/test/results/clientpositive/skewjoin.q.out index c77e54d..e82bba1 100644 --- a/ql/src/test/results/clientpositive/skewjoin.q.out +++ b/ql/src/test/results/clientpositive/skewjoin.q.out @@ -121,10 +121,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} handleSkewJoin: true - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -169,9 +169,9 @@ STAGE PLANS: keys: 0 reducesinkkey0 (type: string) 1 reducesinkkey0 (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false @@ -296,10 +296,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -416,10 +416,10 @@ STAGE PLANS: 1 {KEY.reducesinkkey0} {VALUE._col0} 2 
{KEY.reducesinkkey0} {VALUE._col0} 3 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9, _col12, _col13 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11, _col15, _col16 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string), _col12 (type: string), _col13 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -501,14 +501,14 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) - outputColumnNames: _col0, _col1, _col4 + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(hash(_col0)), sum(hash(_col1)), sum(hash(_col4)) + aggregations: sum(hash(_col0)), sum(hash(_col1)), sum(hash(_col5)) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 1 Data size: 24 Basic stats: COMPLETE Column stats: NONE @@ -1278,14 +1278,14 @@ STAGE PLANS: keys: 0 (key + 1) (type: double) 1 UDFToDouble(key) (type: double) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 33 Basic stats: PARTIAL Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) - outputColumnNames: _col0, _col5 + expressions: _col0 (type: string), _col6 (type: string) + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 33 Basic stats: PARTIAL Column stats: NONE Group By Operator - aggregations: sum(hash(_col0)), sum(hash(_col5)) + aggregations: sum(hash(_col0)), sum(hash(_col6)) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out b/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out index 81c362e..ab5da4d 100644 --- a/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_union_remove_1.q.out @@ -100,10 +100,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -148,10 +148,10 @@ STAGE PLANS: 
condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -236,10 +236,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -284,10 +284,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -380,10 +380,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -439,10 +439,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -534,10 +534,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic 
stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -593,10 +593,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out b/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out index 2f26156..e87d22b 100644 --- a/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out +++ b/ql/src/test/results/clientpositive/skewjoin_union_remove_2.q.out @@ -126,10 +126,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -188,10 +188,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt1.q.out b/ql/src/test/results/clientpositive/skewjoinopt1.q.out index 18f7c6c..06cd471 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt1.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt1.q.out @@ -87,10 +87,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column 
stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -164,10 +164,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -252,10 +252,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -329,10 +329,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt10.q.out b/ql/src/test/results/clientpositive/skewjoinopt10.q.out index d7ca8f7..7ff8235 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt10.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt10.q.out @@ -88,10 +88,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: array) + expressions: _col0 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -230,10 +230,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: array) + expressions: _col0 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 
Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt11.q.out b/ql/src/test/results/clientpositive/skewjoinopt11.q.out index ccca84c..7b431dd 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt11.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt11.q.out @@ -103,10 +103,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -210,10 +210,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -257,10 +257,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -332,10 +332,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt12.q.out b/ql/src/test/results/clientpositive/skewjoinopt12.q.out index ead9476..6d6a526 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt12.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt12.q.out @@ -87,10 +87,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: 
Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -162,10 +162,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt13.q.out b/ql/src/test/results/clientpositive/skewjoinopt13.q.out index d564dcc..79f730c 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt13.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt13.q.out @@ -114,7 +114,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -132,7 +132,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE @@ -150,12 +150,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} {VALUE._col4} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} {VALUE._col5} 1 {VALUE._col0} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt14.q.out b/ql/src/test/results/clientpositive/skewjoinopt14.q.out index 9a14a6d..206d6ce 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt14.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt14.q.out @@ -119,7 +119,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false @@ -142,7 +142,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: 
_col0 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) TableScan Union Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE @@ -154,7 +154,7 @@ STAGE PLANS: sort order: + Map-reduce partition columns: _col1 (type: string) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) TableScan alias: c Statistics: Num rows: 0 Data size: 20 Basic stats: PARTIAL Column stats: NONE @@ -172,12 +172,12 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col3} {VALUE._col4} + 0 {VALUE._col0} {KEY.reducesinkkey0} {VALUE._col4} {VALUE._col5} 1 {VALUE._col0} {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -222,7 +222,7 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/skewjoinopt15.q.out b/ql/src/test/results/clientpositive/skewjoinopt15.q.out index 271b14b..edf2dee 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt15.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt15.q.out @@ -127,10 +127,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 4 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -204,10 +204,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 2 Data size: 8 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -292,10 
+292,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -369,10 +369,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 6 Data size: 26 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt16.q.out b/ql/src/test/results/clientpositive/skewjoinopt16.q.out index 4e66784..16a18ba 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt16.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt16.q.out @@ -87,10 +87,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -162,10 +162,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt17.q.out b/ql/src/test/results/clientpositive/skewjoinopt17.q.out index 98b8dca..04c6960 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt17.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt17.q.out @@ -93,10 +93,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: 
string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -170,10 +170,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -312,10 +312,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -387,10 +387,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt18.q.out b/ql/src/test/results/clientpositive/skewjoinopt18.q.out index c48aa8d..87de96b 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt18.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt18.q.out @@ -113,10 +113,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3 Data size: 13 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt19.q.out b/ql/src/test/results/clientpositive/skewjoinopt19.q.out index 5e0670e..243304b 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt19.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt19.q.out @@ -91,10 
+91,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -168,10 +168,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt2.q.out b/ql/src/test/results/clientpositive/skewjoinopt2.q.out index 57a1472..fa26522 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt2.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt2.q.out @@ -91,10 +91,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -166,10 +166,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -249,10 +249,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data 
size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -324,10 +324,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt20.q.out b/ql/src/test/results/clientpositive/skewjoinopt20.q.out index 73d1808..aef3e82 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt20.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt20.q.out @@ -91,10 +91,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -168,10 +168,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt3.q.out b/ql/src/test/results/clientpositive/skewjoinopt3.q.out index 5bc2024..d36f9db 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt3.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt3.q.out @@ -91,10 +91,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -168,10 +168,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic 
stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -256,10 +256,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -333,10 +333,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt4.q.out b/ql/src/test/results/clientpositive/skewjoinopt4.q.out index 6ac101b..8298039 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt4.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt4.q.out @@ -87,10 +87,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -164,10 +164,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -250,10 +250,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: 
Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -327,10 +327,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt5.q.out b/ql/src/test/results/clientpositive/skewjoinopt5.q.out index fdb914f..165e6c6 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt5.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt5.q.out @@ -89,10 +89,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -166,10 +166,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt6.q.out b/ql/src/test/results/clientpositive/skewjoinopt6.q.out index 3110701..790bc61 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt6.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt6.q.out @@ -91,10 +91,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data 
size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -168,10 +168,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt7.q.out b/ql/src/test/results/clientpositive/skewjoinopt7.q.out index f2f1267..3a13ffa 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt7.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt7.q.out @@ -121,10 +121,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -212,10 +212,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/skewjoinopt8.q.out b/ql/src/test/results/clientpositive/skewjoinopt8.q.out index 79c18af..e960354 100644 --- a/ql/src/test/results/clientpositive/skewjoinopt8.q.out +++ b/ql/src/test/results/clientpositive/skewjoinopt8.q.out @@ -119,10 +119,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) 
outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator @@ -210,10 +210,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} 2 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out index 9500ba4..e39475f 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin9.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin9.q.out @@ -318,9 +318,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col5, _col6, _col7 + outputColumnNames: _col0, _col6, _col7, _col8 Select Operator - expressions: _col5 (type: int), _col6 (type: string), _col7 (type: string), _col0 (type: int) + expressions: _col6 (type: int), _col7 (type: string), _col8 (type: string), _col0 (type: int) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out index 8d679f8..140eb1b 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_1.q.out @@ -75,9 +75,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -128,9 +128,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -186,9 +186,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -243,9 +243,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select 
Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -308,9 +308,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -361,9 +361,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -419,9 +419,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -476,9 +476,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out index 9a5cdc0..08c47e4 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_10.q.out @@ -95,9 +95,9 @@ STAGE PLANS: keys: 0 userid (type: int), pageid (type: int), postid (type: int), type (type: string) 1 userid (type: int), pageid (type: int), postid (type: int), type (type: string) - outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col7, _col8, _col9, _col10, _col11 + outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col8, _col9, _col10, _col11, _col12 Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col7 (type: int), _col8 (type: int), _col9 (type: int), _col10 (type: string), _col11 (type: string) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: int), _col3 (type: string), _col4 (type: string), _col8 (type: int), _col9 (type: int), _col10 (type: int), _col11 (type: string), _col12 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out index f9a7aa2..7c1ff4a 100644 --- 
a/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_11.q.out @@ -145,10 +145,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col6 + outputColumnNames: _col0, _col7 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), _col6 (type: string) + expressions: _col0 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out index 6d3c37e..12dd610 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_12.q.out @@ -157,10 +157,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col6 + outputColumnNames: _col0, _col7 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), _col6 (type: string) + expressions: _col0 (type: int), _col7 (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false @@ -420,10 +420,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), concat(_col1, _col6) (type: string) + expressions: _col0 (type: int), concat(_col1, _col7) (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out index fb80ee4..f248e0d 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_13.q.out @@ -145,10 +145,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 value (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Reduce Output Operator key expressions: _col0 (type: int) @@ -368,11 +368,11 @@ STAGE PLANS: keys: 0 UDFToDouble(key) (type: double) 1 UDFToDouble(value) (type: double) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out index 2424be8..c7730e5 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_15.q.out @@ -117,10 +117,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), _col1 
(type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Reduce Output Operator key expressions: _col0 (type: int) @@ -376,10 +376,10 @@ STAGE PLANS: keys: 0 key (type: int), key2 (type: int) 1 key (type: int), key2 (type: int) - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: string) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col6 (type: int), _col7 (type: int), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Reduce Output Operator key expressions: _col0 (type: int) @@ -583,10 +583,10 @@ STAGE PLANS: keys: 0 key2 (type: int), key (type: int) 1 key2 (type: int), key (type: int) - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8 Position of Big Table: 0 Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: string) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col6 (type: int), _col7 (type: int), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Reduce Output Operator key expressions: _col0 (type: int) @@ -823,12 +823,12 @@ STAGE PLANS: keys: 0 key (type: int), value (type: string) 1 key (type: int), value (type: string) - outputColumnNames: _col0, _col1, _col2, _col5, _col6, _col7 + outputColumnNames: _col0, _col1, _col2, _col6, _col7, _col8 Position of Big Table: 0 Statistics: Num rows: 137 Data size: 1984 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col5 (type: int), _col6 (type: int), _col7 (type: string) + expressions: _col0 (type: int), _col1 (type: int), _col2 (type: string), _col6 (type: int), _col7 (type: int), _col8 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 137 Data size: 1984 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out index 8019a5d..04a4c5c 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_2.q.out @@ -75,9 +75,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -130,9 +130,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, 
_col1, _col2, _col3 File Output Operator compressed: false @@ -188,9 +188,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -247,9 +247,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -312,9 +312,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -367,9 +367,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -425,9 +425,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -484,9 +484,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out index f21e4c1..2c378bb 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_3.q.out @@ -75,9 +75,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output 
Operator compressed: false @@ -130,9 +130,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -187,9 +187,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -246,9 +246,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -310,9 +310,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -365,9 +365,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -422,9 +422,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -481,9 +481,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_4.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_4.q.out index d4e5b5e..5d1dfcb 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_4.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_4.q.out @@ 
-78,9 +78,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -136,9 +136,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -194,9 +194,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -257,9 +257,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -321,9 +321,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -390,9 +390,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, 
_col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -450,9 +450,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -512,9 +512,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -576,9 +576,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -642,9 +642,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -702,9 +702,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -769,9 +769,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), 
_col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -833,9 +833,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_5.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_5.q.out index 83c2366..0ffaeae 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_5.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_5.q.out @@ -78,9 +78,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -136,9 +136,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -194,9 +194,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -257,9 +257,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -321,9 +321,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, 
_col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -390,9 +390,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -450,9 +450,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -512,9 +512,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -576,9 +576,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -642,9 +642,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -702,9 +702,9 @@ STAGE PLANS: 0 key (type: 
int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -769,9 +769,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false @@ -833,9 +833,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_6.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_6.q.out index bbf2b0b..17e3d87 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_6.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_6.q.out @@ -91,9 +91,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -1276,9 +1276,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -2477,9 +2477,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -2594,9 +2594,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, 
_col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -2706,9 +2706,9 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string), _col8 (type: int), _col9 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string), _col10 (type: int), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out index d413ed3..c4dda71 100644 --- a/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out +++ b/ql/src/test/results/clientpositive/smb_mapjoin_7.q.out @@ -640,9 +640,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/spark/join1.q.out b/ql/src/test/results/clientpositive/spark/join1.q.out index 63fd414..26fdb91 100644 --- a/ql/src/test/results/clientpositive/spark/join1.q.out +++ b/ql/src/test/results/clientpositive/spark/join1.q.out @@ -62,10 +62,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join14.q.out b/ql/src/test/results/clientpositive/spark/join14.q.out index 40f1e96..a5eef04 100644 --- a/ql/src/test/results/clientpositive/spark/join14.q.out +++ b/ql/src/test/results/clientpositive/spark/join14.q.out @@ -66,10 +66,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 11 Data size: 1102 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join15.q.out b/ql/src/test/results/clientpositive/spark/join15.q.out index 0a2f4b4..e5ae83d 100644 --- a/ql/src/test/results/clientpositive/spark/join15.q.out +++ 
b/ql/src/test/results/clientpositive/spark/join15.q.out @@ -52,10 +52,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join17.q.out b/ql/src/test/results/clientpositive/spark/join17.q.out index 2cb638d..7ee4920 100644 --- a/ql/src/test/results/clientpositive/spark/join17.q.out +++ b/ql/src/test/results/clientpositive/spark/join17.q.out @@ -208,10 +208,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col4) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), UDFToInteger(_col5) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join2.q.out b/ql/src/test/results/clientpositive/spark/join2.q.out index a3006a3..598678b 100644 --- a/ql/src/test/results/clientpositive/spark/join2.q.out +++ b/ql/src/test/results/clientpositive/spark/join2.q.out @@ -76,15 +76,15 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 + _col4) is not null (type: boolean) + predicate: (_col0 + _col5) is not null (type: boolean) Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: (_col0 + _col4) (type: double) + key expressions: (_col0 + _col5) (type: double) sort order: + - Map-reduce partition columns: (_col0 + _col4) (type: double) + Map-reduce partition columns: (_col0 + _col5) (type: double) Statistics: Num rows: 16 Data size: 1649 Basic stats: COMPLETE Column stats: NONE value expressions: _col0 (type: string) Reducer 3 @@ -95,10 +95,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {VALUE._col1} - outputColumnNames: _col0, _col9 + outputColumnNames: _col0, _col11 Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col9 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col11 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 17 Data size: 1813 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join20.q.out b/ql/src/test/results/clientpositive/spark/join20.q.out index 5dc1faa..e894200 100644 --- 
a/ql/src/test/results/clientpositive/spark/join20.q.out +++ b/ql/src/test/results/clientpositive/spark/join20.q.out @@ -71,10 +71,10 @@ STAGE PLANS: 0 1 2 {(KEY.reducesinkkey0 < 20)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -732,10 +732,10 @@ STAGE PLANS: 0 1 2 {(KEY.reducesinkkey0 < 20)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join21.q.out b/ql/src/test/results/clientpositive/spark/join21.q.out index fd0656e..c218a80 100644 --- a/ql/src/test/results/clientpositive/spark/join21.q.out +++ b/ql/src/test/results/clientpositive/spark/join21.q.out @@ -66,10 +66,10 @@ STAGE PLANS: 0 {(KEY.reducesinkkey0 < 10)} 1 2 {(KEY.reducesinkkey0 < 10)} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 63 Data size: 12786 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join22.q.out b/ql/src/test/results/clientpositive/spark/join22.q.out index b6ef0f7..b7e6d36 100644 --- a/ql/src/test/results/clientpositive/spark/join22.q.out +++ b/ql/src/test/results/clientpositive/spark/join22.q.out @@ -84,10 +84,10 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col2} - outputColumnNames: _col7 + outputColumnNames: _col8 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col7 (type: string) + expressions: _col8 (type: string) outputColumnNames: _col0 Statistics: Num rows: 34 Data size: 3515 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join23.q.out b/ql/src/test/results/clientpositive/spark/join23.q.out index f543646..9896a47 100644 --- 
a/ql/src/test/results/clientpositive/spark/join23.q.out +++ b/ql/src/test/results/clientpositive/spark/join23.q.out @@ -48,10 +48,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 9 Data size: 1983 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join25.q.out b/ql/src/test/results/clientpositive/spark/join25.q.out index cfe362c..9a03773 100644 --- a/ql/src/test/results/clientpositive/spark/join25.q.out +++ b/ql/src/test/results/clientpositive/spark/join25.q.out @@ -69,10 +69,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 16 Data size: 3306 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join26.q.out b/ql/src/test/results/clientpositive/spark/join26.q.out index 97fcf57..6228500 100644 --- a/ql/src/test/results/clientpositive/spark/join26.q.out +++ b/ql/src/test/results/clientpositive/spark/join26.q.out @@ -327,10 +327,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} 2 {VALUE._col0} - outputColumnNames: _col0, _col5, _col9 + outputColumnNames: _col0, _col6, _col11 Statistics: Num rows: 33 Data size: 6613 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col9 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col11 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 33 Data size: 6613 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join27.q.out b/ql/src/test/results/clientpositive/spark/join27.q.out index 311cd8b..8b79dd7 100644 --- a/ql/src/test/results/clientpositive/spark/join27.q.out +++ b/ql/src/test/results/clientpositive/spark/join27.q.out @@ -68,10 +68,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join3.q.out b/ql/src/test/results/clientpositive/spark/join3.q.out index f410e3c..496ad6c 
100644 --- a/ql/src/test/results/clientpositive/spark/join3.q.out +++ b/ql/src/test/results/clientpositive/spark/join3.q.out @@ -77,10 +77,10 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} 1 2 {VALUE._col0} - outputColumnNames: _col0, _col9 + outputColumnNames: _col0, _col11 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col9 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col11 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 63 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/spark/join9.q.out b/ql/src/test/results/clientpositive/spark/join9.q.out index d59ee75..25d4529 100644 --- a/ql/src/test/results/clientpositive/spark/join9.q.out +++ b/ql/src/test/results/clientpositive/spark/join9.q.out @@ -225,10 +225,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col7 + outputColumnNames: _col0, _col8 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col7 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col8 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/stats11.q.out b/ql/src/test/results/clientpositive/stats11.q.out index ea0d589..11762bd 100644 --- a/ql/src/test/results/clientpositive/stats11.q.out +++ b/ql/src/test/results/clientpositive/stats11.q.out @@ -481,12 +481,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 0 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -982,12 +982,12 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Position of Big Table: 1 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE BucketMapJoin: true Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 30 Data size: 3253 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out index 5887c78..c6a6e3f 100644 --- a/ql/src/test/results/clientpositive/subquery_multiinsert.q.out +++ b/ql/src/test/results/clientpositive/subquery_multiinsert.q.out @@ -182,10 +182,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 34 Data size: 7032 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col4 is null (type: boolean) + predicate: 
_col5 is null (type: boolean) Statistics: Num rows: 17 Data size: 3516 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -744,10 +744,10 @@ STAGE PLANS: keys: 0 _col0 (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 34 Data size: 7032 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col4 is null (type: boolean) + predicate: _col5 is null (type: boolean) Statistics: Num rows: 17 Data size: 3516 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/subquery_notexists.q.out b/ql/src/test/results/clientpositive/subquery_notexists.q.out index 2a0dfa6..b78eca9 100644 --- a/ql/src/test/results/clientpositive/subquery_notexists.q.out +++ b/ql/src/test/results/clientpositive/subquery_notexists.q.out @@ -56,10 +56,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey1} - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col6 is null (type: boolean) + predicate: _col7 is null (type: boolean) Statistics: Num rows: 15 Data size: 3093 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -309,10 +309,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col5 is null (type: boolean) + predicate: _col6 is null (type: boolean) Statistics: Num rows: 15 Data size: 3093 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/subquery_notin.q.out b/ql/src/test/results/clientpositive/subquery_notin.q.out index 924dcf7..7b8e974 100644 --- a/ql/src/test/results/clientpositive/subquery_notin.q.out +++ b/ql/src/test/results/clientpositive/subquery_notin.q.out @@ -227,10 +227,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 34 Data size: 7032 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col4 is null (type: boolean) + predicate: _col5 is null (type: boolean) Statistics: Num rows: 17 Data size: 3516 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -468,10 +468,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} {VALUE._col3} 1 {KEY.reducesinkkey0} - outputColumnNames: _col1, _col2, _col5, _col11 + outputColumnNames: _col1, _col2, _col5, _col12 Statistics: Num rows: 17 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col11 is null (type: boolean) + predicate: _col12 is null (type: boolean) Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) @@ -742,10 +742,10 @@ STAGE PLANS: condition expressions: 0 
{VALUE._col1} {VALUE._col5} 1 {KEY.reducesinkkey0} - outputColumnNames: _col1, _col5, _col11 + outputColumnNames: _col1, _col5, _col12 Statistics: Num rows: 36 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col11 is null (type: boolean) + predicate: _col12 is null (type: boolean) Statistics: Num rows: 18 Data size: 1919 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: string), _col5 (type: int) @@ -1039,10 +1039,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col1} {KEY.reducesinkkey1} {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col1, _col2, _col5, _col11 + outputColumnNames: _col1, _col2, _col5, _col12 Statistics: Num rows: 17 Data size: 3839 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col11 is null (type: boolean) + predicate: _col12 is null (type: boolean) Statistics: Num rows: 8 Data size: 1806 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) diff --git a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out index a34dae6..0b5da95 100644 --- a/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out +++ b/ql/src/test/results/clientpositive/subquery_unqualcolumnrefs.q.out @@ -859,10 +859,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} {VALUE._col3} 1 {KEY.reducesinkkey0} - outputColumnNames: _col1, _col2, _col5, _col11 + outputColumnNames: _col1, _col2, _col5, _col12 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Filter Operator - predicate: _col11 is null (type: boolean) + predicate: _col12 is null (type: boolean) Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator expressions: _col2 (type: string), _col1 (type: string), _col5 (type: int) diff --git a/ql/src/test/results/clientpositive/subquery_views.q.out b/ql/src/test/results/clientpositive/subquery_views.q.out index 07370f6..5e33699 100644 --- a/ql/src/test/results/clientpositive/subquery_views.q.out +++ b/ql/src/test/results/clientpositive/subquery_views.q.out @@ -193,10 +193,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 5 Data size: 1212 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col4 is null (type: boolean) + predicate: _col5 is null (type: boolean) Statistics: Num rows: 2 Data size: 484 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string) @@ -360,10 +360,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {KEY.reducesinkkey1} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 17 Data size: 3636 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: _col4 is null (type: boolean) + predicate: _col5 is null (type: boolean) Statistics: Num rows: 8 Data size: 1711 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/temp_table_join1.q.out b/ql/src/test/results/clientpositive/temp_table_join1.q.out index 2aa244e..86ce39c 100644 --- a/ql/src/test/results/clientpositive/temp_table_join1.q.out +++ 
b/ql/src/test/results/clientpositive/temp_table_join1.q.out @@ -60,10 +60,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -148,10 +148,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -238,10 +238,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 5 Data size: 57 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out b/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out index 241db10..062dc1b 100644 --- a/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out +++ b/ql/src/test/results/clientpositive/temp_table_windowing_expressions.q.out @@ -698,10 +698,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k POSTHOOK: Output: default@t1 POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: t1.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, 
comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: t2.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: t1.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), 
(over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: t2.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from t1 limit 3 PREHOOK: type: QUERY PREHOOK: Input: default@t1 diff --git a/ql/src/test/results/clientpositive/tez/auto_join1.q.out b/ql/src/test/results/clientpositive/tez/auto_join1.q.out index aded535..cd07d67 100644 --- a/ql/src/test/results/clientpositive/tez/auto_join1.q.out +++ b/ql/src/test/results/clientpositive/tez/auto_join1.q.out @@ -44,10 +44,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE 
Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out index 2408ab8..b3f16c1 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez1.q.out @@ -144,10 +144,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -336,15 +336,15 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col5, _col6 + outputColumnNames: _col6, _col7 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col5 (type: int), _col6 (type: string) - outputColumnNames: _col5, _col6 + expressions: _col6 (type: int), _col7 (type: string) + outputColumnNames: _col6, _col7 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: sum(substr(_col6, 5)) - keys: _col5 (type: int) + aggregations: sum(substr(_col7, 5)) + keys: _col6 (type: int) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE @@ -603,10 +603,10 @@ STAGE PLANS: 0 key (type: int) 1 key (type: int) 2 key (type: int) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 550 Data size: 5843 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -957,10 +957,10 @@ STAGE PLANS: keys: 0 value (type: string) 1 value (type: string) - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col7 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1049,10 +1049,10 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -1140,10 +1140,10 @@ STAGE PLANS: keys: 0 _col1 (type: string) 1 value (type: string) - outputColumnNames: _col0, _col10 + outputColumnNames: _col0, _col12 Statistics: 
Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col10 (type: int) + expressions: _col0 (type: int), _col12 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out index 4a3f5b9..ad09d3e 100644 --- a/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out +++ b/ql/src/test/results/clientpositive/tez/bucket_map_join_tez2.q.out @@ -165,10 +165,10 @@ STAGE PLANS: keys: 0 _col1 (type: string) 1 value (type: string) - outputColumnNames: _col0, _col10 + outputColumnNames: _col0, _col12 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col10 (type: int) + expressions: _col0 (type: int), _col12 (type: int) outputColumnNames: _col0, _col1 Statistics: Num rows: 302 Data size: 3213 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -257,10 +257,10 @@ STAGE PLANS: keys: 0 UDFToDouble(key) (type: double) 1 UDFToDouble(key) (type: double) - outputColumnNames: _col0, _col1, _col5 + outputColumnNames: _col0, _col1, _col6 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 133 Data size: 1411 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out index f243b0a..fd7eff9 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_1.q.out @@ -60,10 +60,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -142,24 +142,24 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator sort order: Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) Reducer 3 Reduce Operator Tree: Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {VALUE._col0} {VALUE._col1} {VALUE._col4} {VALUE._col5} + 0 {VALUE._col0} {VALUE._col1} 
{VALUE._col5} {VALUE._col6} 1 {VALUE._col0} {VALUE._col1} - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -282,10 +282,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -394,10 +394,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} {VALUE._col1} 1 {VALUE._col0} - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out index d384538..7774945 100644 --- a/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out +++ b/ql/src/test/results/clientpositive/tez/cross_product_check_2.q.out @@ -57,10 +57,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -111,12 +111,12 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Reduce Output Operator sort order: Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE - value expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string) + value expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string) Map 2 Map Operator Tree: TableScan @@ -140,15 +140,15 @@ STAGE PLANS: condition map: Inner Join 0 to 1 condition expressions: - 0 {_col0} {_col1} 
{_col4} {_col5} + 0 {_col0} {_col1} {_col5} {_col6} 1 {key} {value} keys: 0 1 - outputColumnNames: _col0, _col1, _col4, _col5, _col8, _col9 + outputColumnNames: _col0, _col1, _col5, _col6, _col10, _col11 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string), _col5 (type: string), _col8 (type: string), _col9 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string), _col6 (type: string), _col10 (type: string), _col11 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -249,10 +249,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -355,10 +355,10 @@ STAGE PLANS: keys: 0 1 - outputColumnNames: _col0, _col1, _col4 + outputColumnNames: _col0, _col1, _col5 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out b/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out index eae733f..b236efd 100644 --- a/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out +++ b/ql/src/test/results/clientpositive/tez/filter_join_breaktask.q.out @@ -353,12 +353,12 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col6 + outputColumnNames: _col0, _col7 Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator - key expressions: _col6 (type: string) + key expressions: _col7 (type: string) sort order: + - Map-reduce partition columns: _col6 (type: string) + Map-reduce partition columns: _col7 (type: string) Statistics: Num rows: 14 Data size: 119 Basic stats: COMPLETE Column stats: NONE tag: 0 value expressions: _col0 (type: int) @@ -372,10 +372,10 @@ STAGE PLANS: condition expressions: 0 {VALUE._col0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col11 + outputColumnNames: _col0, _col13 Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: int), _col11 (type: string) + expressions: _col0 (type: int), _col13 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 15 Data size: 130 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/join1.q.out b/ql/src/test/results/clientpositive/tez/join1.q.out index 1cb7ee5..ed9a921 100644 --- a/ql/src/test/results/clientpositive/tez/join1.q.out +++ b/ql/src/test/results/clientpositive/tez/join1.q.out @@ -62,10 +62,10 @@ STAGE PLANS: condition 
expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToInteger(_col0) (type: int), _col5 (type: string) + expressions: UDFToInteger(_col0) (type: int), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out index 07529b8..a7d6742 100644 --- a/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out +++ b/ql/src/test/results/clientpositive/tez/mapjoin_decimal.q.out @@ -119,10 +119,10 @@ STAGE PLANS: keys: 0 dec (type: decimal(4,2)) 1 dec (type: decimal(4,0)) - outputColumnNames: _col0, _col3 + outputColumnNames: _col0, _col4 Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: decimal(4,2)), _col3 (type: decimal(4,0)) + expressions: _col0 (type: decimal(4,2)), _col4 (type: decimal(4,0)) outputColumnNames: _col0, _col1 Statistics: Num rows: 577 Data size: 64680 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -149,112 +149,112 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 #### A masked pattern was here #### -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -45 45 -45 45 -45 45 -45 45 -45 45 -79 79 -79 79 -79 79 -79 79 -79 79 -79 79 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -6 6 -6 6 -6 6 -6 6 -6 6 -6 6 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -70 70 -70 70 -70 70 -70 70 -70 70 -70 70 -70 70 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +45.00 45 +45.00 45 +45.00 45 +45.00 45 +45.00 45 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -265,112 +265,112 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 #### A masked pattern was here #### -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -45 45 -45 45 -45 45 -45 
45 -45 45 -79 79 -79 79 -79 79 -79 79 -79 79 -79 79 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -6 6 -6 6 -6 6 -6 6 -6 6 -6 6 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -70 70 -70 70 -70 70 -70 70 -70 70 -70 70 -70 70 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +45.00 45 +45.00 45 +45.00 45 +45.00 45 +45.00 45 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 PREHOOK: query: select t1.dec, t2.dec from t1 join t2 on (t1.dec=t2.dec) PREHOOK: type: QUERY PREHOOK: Input: default@t1 @@ -381,109 +381,109 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@t1 POSTHOOK: Input: default@t2 #### A masked pattern was here #### -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -9 9 -45 45 -45 45 -45 45 -45 45 -45 45 -79 79 -79 79 -79 79 -79 79 -79 79 -79 79 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -17 17 -6 6 -6 6 -6 6 -6 6 -6 6 -6 6 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -62 62 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -64 64 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -89 89 -70 70 -70 70 -70 70 -70 70 -70 70 -70 70 -70 70 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 -14 14 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +9.00 9 +45.00 45 +45.00 45 +45.00 45 +45.00 45 +45.00 45 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +79.00 79 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +17.00 17 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +6.00 6 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +62.00 62 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +64.00 64 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +89.00 89 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +70.00 70 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 +14.00 14 diff --git 
a/ql/src/test/results/clientpositive/tez/mrr.q.out b/ql/src/test/results/clientpositive/tez/mrr.q.out index 8fcf23e..2d4a6cf 100644 --- a/ql/src/test/results/clientpositive/tez/mrr.q.out +++ b/ql/src/test/results/clientpositive/tez/mrr.q.out @@ -445,15 +445,15 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col5 (type: string) - outputColumnNames: _col4, _col5 + expressions: _col5 (type: string), _col6 (type: string) + outputColumnNames: _col5, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: count(DISTINCT _col5) - keys: _col4 (type: string), _col5 (type: string) + aggregations: count(DISTINCT _col6) + keys: _col5 (type: string), _col6 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE @@ -852,15 +852,15 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col5 (type: string) - outputColumnNames: _col4, _col5 + expressions: _col5 (type: string), _col6 (type: string) + outputColumnNames: _col5, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: count(DISTINCT _col5) - keys: _col4 (type: string), _col5 (type: string) + aggregations: count(DISTINCT _col6) + keys: _col5 (type: string), _col6 (type: string) mode: hash outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/tez/ptf.q.out b/ql/src/test/results/clientpositive/tez/ptf.q.out index 25ad0a9..2182450 100644 --- a/ql/src/test/results/clientpositive/tez/ptf.q.out +++ b/ql/src/test/results/clientpositive/tez/ptf.q.out @@ -899,20 +899,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@part POSTHOOK: Output: default@part_4 POSTHOOK: Output: default@part_5 -POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), 
(part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.cud SCRIPT 
[(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), 
(part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] 
+POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), 
(part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, 
comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_4 PREHOOK: type: QUERY PREHOOK: Input: default@part_4 diff --git a/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out b/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out index 410645b..50c8604 100644 --- a/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_join_hash.q.out @@ -69,10 +69,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 275 Data size: 48400 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = _col4) (type: boolean) + predicate: (_col0 = _col5) (type: boolean) Statistics: Num rows: 137 Data size: 24112 Basic stats: COMPLETE Column stats: NONE Select Operator Statistics: Num rows: 137 Data size: 24112 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out b/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out index 06d1b71..81dd777 100644 --- a/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_join_tests.q.out @@ -58,10 +58,10 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col5 (type: string) + expressions: _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out b/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out index b16105a..dd463d9 100644 --- a/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_joins_explain.q.out @@ -58,10 +58,10 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string), _col5 (type: string) + expressions: _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 6393 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator diff --git a/ql/src/test/results/clientpositive/tez/tez_union.q.out b/ql/src/test/results/clientpositive/tez/tez_union.q.out index d1511f2..0fc9b78 100644 --- a/ql/src/test/results/clientpositive/tez/tez_union.q.out +++ b/ql/src/test/results/clientpositive/tez/tez_union.q.out @@ -348,9 +348,9 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Select Operator - expressions: _col0 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false @@ -376,9 
+376,9 @@ STAGE PLANS: keys: 0 key (type: string) 1 _col0 (type: string) - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Select Operator - expressions: _col0 (type: string), _col4 (type: string) + expressions: _col0 (type: string), _col5 (type: string) outputColumnNames: _col0, _col1 File Output Operator compressed: false @@ -541,9 +541,9 @@ STAGE PLANS: 0 key (type: string) 1 _col0 (type: string) 2 key (type: string) - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col2 (type: string) @@ -606,9 +606,9 @@ STAGE PLANS: 0 key (type: string) 1 _col0 (type: string) 2 key (type: string) - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col2 (type: string) @@ -668,9 +668,9 @@ STAGE PLANS: 0 key (type: string) 1 _col0 (type: string) 2 key (type: string) - outputColumnNames: _col0, _col4, _col5 + outputColumnNames: _col0, _col5, _col6 Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1, _col2 Reduce Output Operator key expressions: _col2 (type: string) diff --git a/ql/src/test/results/clientpositive/tez/tez_union_decimal.q.out b/ql/src/test/results/clientpositive/tez/tez_union_decimal.q.out new file mode 100644 index 0000000..29332be --- /dev/null +++ b/ql/src/test/results/clientpositive/tez/tez_union_decimal.q.out @@ -0,0 +1,101 @@ +PREHOOK: query: select sum(a) from ( + select cast(1.1 as decimal) a from src tablesample (1 rows) + union all + select cast(null as decimal) a from src tablesample (1 rows) +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select sum(a) from ( + select cast(1.1 as decimal) a from src tablesample (1 rows) + union all + select cast(null as decimal) a from src tablesample (1 rows) +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +1 +PREHOOK: query: select sum(a) from ( + select cast(1 as tinyint) a from src tablesample (1 rows) + union all + select cast(null as tinyint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select sum(a) from ( + select cast(1 as tinyint) a from src tablesample (1 rows) + union all + select cast(null as tinyint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +2 +PREHOOK: query: select sum(a) from ( + select cast(1 as smallint) a from src tablesample (1 rows) + union all + select cast(null as smallint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern 
was here #### +POSTHOOK: query: select sum(a) from ( + select cast(1 as smallint) a from src tablesample (1 rows) + union all + select cast(null as smallint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +2 +PREHOOK: query: select sum(a) from ( + select cast(1 as int) a from src tablesample (1 rows) + union all + select cast(null as int) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select sum(a) from ( + select cast(1 as int) a from src tablesample (1 rows) + union all + select cast(null as int) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +2 +PREHOOK: query: select sum(a) from ( + select cast(1 as bigint) a from src tablesample (1 rows) + union all + select cast(null as bigint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +PREHOOK: type: QUERY +PREHOOK: Input: default@src +#### A masked pattern was here #### +POSTHOOK: query: select sum(a) from ( + select cast(1 as bigint) a from src tablesample (1 rows) + union all + select cast(null as bigint) a from src tablesample (1 rows) + union all + select cast(1.1 as decimal) a from src tablesample (1 rows) +) t +POSTHOOK: type: QUERY +POSTHOOK: Input: default@src +#### A masked pattern was here #### +2 diff --git a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out index f577e13..a3bf59d 100644 --- a/ql/src/test/results/clientpositive/tez/vector_data_types.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_data_types.q.out @@ -157,7 +157,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.7 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.70 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 2013-03-01 09:11:58.703285 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 2013-03-01 09:11:58.703086 58.86 topology 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 2013-03-01 09:11:58.703247 32.68 topology @@ -239,7 +239,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 1860-11-12 20:05:55.011470936 75.7 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 1860-11-12 20:05:55.011470936 75.70 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 1860-11-12 20:05:55.011484936 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 1860-11-12 20:05:55.011285936 58.86 topologyariffication 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 1860-11-12 20:05:55.011446936 32.68 topologyariffication diff --git a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out 
b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out index 437e830..d6b80b3 100644 --- a/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/tez/vector_decimal_aggregate.q.out @@ -107,11 +107,11 @@ POSTHOOK: query: SELECT cint, POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 1956.576923076922966667 6821.495748565159 6822.606289190924 --3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 --563 2 -515.621072973 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 -762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 3493144.07839499984625 3491310.1327026924 4937458.140118758 -6981 3 5831542.269248378 -515.621072973 5830511.027102432 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 -253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 -528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 -626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 1956.576923076922966667 6821.495748565159 6822.606289190924 +-3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 +-563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 -2325.50327307692295 1707.9424961538462 2415.395441814127 +762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 3493144.07839499984625 3491310.1327026924 4937458.140118758 +6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.56077692307690 6983219.08954384584620 2327739.696514615282066667 
3292794.4113115156 4032833.0678006653 +253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.96923076923100 -11712.99230769231000 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 +528534767 1024 5831542.2692483780 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.21109769200000 -11710.13076923077100 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 +626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.74615384615400 -11712.27692307692300 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 diff --git a/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out index 7dd7f91..a57990e 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_mapjoin.q.out @@ -49,14 +49,14 @@ STAGE PLANS: keys: 0 cint (type: int) 1 cint (type: int) - outputColumnNames: _col2, _col16 + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: int), _col16 (type: int) - outputColumnNames: _col2, _col16 + expressions: _col2 (type: int), _col17 (type: int) + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: count(_col2), max(_col16), min(_col2), avg((_col2 + _col16)) + aggregations: count(_col2), max(_col17), min(_col2), avg((_col2 + _col17)) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out index d85627e..120eba6 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_nested_mapjoin.q.out @@ -80,10 +80,10 @@ STAGE PLANS: keys: 0 ctinyint (type: tinyint) 1 ctinyint (type: tinyint) - outputColumnNames: _col0, _col1, _col5, _col14 + outputColumnNames: _col0, _col1, _col5, _col15 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = _col14) (type: boolean) + predicate: (_col0 = _col15) (type: boolean) Statistics: Num rows: 25935 Data size: 103741 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: smallint), _col5 (type: double) diff --git a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out index 1e91e8c..88f9e5f 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_ptf.q.out @@ -240,7 +240,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), 
BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -846,7 +846,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -1103,7 +1103,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -1391,7 +1391,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -1689,7 +1689,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size 
(type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -2019,7 +2019,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -2332,7 +2332,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -2392,10 +2392,10 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + expressions: _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2589,7 +2589,7 @@ STAGE PLANS: Map-reduce 
partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -2866,7 +2866,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -3145,7 +3145,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -3434,7 +3434,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true 
Path -> Alias: #### A masked pattern was here #### @@ -3500,7 +3500,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false @@ -3760,7 +3760,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -4213,7 +4213,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -4472,7 +4472,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### 
@@ -5214,7 +5214,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -5473,20 +5473,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@part POSTHOOK: Output: default@part_4 POSTHOOK: Output: default@part_5 -POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, 
type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), 
(part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), 
(part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, 
type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), 
(part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_4 PREHOOK: type: QUERY PREHOOK: Input: default@part_4 @@ -5722,7 +5722,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> 
Alias: #### A masked pattern was here #### @@ -5788,7 +5788,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false @@ -6079,7 +6079,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -6143,7 +6143,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false @@ -6158,7 +6158,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 4 Needs Tagging: false @@ -6432,7 +6432,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name 
(type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -6496,7 +6496,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false @@ -6782,7 +6782,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -6846,7 +6846,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false @@ -6863,7 +6863,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 
(type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 4 Needs Tagging: false @@ -7180,7 +7180,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -7246,7 +7246,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false @@ -7529,7 +7529,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: true Path -> Alias: #### A masked pattern was here #### @@ -7595,7 +7595,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), 
_col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: true Reducer 3 Needs Tagging: false diff --git a/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out index e659e7f..a6d344d 100644 --- a/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out +++ b/ql/src/test/results/clientpositive/tez/vectorized_shufflejoin.q.out @@ -54,14 +54,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col2, _col16 + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: int), _col16 (type: int) - outputColumnNames: _col2, _col16 + expressions: _col2 (type: int), _col17 (type: int) + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: count(_col2), max(_col16), min(_col2), avg((_col2 + _col16)) + aggregations: count(_col2), max(_col17), min(_col2), avg((_col2 + _col17)) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/udf_case.q.out b/ql/src/test/results/clientpositive/udf_case.q.out index 6c186bd..d6f5d9b 100644 --- a/ql/src/test/results/clientpositive/udf_case.q.out +++ b/ql/src/test/results/clientpositive/udf_case.q.out @@ -2,12 +2,24 @@ PREHOOK: query: DESCRIBE FUNCTION case PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION case POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'case' +CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END - When a = b, returns c; when a = d, return e; else return f PREHOOK: query: DESCRIBE FUNCTION EXTENDED case PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION EXTENDED case POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'case' +CASE a WHEN b THEN c [WHEN d THEN e]* [ELSE f] END - When a = b, returns c; when a = d, return e; else return f +Example: + SELECT + CASE deptno + WHEN 1 THEN Engineering + WHEN 2 THEN Finance + ELSE admin + END, + CASE zone + WHEN 7 THEN Americas + ELSE Asia-Pac + END + FROM emp_details PREHOOK: query: EXPLAIN SELECT CASE 1 WHEN 1 THEN 2 @@ -196,4 +208,4 @@ FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -123 123.0 abcd +123.0 123.0 abcd diff --git a/ql/src/test/results/clientpositive/udf_using.q.out b/ql/src/test/results/clientpositive/udf_using.q.out index f20b7f5..3e33cc3 100644 --- a/ql/src/test/results/clientpositive/udf_using.q.out +++ b/ql/src/test/results/clientpositive/udf_using.q.out @@ -1,11 +1,11 @@ #### A masked pattern was here #### PREHOOK: type: CREATEFUNCTION PREHOOK: Output: database:default -PREHOOK: Output: lookup +PREHOOK: Output: default.lookup #### A masked pattern was here #### POSTHOOK: type: CREATEFUNCTION POSTHOOK: Output: database:default -POSTHOOK: Output: lookup +POSTHOOK: Output: default.lookup PREHOOK: query: create table udf_using (c1 string) PREHOOK: type: CREATETABLE PREHOOK: Output: database:default @@ -44,9 +44,9 @@ POSTHOOK: Output: default@udf_using PREHOOK: query: drop function lookup PREHOOK: type: 
DROPFUNCTION PREHOOK: Output: database:default -PREHOOK: Output: lookup +PREHOOK: Output: default.lookup POSTHOOK: query: drop function lookup POSTHOOK: type: DROPFUNCTION POSTHOOK: Output: database:default -POSTHOOK: Output: lookup +POSTHOOK: Output: default.lookup #### A masked pattern was here #### diff --git a/ql/src/test/results/clientpositive/udf_when.q.out b/ql/src/test/results/clientpositive/udf_when.q.out index cbb1210..e2121c8 100644 --- a/ql/src/test/results/clientpositive/udf_when.q.out +++ b/ql/src/test/results/clientpositive/udf_when.q.out @@ -2,12 +2,24 @@ PREHOOK: query: DESCRIBE FUNCTION when PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION when POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'when' +CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END - When a = true, returns b; when c = true, return d; else return e PREHOOK: query: DESCRIBE FUNCTION EXTENDED when PREHOOK: type: DESCFUNCTION POSTHOOK: query: DESCRIBE FUNCTION EXTENDED when POSTHOOK: type: DESCFUNCTION -There is no documentation for function 'when' +CASE WHEN a THEN b [WHEN c THEN d]* [ELSE e] END - When a = true, returns b; when c = true, return d; else return e +Example: + SELECT + CASE + WHEN deptno=1 THEN Engineering + WHEN deptno=2 THEN Finance + ELSE admin + END, + CASE + WHEN zone=7 THEN Americas + ELSE Asia-Pac + END + FROM emp_details PREHOOK: query: EXPLAIN SELECT CASE WHEN 1=1 THEN 2 @@ -179,4 +191,4 @@ FROM src tablesample (1 rows) POSTHOOK: type: QUERY POSTHOOK: Input: default@src #### A masked pattern was here #### -123 123.0 abcd +123.0 123.0 abcd diff --git a/ql/src/test/results/clientpositive/udtf_json_tuple.q.out b/ql/src/test/results/clientpositive/udtf_json_tuple.q.out index bc58856..61becaa 100644 --- a/ql/src/test/results/clientpositive/udtf_json_tuple.q.out +++ b/ql/src/test/results/clientpositive/udtf_json_tuple.q.out @@ -66,10 +66,10 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 6 Data size: 236 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -85,10 +85,10 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 236 Basic stats: COMPLETE Column stats: NONE function name: json_tuple Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Reduce 
Output Operator @@ -215,10 +215,10 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 6 Data size: 236 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col6 (type: string), _col9 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -234,10 +234,10 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 236 Basic stats: COMPLETE Column stats: NONE function name: json_tuple Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string), _col8 (type: string) + expressions: _col0 (type: string), _col6 (type: string), _col9 (type: string) outputColumnNames: _col0, _col1, _col2 Statistics: Num rows: 12 Data size: 472 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -301,15 +301,15 @@ STAGE PLANS: Select Operator Statistics: Num rows: 6 Data size: 236 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 9 Data size: 354 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col5 (type: string) - outputColumnNames: _col5 + expressions: _col6 (type: string) + outputColumnNames: _col6 Statistics: Num rows: 9 Data size: 354 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: _col5 (type: string) + keys: _col6 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 9 Data size: 354 Basic stats: COMPLETE Column stats: NONE @@ -330,15 +330,15 @@ STAGE PLANS: predicate: c0 is not null (type: boolean) Statistics: Num rows: 3 Data size: 118 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col4, _col5, _col6, _col7, _col8 + outputColumnNames: _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 9 Data size: 354 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col5 (type: string) - outputColumnNames: _col5 + expressions: _col6 (type: string) + outputColumnNames: _col6 Statistics: Num rows: 9 Data size: 354 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: _col5 (type: string) + keys: _col6 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 9 Data size: 354 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out b/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out index 24f5eac..9f82a61 100644 --- a/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out +++ b/ql/src/test/results/clientpositive/udtf_parse_url_tuple.q.out @@ -83,10 +83,10 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 6 Data size: 213 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8, _col9, _col10, 
_col11, _col12 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -102,10 +102,10 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 213 Basic stats: COMPLETE Column stats: NONE function name: parse_url_tuple Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col6 (type: string), _col7 (type: string), _col8 (type: string), _col9 (type: string), _col10 (type: string), _col11 (type: string), _col12 (type: string), _col13 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8, _col9 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -234,10 +234,10 @@ STAGE PLANS: outputColumnNames: key Statistics: Num rows: 6 Data size: 213 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col4 (type: string), _col6 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col7 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -253,10 +253,10 @@ STAGE PLANS: Statistics: Num rows: 6 Data size: 213 Basic stats: COMPLETE Column stats: NONE function name: parse_url_tuple Lateral View Join Operator - outputColumnNames: _col0, _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15 + outputColumnNames: _col0, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13, _col14, _col15, _col16 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), 
_col4 (type: string), _col6 (type: string), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string) + expressions: _col0 (type: string), _col5 (type: string), _col7 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6 Statistics: Num rows: 12 Data size: 426 Basic stats: COMPLETE Column stats: NONE Reduce Output Operator @@ -319,15 +319,15 @@ STAGE PLANS: Select Operator Statistics: Num rows: 6 Data size: 213 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + outputColumnNames: _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 9 Data size: 319 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string) - outputColumnNames: _col4 + expressions: _col5 (type: string) + outputColumnNames: _col5 Statistics: Num rows: 9 Data size: 319 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: _col4 (type: string) + keys: _col5 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 9 Data size: 319 Basic stats: COMPLETE Column stats: NONE @@ -348,15 +348,15 @@ STAGE PLANS: predicate: c8 is not null (type: boolean) Statistics: Num rows: 3 Data size: 106 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col4, _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12 + outputColumnNames: _col5, _col6, _col7, _col8, _col9, _col10, _col11, _col12, _col13 Statistics: Num rows: 9 Data size: 319 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col4 (type: string) - outputColumnNames: _col4 + expressions: _col5 (type: string) + outputColumnNames: _col5 Statistics: Num rows: 9 Data size: 319 Basic stats: COMPLETE Column stats: NONE Group By Operator aggregations: count() - keys: _col4 (type: string) + keys: _col5 (type: string) mode: hash outputColumnNames: _col0, _col1 Statistics: Num rows: 9 Data size: 319 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/udtf_stack.q.out b/ql/src/test/results/clientpositive/udtf_stack.q.out index d625cb1..0671907 100644 --- a/ql/src/test/results/clientpositive/udtf_stack.q.out +++ b/ql/src/test/results/clientpositive/udtf_stack.q.out @@ -23,10 +23,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: array) + expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -47,10 +47,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: stack Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: array) + expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data 
size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -90,10 +90,10 @@ STAGE PLANS: Select Operator Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: array) + expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit @@ -114,10 +114,10 @@ STAGE PLANS: Statistics: Num rows: 0 Data size: 5812 Basic stats: PARTIAL Column stats: COMPLETE function name: stack Lateral View Join Operator - outputColumnNames: _col4, _col5 + outputColumnNames: _col5, _col6 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Select Operator - expressions: _col4 (type: string), _col5 (type: array) + expressions: _col5 (type: string), _col6 (type: array) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 11624 Basic stats: PARTIAL Column stats: COMPLETE Limit diff --git a/ql/src/test/results/clientpositive/union20.q.out b/ql/src/test/results/clientpositive/union20.q.out index 98a99df..631d158 100644 --- a/ql/src/test/results/clientpositive/union20.q.out +++ b/ql/src/test/results/clientpositive/union20.q.out @@ -132,14 +132,14 @@ STAGE PLANS: 0 {KEY.reducesinkkey0} {VALUE._col0} 1 {KEY.reducesinkkey0} {VALUE._col0} outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 36 Data size: 9792 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1632 Basic stats: COMPLETE Column stats: PARTIAL Select Operator expressions: _col0 (type: string), _col1 (type: string), _col2 (type: string), _col3 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 - Statistics: Num rows: 36 Data size: 9792 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1632 Basic stats: COMPLETE Column stats: PARTIAL File Output Operator compressed: false - Statistics: Num rows: 36 Data size: 9792 Basic stats: COMPLETE Column stats: PARTIAL + Statistics: Num rows: 6 Data size: 1632 Basic stats: COMPLETE Column stats: PARTIAL table: input format: org.apache.hadoop.mapred.TextInputFormat output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat diff --git a/ql/src/test/results/clientpositive/union22.q.out b/ql/src/test/results/clientpositive/union22.q.out index 9b5f88c..670aac6 100644 --- a/ql/src/test/results/clientpositive/union22.q.out +++ b/ql/src/test/results/clientpositive/union22.q.out @@ -330,10 +330,10 @@ STAGE PLANS: keys: 0 k1 (type: string) 1 _col1 (type: string) - outputColumnNames: _col0, _col1, _col10, _col11 + outputColumnNames: _col0, _col1, _col11, _col12 Position of Big Table: 0 Select Operator - expressions: _col0 (type: string), _col1 (type: string), _col10 (type: string), _col11 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col11 (type: string), _col12 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -787,10 +787,10 @@ STAGE PLANS: filter predicates: 0 {(VALUE._col3 = '1')} 1 - outputColumnNames: _col0, _col1, _col10, _col11 + outputColumnNames: _col0, _col1, _col11, _col12 Statistics: Num rows: 182 Data size: 4062 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), 
_col1 (type: string), _col10 (type: string), _col11 (type: string) + expressions: _col0 (type: string), _col1 (type: string), _col11 (type: string), _col12 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 182 Data size: 4062 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union24.q.out b/ql/src/test/results/clientpositive/union24.q.out index 90378df..76c1adb 100644 --- a/ql/src/test/results/clientpositive/union24.q.out +++ b/ql/src/test/results/clientpositive/union24.q.out @@ -922,10 +922,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 56 Data size: 268 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: bigint) + expressions: _col0 (type: string), _col6 (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 56 Data size: 268 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union26.q.out b/ql/src/test/results/clientpositive/union26.q.out index e184664..8edf61c 100644 --- a/ql/src/test/results/clientpositive/union26.q.out +++ b/ql/src/test/results/clientpositive/union26.q.out @@ -111,7 +111,7 @@ STAGE PLANS: outputColumnNames: key, value Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE Lateral View Join Operator - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) @@ -143,7 +143,7 @@ STAGE PLANS: Statistics: Num rows: 29 Data size: 5812 Basic stats: COMPLETE Column stats: NONE function name: explode Lateral View Join Operator - outputColumnNames: _col0, _col1, _col6 + outputColumnNames: _col0, _col1, _col7 Statistics: Num rows: 58 Data size: 11624 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col0 (type: string), _col1 (type: string) diff --git a/ql/src/test/results/clientpositive/union27.q.out b/ql/src/test/results/clientpositive/union27.q.out index 20b88b5..99cfa6f 100644 --- a/ql/src/test/results/clientpositive/union27.q.out +++ b/ql/src/test/results/clientpositive/union27.q.out @@ -84,10 +84,10 @@ STAGE PLANS: condition expressions: 0 1 {VALUE._col0} - outputColumnNames: _col5 + outputColumnNames: _col6 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: '97' (type: string), _col5 (type: string) + expressions: '97' (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 275 Data size: 2921 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union32.q.out b/ql/src/test/results/clientpositive/union32.q.out index 35363c1..b353db9 100644 --- a/ql/src/test/results/clientpositive/union32.q.out +++ b/ql/src/test/results/clientpositive/union32.q.out @@ -501,10 +501,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToDouble(UDFToLong(_col0)) (type: double), UDFToString(UDFToDouble(_col4)) (type: string) + expressions: UDFToDouble(UDFToLong(_col0)) 
(type: double), UDFToString(UDFToDouble(_col5)) (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -658,10 +658,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col0, _col4 + outputColumnNames: _col0, _col5 Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: UDFToDouble(UDFToLong(_col0)) (type: double), UDFToDouble(_col4) (type: double) + expressions: UDFToDouble(UDFToLong(_col0)) (type: double), UDFToDouble(_col5) (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 5 Data size: 38 Basic stats: COMPLETE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_12.q.out b/ql/src/test/results/clientpositive/union_remove_12.q.out index ada4883..033475f 100644 --- a/ql/src/test/results/clientpositive/union_remove_12.q.out +++ b/ql/src/test/results/clientpositive/union_remove_12.q.out @@ -174,10 +174,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), UDFToLong(_col5) (type: bigint) + expressions: _col0 (type: string), UDFToLong(_col6) (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_13.q.out b/ql/src/test/results/clientpositive/union_remove_13.q.out index a160843..3c7f8fa 100644 --- a/ql/src/test/results/clientpositive/union_remove_13.q.out +++ b/ql/src/test/results/clientpositive/union_remove_13.q.out @@ -197,10 +197,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), UDFToLong(_col5) (type: bigint) + expressions: _col0 (type: string), UDFToLong(_col6) (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_remove_14.q.out b/ql/src/test/results/clientpositive/union_remove_14.q.out index cbb7ae8..315c958 100644 --- a/ql/src/test/results/clientpositive/union_remove_14.q.out +++ b/ql/src/test/results/clientpositive/union_remove_14.q.out @@ -176,10 +176,10 @@ STAGE PLANS: keys: 0 key (type: string) 1 key (type: string) - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE Select Operator - expressions: _col0 (type: string), UDFToLong(_col5) (type: bigint) + expressions: _col0 (type: string), UDFToLong(_col6) (type: bigint) outputColumnNames: _col0, _col1 Statistics: Num rows: 0 Data size: 0 Basic stats: NONE Column stats: NONE File Output Operator diff --git a/ql/src/test/results/clientpositive/union_top_level.q.out b/ql/src/test/results/clientpositive/union_top_level.q.out index 14225d1..10694b2 100644 --- a/ql/src/test/results/clientpositive/union_top_level.q.out +++ b/ql/src/test/results/clientpositive/union_top_level.q.out @@ -260,10 +260,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - 
outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Limit @@ -364,10 +364,10 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {VALUE._col0} - outputColumnNames: _col0, _col5 + outputColumnNames: _col0, _col6 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col0 (type: string), _col5 (type: string) + expressions: _col0 (type: string), _col6 (type: string) outputColumnNames: _col0, _col1 Statistics: Num rows: 31 Data size: 3196 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/vector_between_in.q.out b/ql/src/test/results/clientpositive/vector_between_in.q.out index bbd23d2..e7b64e2 100644 --- a/ql/src/test/results/clientpositive/vector_between_in.q.out +++ b/ql/src/test/results/clientpositive/vector_between_in.q.out @@ -640,34 +640,34 @@ POSTHOOK: Input: default@decimal_date_test -18.5162162162 -17.3216216216 -16.7243243243 --16.127027027 +-16.1270270270 -15.5297297297 -10.7513513514 -9.5567567568 -8.3621621622 --5.972972973 +-5.9729729730 -3.5837837838 4.1810810811 4.7783783784 4.7783783784 5.3756756757 -5.972972973 -5.972972973 +5.9729729730 +5.9729729730 11.3486486486 11.3486486486 11.9459459459 14.9324324324 19.1135135135 20.3081081081 -22.1 +22.1000000000 24.4891891892 33.4486486486 34.6432432432 40.0189189189 42.4081081081 43.0054054054 -44.2 -44.2 +44.2000000000 +44.2000000000 44.7972972973 45.9918918919 PREHOOK: query: SELECT COUNT(*) FROM decimal_date_test WHERE cdecimal1 NOT BETWEEN -2000 AND 4390.1351351351 diff --git a/ql/src/test/results/clientpositive/vector_data_types.q.out b/ql/src/test/results/clientpositive/vector_data_types.q.out index a1183ad..6b0598c 100644 --- a/ql/src/test/results/clientpositive/vector_data_types.q.out +++ b/ql/src/test/results/clientpositive/vector_data_types.q.out @@ -151,7 +151,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.7 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 2013-03-01 09:11:58.703271 75.70 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 2013-03-01 09:11:58.703285 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 2013-03-01 09:11:58.703086 58.86 topology 69 489 65536 4294967404 33.52 17.99 false oscar ichabod 2013-03-01 09:11:58.703247 32.68 topology @@ -226,7 +226,7 @@ POSTHOOK: query: SELECT t, si, i, b, f, d, bo, s, ts, dec, bin FROM over1korc OR POSTHOOK: type: QUERY POSTHOOK: Input: default@over1korc #### A masked pattern was here #### -108 301 65536 4294967357 90.05 17.59 true ethan johnson 1860-11-12 20:05:55.011470936 75.7 undecided +108 301 65536 4294967357 90.05 17.59 true ethan johnson 1860-11-12 20:05:55.011470936 75.70 undecided 118 497 65536 4294967381 50.32 12.72 false david nixon 1860-11-12 20:05:55.011484936 83.48 values clariffication 18 280 65536 4294967320 32.92 45.94 false holly white 1860-11-12 20:05:55.011285936 58.86 topology 69 489 65536 4294967404 33.52 17.99 false 
oscar ichabod 1860-11-12 20:05:55.011446936 32.68 topology diff --git a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out index 2c4d552..874836d 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_aggregate.q.out @@ -101,11 +101,11 @@ POSTHOOK: query: SELECT cint, POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_vgby #### A masked pattern was here #### -NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.715384615385 -5147.907692307693 6010604.3076923073536 1956.576923076922966667 6821.495748565159 6822.606289190924 --3728 6 5831542.269248378 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.211097692 -4033.445769230769 6967702.8672438458471 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 --563 2 -515.621072973 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.5607769230769 -4033.445769230769 -4651.0065461538459 -2325.50327307692295 1707.9424961538462 2415.395441814127 -762 2 5831542.269248378 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.211097692 1833.9456923076925 6986288.1567899996925 3493144.07839499984625 3491310.1327026924 4937458.140118758 -6981 3 5831542.269248378 -515.621072973 5830511.027102432 1943503.67570081066667 2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.5607769230769 6983219.0895438458462 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 -253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.969230769231 -11712.99230769231 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 -528534767 1024 5831542.269248378 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.211097692 -11710.130769230771 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 -626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.746153846154 -11712.276923076923 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 +NULL 3072 9318.4351351351 -4298.1513513514 5018444.1081079808 1633.60810810806667 5695.483082135364 5696.4103077145055 3072 11160.71538461538500 -5147.90769230769300 6010604.30769230735360 1956.576923076922966667 6821.495748565159 6822.606289190924 +-3728 6 5831542.2692483780 -3367.6517567568 5817556.0411483778 969592.67352472963333 2174330.2092403853 2381859.406131774 6 6984454.21109769200000 -4033.445769230769 6967702.86724384584710 1161283.811207307641183333 2604201.2704476737 2852759.5602156054 +-563 2 -515.6210729730 -3367.6517567568 -3883.2728297298 -1941.6364148649 1426.0153418918999 2016.6902366556308 2 -617.56077692307690 -4033.445769230769 -4651.00654615384590 -2325.50327307692295 1707.9424961538462 2415.395441814127 +762 2 5831542.2692483780 1531.2194054054 5833073.4886537834 2916536.7443268917 2915005.5249214866 4122440.3477364695 2 6984454.21109769200000 1833.9456923076925 6986288.15678999969250 3493144.07839499984625 3491310.1327026924 4937458.140118758 +6981 3 5831542.269248378 -515.6210729730 5830511.0271024320 1943503.67570081066667 
2749258.455012492 3367140.1929065133 3 6984454.211097692 -617.56077692307690 6983219.08954384584620 2327739.696514615282066667 3292794.4113115156 4032833.0678006653 +253665376 1024 9767.0054054054 -9779.5486486487 -347484.0818378374 -339.33992366976309 5708.9563478862 5711.745967572779 1024 11697.96923076923100 -11712.99230769231000 -416182.64030769233089 -406.428359675480791885 6837.632716002934 6840.973851172274 +528534767 1024 5831542.2692483780 -9777.1594594595 11646372.8607481068 11373.41099682432305 257528.92988206653 257654.7686043977 1024 6984454.21109769200000 -11710.13076923077100 13948892.79980307629003 13621.965624807691689482 308443.1074570801 308593.82484083984 +626923679 1024 9723.4027027027 -9778.9513513514 10541.0525297287 10.29399661106318 5742.09145323734 5744.897264034267 1024 11645.74615384615400 -11712.27692307692300 12625.04759999997746 12.329148046874977988 6877.318722794877 6880.679250101603 diff --git a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out index a508732..aa0696f 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_cast.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_cast.q.out @@ -46,13 +46,13 @@ POSTHOOK: query: SELECT cdouble, cint, cboolean1, ctimestamp1, CAST(cdouble AS D POSTHOOK: type: QUERY POSTHOOK: Input: default@alltypesorc #### A masked pattern was here #### --13326.0 528534767 true 1969-12-31 15:59:46.674 -13326 528534767 1 -13 --15813.0 528534767 true 1969-12-31 15:59:55.787 -15813 528534767 1 -4 --9566.0 528534767 true 1969-12-31 15:59:44.187 -9566 528534767 1 -16 -15007.0 528534767 true 1969-12-31 15:59:50.434 15007 528534767 1 -10 -7021.0 528534767 true 1969-12-31 16:00:15.007 7021 528534767 1 15 -4963.0 528534767 true 1969-12-31 16:00:07.021 4963 528534767 1 7 --7824.0 528534767 true 1969-12-31 16:00:04.963 -7824 528534767 1 5 --15431.0 528534767 true 1969-12-31 15:59:52.176 -15431 528534767 1 -8 --15549.0 528534767 true 1969-12-31 15:59:44.569 -15549 528534767 1 -15 -5780.0 528534767 true 1969-12-31 15:59:44.451 5780 528534767 1 -16 +-13326.0 528534767 true 1969-12-31 15:59:46.674 -13326.0000000000 528534767.00000000000000 1.00 -13 +-15813.0 528534767 true 1969-12-31 15:59:55.787 -15813.0000000000 528534767.00000000000000 1.00 -4 +-9566.0 528534767 true 1969-12-31 15:59:44.187 -9566.0000000000 528534767.00000000000000 1.00 -16 +15007.0 528534767 true 1969-12-31 15:59:50.434 15007.0000000000 528534767.00000000000000 1.00 -10 +7021.0 528534767 true 1969-12-31 16:00:15.007 7021.0000000000 528534767.00000000000000 1.00 15 +4963.0 528534767 true 1969-12-31 16:00:07.021 4963.0000000000 528534767.00000000000000 1.00 7 +-7824.0 528534767 true 1969-12-31 16:00:04.963 -7824.0000000000 528534767.00000000000000 1.00 5 +-15431.0 528534767 true 1969-12-31 15:59:52.176 -15431.0000000000 528534767.00000000000000 1.00 -8 +-15549.0 528534767 true 1969-12-31 15:59:44.569 -15549.0000000000 528534767.00000000000000 1.00 -15 +5780.0 528534767 true 1969-12-31 15:59:44.451 5780.0000000000 528534767.00000000000000 1.00 -16 diff --git a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out index 094eb8e..def9f8d 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_expressions.q.out @@ -53,13 +53,13 @@ POSTHOOK: query: SELECT cdecimal1 + cdecimal2, cdecimal1 - (2*cdecimal2), ((cdec POSTHOOK: type: 
QUERY POSTHOOK: Input: default@decimal_test #### A masked pattern was here #### -19699.417463617423 -12507.913305613346 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 -9216.339708939685 -5851.80644490647 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 -6514.8403326403464 -4136.5212058211928 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 3550.4538461538464 1969-12-31 16:49:24.386486486 -7587.301455301477 -4817.467775467754 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 -19197.972972973 -12189.527027027 0.835155361813429 2.6880848817567654E7 5.472972973 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 -17098.9945945946 -10856.8054054054 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 -12433.723076923077 -7894.646153846154 0.8352770361086894 1.12754688E7 7.6 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 -7247.316839916862 -4601.598544698524 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 -14757.1700623700465 -9369.891476091493 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 8042.3538461538465 1969-12-31 17:51:54.816216216 -10964.832016631993 -6961.991060291086 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 +19699.41746361742300 -12507.91330561334600 0.8351496686995997 2.8303425077026896E7 3.6405405405 8963 10735 -17 8963 true 10735.776923076923 8963.641 10735.776923076923 1969-12-31 18:29:23.64054054 +9216.33970893968500 -5851.80644490647000 0.8353975893550668 6195112.1797296945 3.6243243243 4193 5022 -98 4193 true 5022.715384615385 4193.6245 5022.715384615385 1969-12-31 17:09:53.624324324 +6514.84033264034640 -4136.52120582119280 0.8355907765708067 3095563.9418919063 4.3864864865 2964 3550 -34 2964 true 3550.4538461538464 2964.3865 3550.4538461538464 1969-12-31 16:49:24.386486486 +7587.30145530147700 -4817.46777546775400 0.8354976172734904 4198623.24324327 2.3783783784 3452 4134 38 3452 true 4134.923076923077 3452.3784 4134.923076923077 1969-12-31 16:57:32.378378378 +19197.97297297300000 -12189.52702702700000 0.835155361813429 2.6880848817567654E7 5.4729729730 8735 10462 -34 8735 true 10462.5 8735.473 10462.5 1969-12-31 18:25:35.472972973 +17098.99459459460000 -10856.80540540540000 0.8351828165813104 2.132423090270272E7 0.3945945946 7780 9318 102 7780 true 9318.6 7780.3945 9318.6 1969-12-31 18:09:40.394594594 +12433.72307692307700 -7894.64615384615400 0.8352770361086894 1.12754688E7 7.6000000000 5657 6776 120 5657 true 6776.123076923077 5657.6 6776.123076923077 1969-12-31 17:34:17.6 +7247.31683991686200 -4601.59854469852400 0.8355241651897876 3830775.6932432684 7.6783783784 3297 3949 109 3297 true 3949.638461538462 3297.6785 3949.638461538462 1969-12-31 16:54:57.678378378 +14757.17006237004650 -9369.89147609149300 0.8352226654922171 1.5883214124324286E7 4.8162162162 6714 8042 106 6714 true 8042.3538461538465 6714.8164 
8042.3538461538465 1969-12-31 17:51:54.816216216 +10964.83201663199300 -6961.99106029108600 0.8353232978714221 8768719.779729689 9.2243243243 4989 5975 87 4989 true 5975.607692307693 4989.224 5975.607692307693 1969-12-31 17:23:09.224324324 diff --git a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out index 3327c90..4a90849 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_mapjoin.q.out @@ -69,10 +69,10 @@ STAGE PLANS: keys: 0 6981 (type: int) 1 6981 (type: int) - outputColumnNames: _col1, _col8 + outputColumnNames: _col1, _col9 Statistics: Num rows: 3379 Data size: 595391 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col8 (type: decimal(23,14)) + expressions: 6981 (type: int), 6981 (type: int), _col1 (type: decimal(20,10)), _col9 (type: decimal(23,14)) outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 3379 Data size: 595391 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -114,8 +114,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL @@ -124,8 +124,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL @@ -134,8 +134,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL @@ -144,8 +144,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL 6984454.211097692 6981 6981 NULL 6984454.211097692 6981 6981 NULL 6984454.211097692 @@ -154,8 +154,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL 6984454.211097692 6981 6981 NULL 6984454.211097692 6981 6981 NULL 6984454.211097692 -6981 6981 -515.621072973 6984454.211097692 -6981 6981 -515.621072973 6984454.211097692 +6981 6981 -515.6210729730 6984454.211097692 +6981 6981 -515.6210729730 6984454.211097692 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL @@ -164,8 +164,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL @@ -174,8 +174,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL NULL 6981 6981 NULL NULL 6981 6981 NULL NULL @@ -184,8 +184,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL NULL 6981 6981 NULL NULL 
6981 6981 NULL NULL -6981 6981 -515.621072973 NULL -6981 6981 -515.621072973 NULL +6981 6981 -515.6210729730 NULL +6981 6981 -515.6210729730 NULL 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 @@ -194,8 +194,8 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 @@ -204,5 +204,5 @@ POSTHOOK: Input: default@decimal_mapjoin 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 6981 6981 NULL -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 -6981 6981 -515.621072973 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 +6981 6981 -515.6210729730 -617.5607769230769 diff --git a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out index d60d855..2e98ceb 100644 --- a/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out +++ b/ql/src/test/results/clientpositive/vector_decimal_math_funcs.q.out @@ -192,14 +192,14 @@ and sin(cdecimal1) >= -1.0 POSTHOOK: type: QUERY POSTHOOK: Input: default@decimal_test #### A masked pattern was here #### --119.4594594595 -119.46 -119 -120 -119 1.316485E-52 NULL NULL NULL NULL NULL NULL NULL NULL 119.4594594595 -0.07885666683797002 NaN 0.9968859644388647 NaN -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL -9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 -9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 -9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.4351351351 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 
4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL --4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.1513513514 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-119.4594594595 -119.46 -119 -120 -119 1.316485E-52 NULL NULL NULL NULL NULL NULL NULL NULL 119.459459459500000000 -0.07885666683797002 NaN 0.9968859644388647 NaN -1.5624254815943668 -6844.522849943508 -2.0849608902209606 -119.4594594595 119.4594594595 -1 NULL +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.435135135100000000 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.435135135100000000 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +9318.4351351351 9318.44 9318 9318 9319 Infinity 9.13974998962673 3.969342986470191 13.185871984999437 NULL 13.185871984999437 173.867220004793 173.867220004793 96.53204201266593 9318.435135135100000000 0.4540668481851705 NaN 0.8909676185918236 NaN 1.5706890126394983 533907.0049096602 162.63737424163023 9318.4351351351 -9318.4351351351 1 -0.9607267417229353 +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 
0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL +-4298.1513513514 -4298.15 -4298 -4299 -4298 0.0 NULL NULL NULL NULL NULL NULL NULL NULL 4298.151351351400000000 -0.43730633941118113 NaN 0.899312607223313 NaN -1.5705636686355597 -246265.93214088667 -75.01689283012556 -4298.1513513514 4298.1513513514 -1 NULL diff --git a/ql/src/test/results/clientpositive/vectorized_bucketmapjoin1.q.out b/ql/src/test/results/clientpositive/vectorized_bucketmapjoin1.q.out index ea711e9..66cf878 100644 --- a/ql/src/test/results/clientpositive/vectorized_bucketmapjoin1.q.out +++ b/ql/src/test/results/clientpositive/vectorized_bucketmapjoin1.q.out @@ -123,9 +123,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -184,9 +184,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false @@ -255,9 +255,9 @@ STAGE PLANS: keys: 0 key (type: int) 1 key (type: int) - outputColumnNames: _col0, _col1, _col4, _col5 + outputColumnNames: _col0, _col1, _col5, _col6 Select Operator - expressions: _col0 (type: int), _col1 (type: string), _col4 (type: int), _col5 (type: string) + expressions: _col0 (type: int), _col1 (type: string), _col5 (type: int), _col6 (type: string) outputColumnNames: _col0, _col1, _col2, _col3 File Output Operator compressed: false diff --git a/ql/src/test/results/clientpositive/vectorized_context.q.out b/ql/src/test/results/clientpositive/vectorized_context.q.out index dd54a5e..fe24fd7 100644 --- a/ql/src/test/results/clientpositive/vectorized_context.q.out +++ b/ql/src/test/results/clientpositive/vectorized_context.q.out @@ -121,7 +121,7 @@ STAGE PLANS: Statistics: Num rows: 3038 Data size: 12152 Basic stats: COMPLETE Column stats: NONE HashTable Sink Operator condition expressions: - 0 {_col2} {_col6} + 0 {_col2} {_col7} 1 keys: 0 _col1 (type: int) @@ -159,21 +159,21 @@ STAGE PLANS: keys: 0 ss_store_sk (type: int) 1 s_store_sk (type: int) - outputColumnNames: _col1, _col2, _col6 + outputColumnNames: _col1, _col2, _col7 Statistics: Num rows: 3341 Data size: 338652 Basic stats: COMPLETE Column stats: NONE Map Join Operator condition map: Inner Join 0 to 1 condition expressions: - 0 {_col2} {_col6} + 0 {_col2} {_col7} 1 keys: 0 _col1 (type: int) 1 hd_demo_sk (type: int) - outputColumnNames: _col2, _col6 + outputColumnNames: _col2, _col7 Statistics: Num rows: 3675 Data size: 372517 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col6 (type: string), _col2 (type: double) + 
expressions: _col7 (type: string), _col2 (type: double) outputColumnNames: _col0, _col1 Statistics: Num rows: 3675 Data size: 372517 Basic stats: COMPLETE Column stats: NONE Limit diff --git a/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out b/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out index 6a085f6..8173eeb 100644 --- a/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/vectorized_mapjoin.q.out @@ -52,14 +52,14 @@ STAGE PLANS: keys: 0 cint (type: int) 1 cint (type: int) - outputColumnNames: _col2, _col16 + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: int), _col16 (type: int) - outputColumnNames: _col2, _col16 + expressions: _col2 (type: int), _col17 (type: int) + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: count(_col2), max(_col16), min(_col2), avg((_col2 + _col16)) + aggregations: count(_col2), max(_col17), min(_col2), avg((_col2 + _col17)) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out b/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out index ac2dfb5..d6b5f36 100644 --- a/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out +++ b/ql/src/test/results/clientpositive/vectorized_nested_mapjoin.q.out @@ -65,10 +65,10 @@ STAGE PLANS: keys: 0 ctinyint (type: tinyint) 1 ctinyint (type: tinyint) - outputColumnNames: _col0, _col1, _col5, _col14 + outputColumnNames: _col0, _col1, _col5, _col15 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Filter Operator - predicate: (_col0 = _col14) (type: boolean) + predicate: (_col0 = _col15) (type: boolean) Statistics: Num rows: 25935 Data size: 103741 Basic stats: COMPLETE Column stats: NONE Select Operator expressions: _col1 (type: smallint), _col5 (type: double) diff --git a/ql/src/test/results/clientpositive/vectorized_ptf.q.out b/ql/src/test/results/clientpositive/vectorized_ptf.q.out index d847337..202ec40 100644 --- a/ql/src/test/results/clientpositive/vectorized_ptf.q.out +++ b/ql/src/test/results/clientpositive/vectorized_ptf.q.out @@ -235,7 +235,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -918,7 +918,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value 
expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -1169,7 +1169,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -1498,7 +1498,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -1837,7 +1837,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -2188,7 +2188,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) 
Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -2535,7 +2535,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -2724,10 +2724,10 @@ STAGE PLANS: condition expressions: 0 1 {KEY.reducesinkkey0} {VALUE._col0} {VALUE._col1} {VALUE._col2} {VALUE._col3} {VALUE._col4} {VALUE._col5} {VALUE._col6} {VALUE._col7} - outputColumnNames: _col11, _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19 + outputColumnNames: _col12, _col13, _col14, _col15, _col16, _col17, _col18, _col19, _col20 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col11 (type: int), _col12 (type: string), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: int), _col17 (type: string), _col18 (type: double), _col19 (type: string) + expressions: _col12 (type: int), _col13 (type: string), _col14 (type: string), _col15 (type: string), _col16 (type: string), _col17 (type: int), _col18 (type: string), _col19 (type: double), _col20 (type: string) outputColumnNames: _col0, _col1, _col2, _col3, _col4, _col5, _col6, _col7, _col8 Statistics: Num rows: 14 Data size: 8823 Basic stats: COMPLETE Column stats: NONE File Output Operator @@ -2893,7 +2893,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment 
(type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -3211,7 +3211,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -3531,7 +3531,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -3861,7 +3861,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -3929,8 +3929,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: 
org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -3949,7 +3949,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -3960,8 +3960,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -3969,8 +3969,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -4275,7 +4275,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -4702,7 +4702,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), 
BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -5109,7 +5109,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -5979,7 +5979,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -6045,8 +6045,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -6097,8 +6097,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ 
serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -6106,8 +6106,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -6375,20 +6375,20 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@part POSTHOOK: Output: default@part_4 POSTHOOK: Output: default@part_5 -POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), 
(part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), 
(part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_4.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_4.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), 
(part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_5.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_4 PREHOOK: type: QUERY PREHOOK: Input: default@part_4 @@ -6619,7 +6619,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> 
Alias: #### A masked pattern was here #### @@ -6687,8 +6687,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -6707,7 +6707,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -6718,8 +6718,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -6727,8 +6727,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7064,7 +7064,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), 
p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -7130,8 +7130,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7150,7 +7150,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -7161,8 +7161,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7170,8 +7170,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7192,8 +7192,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7212,7 +7212,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -7223,8 +7223,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7232,8 +7232,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7552,7 +7552,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -7618,8 +7618,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7638,7 +7638,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -7649,8 +7649,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7658,8 +7658,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -7990,7 +7990,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern 
was here #### @@ -8056,8 +8056,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8076,7 +8076,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -8087,8 +8087,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8096,8 +8096,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8120,8 +8120,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8140,7 +8140,7 @@ STAGE PLANS: 
Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -8151,8 +8151,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8160,8 +8160,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8523,7 +8523,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -8591,8 +8591,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types 
int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8611,7 +8611,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -8622,8 +8622,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8631,8 +8631,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -8960,7 +8960,7 @@ STAGE PLANS: Map-reduce partition columns: p_mfgr (type: string), p_name (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string) + value expressions: p_partkey (type: int), p_name (type: string), p_mfgr (type: string), p_brand (type: string), p_type (type: string), p_size (type: int), p_container (type: string), p_retailprice (type: double), p_comment (type: string), BLOCK__OFFSET__INSIDE__FILE (type: bigint), INPUT__FILE__NAME (type: string), ROW__ID (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -9028,8 +9028,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns 
_col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -9048,7 +9048,7 @@ STAGE PLANS: Map-reduce partition columns: _col2 (type: string), _col1 (type: string) Statistics: Num rows: 26 Data size: 16042 Basic stats: COMPLETE Column stats: NONE tag: -1 - value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string) + value expressions: _col0 (type: int), _col1 (type: string), _col2 (type: string), _col3 (type: string), _col4 (type: string), _col5 (type: int), _col6 (type: string), _col7 (type: double), _col8 (type: string), _col9 (type: bigint), _col10 (type: string), _col11 (type: struct) auto parallelism: false Path -> Alias: #### A masked pattern was here #### @@ -9059,8 +9059,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe @@ -9068,8 +9068,8 @@ STAGE PLANS: input format: org.apache.hadoop.mapred.SequenceFileInputFormat output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat properties: - columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10 - columns.types int,string,string,string,string,int,string,double,string,bigint,string + columns _col0,_col1,_col2,_col3,_col4,_col5,_col6,_col7,_col8,_col9,_col10,_col11 + columns.types int,string,string,string,string,int,string,double,string,bigint,string,struct escape.delim \ serialization.lib org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe diff --git a/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out b/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out index e2c3295..b55a4c6 100644 --- a/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out +++ b/ql/src/test/results/clientpositive/vectorized_shufflejoin.q.out @@ -45,14 +45,14 @@ STAGE PLANS: condition expressions: 0 {KEY.reducesinkkey0} 1 {KEY.reducesinkkey0} - outputColumnNames: _col2, _col16 + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Select Operator - expressions: _col2 (type: int), _col16 (type: int) - outputColumnNames: _col2, _col16 + expressions: _col2 (type: int), _col17 (type: int) + outputColumnNames: _col2, _col17 Statistics: Num rows: 51870 Data size: 207482 Basic stats: COMPLETE Column stats: NONE Group By Operator - aggregations: count(_col2), 
max(_col16), min(_col2), avg((_col2 + _col16)) + aggregations: count(_col2), max(_col17), min(_col2), avg((_col2 + _col17)) mode: hash outputColumnNames: _col0, _col1, _col2, _col3 Statistics: Num rows: 1 Data size: 16 Basic stats: COMPLETE Column stats: NONE diff --git a/ql/src/test/results/clientpositive/windowing.q.out b/ql/src/test/results/clientpositive/windowing.q.out index 0a68483..b72f4c0 100644 --- a/ql/src/test/results/clientpositive/windowing.q.out +++ b/ql/src/test/results/clientpositive/windowing.q.out @@ -1370,26 +1370,26 @@ POSTHOOK: Input: default@part POSTHOOK: Output: default@part_1 POSTHOOK: Output: default@part_2 POSTHOOK: Output: default@part_3 -POSTHOOK: Lineage: part_1.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_1.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_1.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_1.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_1.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_1.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, 
type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, 
comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_2.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_3.c SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_3.ca SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_3.fv SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_3.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), 
(part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_3.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: part_3.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: part_1.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_1.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), 
(part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_1.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_1.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_1.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_1.s SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), 
(part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.cud SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.dr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.fv1 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), 
(part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.r SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_2.s2 SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_3.c SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), 
(part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_3.ca SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_3.fv SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_3.p_mfgr SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_3.p_name SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, 
comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: part_3.p_size SCRIPT [(part)part.FieldSchema(name:p_partkey, type:int, comment:null), (part)part.FieldSchema(name:p_name, type:string, comment:null), (part)part.FieldSchema(name:p_mfgr, type:string, comment:null), (part)part.FieldSchema(name:p_brand, type:string, comment:null), (part)part.FieldSchema(name:p_type, type:string, comment:null), (part)part.FieldSchema(name:p_size, type:int, comment:null), (part)part.FieldSchema(name:p_container, type:string, comment:null), (part)part.FieldSchema(name:p_retailprice, type:double, comment:null), (part)part.FieldSchema(name:p_comment, type:string, comment:null), (part)part.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (part)part.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (part)part.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from part_1 PREHOOK: type: QUERY PREHOOK: Input: default@part_1 @@ -2334,3 +2334,25 @@ where p_mfgr = 'Manufacturer#6' POSTHOOK: type: QUERY POSTHOOK: Input: default@part #### A masked pattern was here #### +PREHOOK: query: -- 46. window sz is same as partition sz +select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) +from part +where p_mfgr='Manufacturer#1' +PREHOOK: type: QUERY +PREHOOK: Input: default@part +#### A masked pattern was here #### +POSTHOOK: query: -- 46. 
window sz is same as partition sz +select p_retailprice, avg(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following), +sum(p_retailprice) over (partition by p_mfgr order by p_name rows between current row and 6 following) +from part +where p_mfgr='Manufacturer#1' +POSTHOOK: type: QUERY +POSTHOOK: Input: default@part +#### A masked pattern was here #### +1173.15 1458.2883333333336 8749.730000000001 +1173.15 1515.3160000000003 7576.580000000002 +1753.76 1600.8575000000003 6403.430000000001 +1602.59 1549.8900000000003 4649.670000000001 +1414.42 1523.5400000000004 3047.080000000001 +1632.66 1632.6600000000008 1632.6600000000008 diff --git a/ql/src/test/results/clientpositive/windowing_decimal.q.out b/ql/src/test/results/clientpositive/windowing_decimal.q.out index 08dd6ab..2354b6b 100644 --- a/ql/src/test/results/clientpositive/windowing_decimal.q.out +++ b/ql/src/test/results/clientpositive/windowing_decimal.q.out @@ -99,8 +99,8 @@ from part_dec POSTHOOK: type: QUERY POSTHOOK: Input: default@part_dec #### A masked pattern was here #### -Manufacturer#1 1173.15 1173.15 2346.3 -Manufacturer#1 1173.15 1173.15 2346.3 +Manufacturer#1 1173.15 1173.15 2346.30 +Manufacturer#1 1173.15 1173.15 2346.30 Manufacturer#1 1414.42 1173.15 3760.72 Manufacturer#1 1602.59 1173.15 5363.31 Manufacturer#1 1632.66 1173.15 6995.97 @@ -118,7 +118,7 @@ Manufacturer#3 1922.98 1190.27 7532.61 Manufacturer#4 1206.26 1206.26 1206.26 Manufacturer#4 1290.35 1206.26 2496.61 Manufacturer#4 1375.42 1206.26 3872.03 -Manufacturer#4 1620.67 1206.26 5492.7 +Manufacturer#4 1620.67 1206.26 5492.70 Manufacturer#4 1844.92 1206.26 7337.62 Manufacturer#5 1018.1 1018.1 1018.1 Manufacturer#5 1464.48 1018.1 2482.58 @@ -139,8 +139,8 @@ from part_dec POSTHOOK: type: QUERY POSTHOOK: Input: default@part_dec #### A masked pattern was here #### -Manufacturer#1 1173.15 1173.15 2346.3 -Manufacturer#1 1173.15 1173.15 2346.3 +Manufacturer#1 1173.15 1173.15 2346.30 +Manufacturer#1 1173.15 1173.15 2346.30 Manufacturer#1 1414.42 1414.42 1414.42 Manufacturer#1 1602.59 1602.59 1602.59 Manufacturer#1 1632.66 1632.66 1632.66 diff --git a/ql/src/test/results/clientpositive/windowing_expressions.q.out b/ql/src/test/results/clientpositive/windowing_expressions.q.out index ee9f6ca..bd432c4 100644 --- a/ql/src/test/results/clientpositive/windowing_expressions.q.out +++ b/ql/src/test/results/clientpositive/windowing_expressions.q.out @@ -698,10 +698,10 @@ POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k POSTHOOK: Output: default@t1 POSTHOOK: Output: default@t2 -POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: t1.b1 SCRIPT 
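The query-46 output added above exercises a forward-looking frame, rows between current row and 6 following, over the six Manufacturer#1 rows, so each row's window runs from itself to the end of the partition. The sketch below is illustrative only: it hard-codes the six p_retailprice values printed above rather than using Hive's windowing code, and simply reproduces the sums and averages recorded in the golden output.

    public class ForwardFrameSketch {
      public static void main(String[] args) {
        // The six Manufacturer#1 p_retailprice values from the golden output above.
        double[] prices = {1173.15, 1173.15, 1753.76, 1602.59, 1414.42, 1632.66};
        for (int i = 0; i < prices.length; i++) {
          // Frame "rows between current row and 6 following": the current row plus
          // up to six rows after it, truncated at the end of the partition.
          int end = Math.min(prices.length, i + 7);
          double sum = 0;
          for (int j = i; j < end; j++) {
            sum += prices[j];
          }
          System.out.println(prices[i] + "\t" + (sum / (end - i)) + "\t" + sum);
        }
      }
    }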
[(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] -POSTHOOK: Lineage: t2.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), ] +POSTHOOK: Lineage: t1.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), 
(over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: t1.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: t2.a1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] +POSTHOOK: Lineage: t2.b1 SCRIPT [(over10k)over10k.FieldSchema(name:t, type:tinyint, comment:null), (over10k)over10k.FieldSchema(name:si, type:smallint, comment:null), (over10k)over10k.FieldSchema(name:i, type:int, comment:null), (over10k)over10k.FieldSchema(name:b, type:bigint, comment:null), (over10k)over10k.FieldSchema(name:f, type:float, comment:null), (over10k)over10k.FieldSchema(name:d, type:double, comment:null), (over10k)over10k.FieldSchema(name:bo, type:boolean, comment:null), (over10k)over10k.FieldSchema(name:s, type:string, comment:null), (over10k)over10k.FieldSchema(name:ts, type:timestamp, comment:null), (over10k)over10k.FieldSchema(name:dec, type:decimal(4,2), comment:null), (over10k)over10k.FieldSchema(name:bin, type:binary, comment:null), (over10k)over10k.FieldSchema(name:BLOCK__OFFSET__INSIDE__FILE, type:bigint, comment:), (over10k)over10k.FieldSchema(name:INPUT__FILE__NAME, type:string, comment:), (over10k)over10k.FieldSchema(name:ROW__ID, type:struct, comment:), ] PREHOOK: query: select * from t1 limit 3 PREHOOK: type: QUERY PREHOOK: Input: default@t1 diff --git a/ql/src/test/results/clientpositive/windowing_navfn.q.out b/ql/src/test/results/clientpositive/windowing_navfn.q.out index f2f2cb4..e5bc4f4 100644 --- a/ql/src/test/results/clientpositive/windowing_navfn.q.out +++ 
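The windowing_decimal changes above and the windowing_navfn/windowing_rank changes below only add trailing zeros (2346.3 becomes 2346.30, 75.7 becomes 75.70, 89.5 becomes 89.50): the dec column is declared decimal(4,2), and the updated golden files now render the decimal results with two fractional digits. A minimal, illustrative sketch of that formatting behaviour using plain java.math.BigDecimal rather than Hive's decimal code:

    import java.math.BigDecimal;

    public class ScaleRenderingSketch {
      public static void main(String[] args) {
        // Printing a value at scale 2 keeps the trailing zero.
        System.out.println(new BigDecimal("2346.3").setScale(2)); // 2346.30
        System.out.println(new BigDecimal("75.7").setScale(2));   // 75.70
        System.out.println(new BigDecimal("89.5").setScale(2));   // 89.50
      }
    }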
b/ql/src/test/results/clientpositive/windowing_navfn.q.out @@ -277,13 +277,13 @@ POSTHOOK: Input: default@over10k 65536 98.42 65536 0.93 65536 83.48 -65536 75.7 +65536 75.70 65536 88.04 65536 94.09 65536 33.45 65536 44.41 65536 22.15 -65536 20.5 +65536 20.50 65536 58.86 65536 30.91 65536 74.47 @@ -300,9 +300,9 @@ POSTHOOK: Input: default@over10k 65536 80.26 65536 35.07 65536 95.88 -65536 30.6 +65536 30.60 65536 46.97 -65536 58.8 +65536 58.80 65536 5.72 65536 29.27 65536 62.25 @@ -326,7 +326,7 @@ POSTHOOK: Input: default@over10k 65537 35.86 65537 47.75 65537 1.12 -65537 52.9 +65537 52.90 65537 53.92 65537 43.45 65537 7.52 @@ -340,20 +340,20 @@ POSTHOOK: Input: default@over10k 65537 56.48 65537 83.21 65537 56.52 -65537 36.6 -65537 59.7 +65537 36.60 +65537 59.70 65537 80.14 -65537 66.3 +65537 66.30 65537 94.87 65537 40.92 -65537 25.2 +65537 25.20 65537 7.36 65538 NULL 65538 53.35 65538 54.64 65538 76.67 65538 15.17 -65538 1.2 +65538 1.20 65538 13.71 65538 81.59 65538 43.33 diff --git a/ql/src/test/results/clientpositive/windowing_rank.q.out b/ql/src/test/results/clientpositive/windowing_rank.q.out index 6a74a8e..67975f3 100644 --- a/ql/src/test/results/clientpositive/windowing_rank.q.out +++ b/ql/src/test/results/clientpositive/windowing_rank.q.out @@ -508,16 +508,16 @@ where rnk = 1 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k #### A masked pattern was here #### -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 -2013-03-01 09:11:58.70307 0.5 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 +2013-03-01 09:11:58.70307 0.50 1 PREHOOK: query: select ts, dec, rnk from (select ts, dec, @@ -546,16 +546,16 @@ where dec = 89.5 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k #### A masked pattern was here #### -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 -2013-03-01 09:11:58.703124 89.5 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 +2013-03-01 09:11:58.703124 89.50 1 PREHOOK: query: select ts, dec, rnk from (select ts, dec, @@ -586,13 +586,13 @@ where rnk = 1 limit 10 POSTHOOK: type: QUERY POSTHOOK: Input: default@over10k #### A masked pattern was here #### -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 -2013-03-01 09:11:58.70307 37.3 1 +2013-03-01 09:11:58.70307 37.30 1 
+2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 +2013-03-01 09:11:58.70307 37.30 1 diff --git a/ql/src/test/results/compiler/plan/case_sensitivity.q.xml b/ql/src/test/results/compiler/plan/case_sensitivity.q.xml index 550359d..bec78dd 100644 --- a/ql/src/test/results/compiler/plan/case_sensitivity.q.xml +++ b/ql/src/test/results/compiler/plan/case_sensitivity.q.xml @@ -1184,7 +1184,7 @@ src_thrift - + bigint @@ -1214,6 +1214,52 @@ + + + + true + + + ROW__ID + + + src_thrift + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/cast1.q.xml b/ql/src/test/results/compiler/plan/cast1.q.xml index 5b9bbf8..870443f 100644 --- a/ql/src/test/results/compiler/plan/cast1.q.xml +++ b/ql/src/test/results/compiler/plan/cast1.q.xml @@ -1039,7 +1039,7 @@ src - + bigint @@ -1069,6 +1069,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/groupby1.q.xml b/ql/src/test/results/compiler/plan/groupby1.q.xml index 1f53052..bdaf2b4 100755 --- a/ql/src/test/results/compiler/plan/groupby1.q.xml +++ b/ql/src/test/results/compiler/plan/groupby1.q.xml @@ -571,7 +571,7 @@ - + int @@ -705,7 +705,7 @@ src - + bigint @@ -745,6 +745,50 @@ + ROW__ID + + + ROW__ID + + + true + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + + key @@ -920,7 +964,7 @@ src - + bigint @@ -946,6 +990,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/groupby2.q.xml b/ql/src/test/results/compiler/plan/groupby2.q.xml index f9e1540..e61fbb2 100755 --- a/ql/src/test/results/compiler/plan/groupby2.q.xml +++ b/ql/src/test/results/compiler/plan/groupby2.q.xml @@ -832,6 +832,50 @@ + ROW__ID + + + ROW__ID + + + true + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + + key @@ -1033,6 +1077,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/groupby3.q.xml b/ql/src/test/results/compiler/plan/groupby3.q.xml index ee32e0e..7a96bfb 100644 --- a/ql/src/test/results/compiler/plan/groupby3.q.xml +++ b/ql/src/test/results/compiler/plan/groupby3.q.xml @@ -1078,6 +1078,50 @@ + ROW__ID + + + ROW__ID + + + true + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + + key @@ -1251,6 +1295,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/groupby4.q.xml b/ql/src/test/results/compiler/plan/groupby4.q.xml index 56fc265..de03eda 100644 --- a/ql/src/test/results/compiler/plan/groupby4.q.xml +++ b/ql/src/test/results/compiler/plan/groupby4.q.xml @@ -516,6 +516,50 @@ + ROW__ID + + + ROW__ID + + + true + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + + key @@ -689,6 
+733,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/groupby5.q.xml b/ql/src/test/results/compiler/plan/groupby5.q.xml index 7a63fd1..06df157 100644 --- a/ql/src/test/results/compiler/plan/groupby5.q.xml +++ b/ql/src/test/results/compiler/plan/groupby5.q.xml @@ -423,7 +423,7 @@ - + int @@ -557,7 +557,7 @@ src - + bigint @@ -597,6 +597,50 @@ + ROW__ID + + + ROW__ID + + + true + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + + key @@ -772,7 +816,7 @@ src - + bigint @@ -798,6 +842,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/groupby6.q.xml b/ql/src/test/results/compiler/plan/groupby6.q.xml index 2cc7442..a201e59 100644 --- a/ql/src/test/results/compiler/plan/groupby6.q.xml +++ b/ql/src/test/results/compiler/plan/groupby6.q.xml @@ -516,6 +516,50 @@ + ROW__ID + + + ROW__ID + + + true + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + + key @@ -689,6 +733,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input1.q.xml b/ql/src/test/results/compiler/plan/input1.q.xml index 24a2da9..66e66c2 100755 --- a/ql/src/test/results/compiler/plan/input1.q.xml +++ b/ql/src/test/results/compiler/plan/input1.q.xml @@ -906,7 +906,7 @@ - + int @@ -1046,7 +1046,7 @@ src - + bigint @@ -1076,6 +1076,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input2.q.xml b/ql/src/test/results/compiler/plan/input2.q.xml index 57146f6..e69ac8e 100755 --- a/ql/src/test/results/compiler/plan/input2.q.xml +++ b/ql/src/test/results/compiler/plan/input2.q.xml @@ -2549,7 +2549,7 @@ src - + bigint @@ -2579,6 +2579,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input20.q.xml b/ql/src/test/results/compiler/plan/input20.q.xml index 3efbc07..ce3f4e0 100644 --- a/ql/src/test/results/compiler/plan/input20.q.xml +++ b/ql/src/test/results/compiler/plan/input20.q.xml @@ -806,7 +806,7 @@ src - + bigint @@ -836,6 +836,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input3.q.xml b/ql/src/test/results/compiler/plan/input3.q.xml index c55fee9..ba42b5d 100755 --- a/ql/src/test/results/compiler/plan/input3.q.xml +++ b/ql/src/test/results/compiler/plan/input3.q.xml @@ -3254,7 +3254,7 @@ src - + bigint @@ -3284,6 +3284,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input4.q.xml b/ql/src/test/results/compiler/plan/input4.q.xml index 4652ae6..5225985 100755 --- a/ql/src/test/results/compiler/plan/input4.q.xml +++ 
b/ql/src/test/results/compiler/plan/input4.q.xml @@ -555,7 +555,7 @@ - + int @@ -955,7 +955,7 @@ src - + bigint @@ -985,6 +985,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input5.q.xml b/ql/src/test/results/compiler/plan/input5.q.xml index d65bd3e..8a77ba5 100644 --- a/ql/src/test/results/compiler/plan/input5.q.xml +++ b/ql/src/test/results/compiler/plan/input5.q.xml @@ -1017,7 +1017,7 @@ src_thrift - + bigint @@ -1047,6 +1047,52 @@ + + + + true + + + ROW__ID + + + src_thrift + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input6.q.xml b/ql/src/test/results/compiler/plan/input6.q.xml index 30f44a9..e539747 100644 --- a/ql/src/test/results/compiler/plan/input6.q.xml +++ b/ql/src/test/results/compiler/plan/input6.q.xml @@ -1028,7 +1028,7 @@ src1 - + bigint @@ -1058,6 +1058,56 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input7.q.xml b/ql/src/test/results/compiler/plan/input7.q.xml index a0ba850..ec33cc6 100644 --- a/ql/src/test/results/compiler/plan/input7.q.xml +++ b/ql/src/test/results/compiler/plan/input7.q.xml @@ -957,7 +957,7 @@ src1 - + bigint @@ -987,6 +987,56 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input8.q.xml b/ql/src/test/results/compiler/plan/input8.q.xml index 3cb1dfe..c4c6ceb 100644 --- a/ql/src/test/results/compiler/plan/input8.q.xml +++ b/ql/src/test/results/compiler/plan/input8.q.xml @@ -373,7 +373,7 @@ - + int @@ -579,7 +579,7 @@ src1 - + bigint @@ -609,6 +609,52 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input9.q.xml b/ql/src/test/results/compiler/plan/input9.q.xml index 124e73d..a514912 100644 --- a/ql/src/test/results/compiler/plan/input9.q.xml +++ b/ql/src/test/results/compiler/plan/input9.q.xml @@ -1008,7 +1008,7 @@ src1 - + bigint @@ -1038,6 +1038,56 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input_part1.q.xml b/ql/src/test/results/compiler/plan/input_part1.q.xml index 50ad0e8..ad7aedf 100644 --- a/ql/src/test/results/compiler/plan/input_part1.q.xml +++ b/ql/src/test/results/compiler/plan/input_part1.q.xml @@ -564,7 +564,7 @@ - + int @@ -748,7 +748,7 @@ srcpart - + bigint @@ -778,6 +778,52 @@ + + + + true + + + ROW__ID + + + srcpart + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml 
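Every compiler-plan hunk above and below adds the same ROW__ID virtual column, typed struct&lt;transactionid:bigint,bucketid:int,rowid:bigint&gt;, to each scanned table's column list. As a hypothetical illustration (this helper is not part of the patch), that type string can be turned into a Hive TypeInfo and inspected:

    import org.apache.hadoop.hive.serde2.typeinfo.StructTypeInfo;
    import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

    public class RowIdTypeSketch {
      public static void main(String[] args) {
        // The type string attached to ROW__ID in the plan XML above.
        StructTypeInfo rowId = (StructTypeInfo) TypeInfoUtils.getTypeInfoFromTypeString(
            "struct<transactionid:bigint,bucketid:int,rowid:bigint>");
        System.out.println(rowId.getAllStructFieldNames());     // [transactionid, bucketid, rowid]
        System.out.println(rowId.getAllStructFieldTypeInfos()); // [bigint, int, bigint]
      }
    }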
b/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml index 6a9d7a0..4cc6601 100644 --- a/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml +++ b/ql/src/test/results/compiler/plan/input_testsequencefile.q.xml @@ -912,7 +912,7 @@ src - + bigint @@ -942,6 +942,56 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input_testxpath.q.xml b/ql/src/test/results/compiler/plan/input_testxpath.q.xml index d46688d..6b7e405 100644 --- a/ql/src/test/results/compiler/plan/input_testxpath.q.xml +++ b/ql/src/test/results/compiler/plan/input_testxpath.q.xml @@ -746,7 +746,7 @@ src_thrift - + bigint @@ -776,6 +776,52 @@ + + + + true + + + ROW__ID + + + src_thrift + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/input_testxpath2.q.xml b/ql/src/test/results/compiler/plan/input_testxpath2.q.xml index f31428a..6117017 100644 --- a/ql/src/test/results/compiler/plan/input_testxpath2.q.xml +++ b/ql/src/test/results/compiler/plan/input_testxpath2.q.xml @@ -822,7 +822,7 @@ src_thrift - + bigint @@ -852,6 +852,52 @@ + + + + true + + + ROW__ID + + + src_thrift + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/join1.q.xml b/ql/src/test/results/compiler/plan/join1.q.xml index 0bd33c4..34cc13a 100644 --- a/ql/src/test/results/compiler/plan/join1.q.xml +++ b/ql/src/test/results/compiler/plan/join1.q.xml @@ -740,6 +740,56 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1047,6 +1097,25 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1282,7 +1351,7 @@ _col1 - _col5 + _col6 src2 @@ -1384,7 +1453,7 @@ - _col5 + _col6 VALUE._col0 @@ -1473,13 +1542,17 @@ _col0 - _col5 + _col6 + _col8 + 1 + + _col7 1 @@ -1493,7 +1566,7 @@ _col4 - 1 + 0 _col3 @@ -1511,6 +1584,10 @@ _col0 0 + + _col9 + 1 + @@ -1578,7 +1655,7 @@ - _col5 + _col6 src2 diff --git a/ql/src/test/results/compiler/plan/join2.q.xml b/ql/src/test/results/compiler/plan/join2.q.xml index 75f1404..8354332 100644 --- a/ql/src/test/results/compiler/plan/join2.q.xml +++ b/ql/src/test/results/compiler/plan/join2.q.xml @@ -363,7 +363,7 @@ - _col4 + _col5 src2 @@ -555,7 +555,7 @@ _col0 - _col4 + _col5 @@ -589,7 +589,7 @@ - _col4 + _col5 src2 @@ -1015,6 +1015,56 @@ + + + + true + + + ROW__ID + + + src3 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1059,7 +1109,7 @@ columns - _col0,_col4 + _col0,_col5 serialization.lib @@ -1302,7 +1352,7 @@ _col1 - _col9 + _col11 src3 @@ -1404,13 +1454,13 @@ - _col0 + _col11 - VALUE._col0 + VALUE._col1 - src1 + src3 @@ -1418,13 +1468,13 @@ - _col9 + _col0 - VALUE._col1 + VALUE._col0 - src3 + src1 @@ -1452,7 +1502,7 @@ 0 - + @@ -1460,7 +1510,7 @@ 1 - + @@ -1493,7 +1543,7 @@ _col0 - _col9 + _col11 @@ -1501,25 +1551,17 @@ _col8 - 1 + 0 _col7 0 - _col11 - 1 - - _col6 0 - _col10 
- 1 - - _col5 0 @@ -1540,13 +1582,33 @@ 0 - _col0 + _col9 0 - _col9 + _col13 + 1 + + + _col12 + 1 + + + _col11 + 1 + + + _col10 + 1 + + + _col14 1 + + _col0 + 0 + @@ -1617,7 +1679,7 @@ - _col9 + _col11 src3 @@ -2095,6 +2157,25 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -2402,6 +2483,25 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -2608,7 +2708,7 @@ - _col4 + _col5 KEY.reducesinkkey0 @@ -2697,13 +2797,17 @@ _col0 - _col4 + _col5 + _col8 + 1 + + _col7 1 @@ -2717,7 +2821,7 @@ _col4 - 1 + 0 _col3 @@ -2735,6 +2839,10 @@ _col0 0 + + _col9 + 1 + diff --git a/ql/src/test/results/compiler/plan/join3.q.xml b/ql/src/test/results/compiler/plan/join3.q.xml index 5276850..c671d05 100644 --- a/ql/src/test/results/compiler/plan/join3.q.xml +++ b/ql/src/test/results/compiler/plan/join3.q.xml @@ -752,6 +752,56 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + int + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1115,6 +1165,25 @@ + + + + true + + + ROW__ID + + + src3 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1422,6 +1491,25 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1660,7 +1748,7 @@ _col1 - _col9 + _col11 src3 @@ -1762,13 +1850,13 @@ - _col0 + _col11 - KEY.reducesinkkey0 + VALUE._col0 - src1 + src3 @@ -1776,13 +1864,13 @@ - _col9 + _col0 - VALUE._col0 + KEY.reducesinkkey0 - src3 + src1 @@ -1817,7 +1905,7 @@ 0 - + @@ -1829,7 +1917,7 @@ 2 - + @@ -1866,7 +1954,7 @@ _col0 - _col9 + _col11 @@ -1874,31 +1962,23 @@ _col8 - 2 + 1 _col7 1 - _col11 - 2 - - _col6 1 - _col10 - 2 - - _col5 1 _col4 - 1 + 0 _col3 @@ -1913,13 +1993,33 @@ 0 - _col0 - 0 + _col9 + 1 - _col9 + _col13 + 2 + + + _col12 2 + + _col11 + 2 + + + _col10 + 2 + + + _col14 + 2 + + + _col0 + 0 + @@ -2001,7 +2101,7 @@ - _col9 + _col11 src3 diff --git a/ql/src/test/results/compiler/plan/join4.q.xml b/ql/src/test/results/compiler/plan/join4.q.xml index 6d3739e..01032ac 100644 --- a/ql/src/test/results/compiler/plan/join4.q.xml +++ b/ql/src/test/results/compiler/plan/join4.q.xml @@ -750,6 +750,52 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1270,6 +1316,25 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/join5.q.xml b/ql/src/test/results/compiler/plan/join5.q.xml index 6d3ce7c..83171cf 100644 --- a/ql/src/test/results/compiler/plan/join5.q.xml +++ b/ql/src/test/results/compiler/plan/join5.q.xml @@ -750,6 +750,52 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1270,6 +1316,25 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/join6.q.xml b/ql/src/test/results/compiler/plan/join6.q.xml index f8a7670..9f9b3d6 100644 --- a/ql/src/test/results/compiler/plan/join6.q.xml +++ b/ql/src/test/results/compiler/plan/join6.q.xml @@ -750,6 +750,52 @@ + + + + true + + + ROW__ID + + + src1 + + 
+ + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1270,6 +1316,25 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/join7.q.xml b/ql/src/test/results/compiler/plan/join7.q.xml index 2b317ae..a2c8884 100644 --- a/ql/src/test/results/compiler/plan/join7.q.xml +++ b/ql/src/test/results/compiler/plan/join7.q.xml @@ -815,6 +815,52 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1335,6 +1381,25 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1855,6 +1920,25 @@ + + + + true + + + ROW__ID + + + src3 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/join8.q.xml b/ql/src/test/results/compiler/plan/join8.q.xml index 32fd0f1..c5f8197 100644 --- a/ql/src/test/results/compiler/plan/join8.q.xml +++ b/ql/src/test/results/compiler/plan/join8.q.xml @@ -791,6 +791,52 @@ + + + + true + + + ROW__ID + + + src1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1352,6 +1398,25 @@ + + + + true + + + ROW__ID + + + src2 + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample1.q.xml b/ql/src/test/results/compiler/plan/sample1.q.xml index 9986a8d..855a7bc 100644 --- a/ql/src/test/results/compiler/plan/sample1.q.xml +++ b/ql/src/test/results/compiler/plan/sample1.q.xml @@ -841,7 +841,7 @@ s - + bigint @@ -871,6 +871,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample2.q.xml b/ql/src/test/results/compiler/plan/sample2.q.xml index 4ccc2cd..acd3561 100644 --- a/ql/src/test/results/compiler/plan/sample2.q.xml +++ b/ql/src/test/results/compiler/plan/sample2.q.xml @@ -1139,7 +1139,7 @@ s - + bigint @@ -1169,6 +1169,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample3.q.xml b/ql/src/test/results/compiler/plan/sample3.q.xml index 17725ae..854c72e 100644 --- a/ql/src/test/results/compiler/plan/sample3.q.xml +++ b/ql/src/test/results/compiler/plan/sample3.q.xml @@ -1149,7 +1149,7 @@ s - + bigint @@ -1179,6 +1179,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample4.q.xml b/ql/src/test/results/compiler/plan/sample4.q.xml index 4ccc2cd..acd3561 100644 --- a/ql/src/test/results/compiler/plan/sample4.q.xml +++ b/ql/src/test/results/compiler/plan/sample4.q.xml @@ -1139,7 +1139,7 @@ s - + bigint @@ -1169,6 +1169,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + 
struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample5.q.xml b/ql/src/test/results/compiler/plan/sample5.q.xml index 828ffbb..3b1b467 100644 --- a/ql/src/test/results/compiler/plan/sample5.q.xml +++ b/ql/src/test/results/compiler/plan/sample5.q.xml @@ -1136,7 +1136,7 @@ s - + bigint @@ -1166,6 +1166,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample6.q.xml b/ql/src/test/results/compiler/plan/sample6.q.xml index 4bf64a0..408ab38 100644 --- a/ql/src/test/results/compiler/plan/sample6.q.xml +++ b/ql/src/test/results/compiler/plan/sample6.q.xml @@ -1139,7 +1139,7 @@ s - + bigint @@ -1169,6 +1169,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/sample7.q.xml b/ql/src/test/results/compiler/plan/sample7.q.xml index e4bc030..5d5f6e1 100644 --- a/ql/src/test/results/compiler/plan/sample7.q.xml +++ b/ql/src/test/results/compiler/plan/sample7.q.xml @@ -1184,7 +1184,7 @@ s - + bigint @@ -1214,6 +1214,52 @@ + + + + true + + + ROW__ID + + + s + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/subq.q.xml b/ql/src/test/results/compiler/plan/subq.q.xml index f7c27b6..0f58100 100644 --- a/ql/src/test/results/compiler/plan/subq.q.xml +++ b/ql/src/test/results/compiler/plan/subq.q.xml @@ -726,7 +726,7 @@ - + int @@ -866,7 +866,7 @@ src - + bigint @@ -896,6 +896,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/udf1.q.xml b/ql/src/test/results/compiler/plan/udf1.q.xml index f7cf1cd..3039059 100644 --- a/ql/src/test/results/compiler/plan/udf1.q.xml +++ b/ql/src/test/results/compiler/plan/udf1.q.xml @@ -1871,7 +1871,7 @@ - + int @@ -1998,7 +1998,7 @@ src - + bigint @@ -2028,6 +2028,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/udf4.q.xml b/ql/src/test/results/compiler/plan/udf4.q.xml index 994ce7f..d6dab9e 100644 --- a/ql/src/test/results/compiler/plan/udf4.q.xml +++ b/ql/src/test/results/compiler/plan/udf4.q.xml @@ -1802,6 +1802,52 @@ + + + + true + + + ROW__ID + + + dest1 + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/udf6.q.xml b/ql/src/test/results/compiler/plan/udf6.q.xml index 22ef3ff..3bdcc4f 100644 --- a/ql/src/test/results/compiler/plan/udf6.q.xml +++ b/ql/src/test/results/compiler/plan/udf6.q.xml @@ -531,7 +531,7 @@ src - + bigint @@ -561,6 +561,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git 
a/ql/src/test/results/compiler/plan/udf_case.q.xml b/ql/src/test/results/compiler/plan/udf_case.q.xml index 7f12f53..e961c9c 100644 --- a/ql/src/test/results/compiler/plan/udf_case.q.xml +++ b/ql/src/test/results/compiler/plan/udf_case.q.xml @@ -608,7 +608,7 @@ src - + bigint @@ -638,6 +638,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/udf_when.q.xml b/ql/src/test/results/compiler/plan/udf_when.q.xml index ae7db70..ce3e6f5 100644 --- a/ql/src/test/results/compiler/plan/udf_when.q.xml +++ b/ql/src/test/results/compiler/plan/udf_when.q.xml @@ -688,7 +688,7 @@ src - + bigint @@ -718,6 +718,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/ql/src/test/results/compiler/plan/union.q.xml b/ql/src/test/results/compiler/plan/union.q.xml index 7c5d8a0..0f9b1e5 100644 --- a/ql/src/test/results/compiler/plan/union.q.xml +++ b/ql/src/test/results/compiler/plan/union.q.xml @@ -1069,6 +1069,52 @@ + + + + true + + + ROW__ID + + + src + + + + + + + transactionid + + + bucketid + + + rowid + + + + + + + + + + + + + + + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + @@ -1457,6 +1503,25 @@ + + + + true + + + ROW__ID + + + src + + + + + + struct<transactionid:bigint,bucketid:int,rowid:bigint> + + + diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/Deserializer.java b/serde/src/java/org/apache/hadoop/hive/serde2/Deserializer.java index ade3b5f..df27db2 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/Deserializer.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/Deserializer.java @@ -52,7 +52,7 @@ * Deserialize an object out of a Writable blob. In most cases, the return * value of this function will be constant since the function will reuse the * returned object. If the client wants to keep a copy of the object, the - * client needs to clone the returnDeserializered value by calling + * client needs to clone the returned deserialized value by calling * ObjectInspectorUtils.getStandardObject(). 
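The corrected javadoc above spells out the copy-before-reuse contract: deserialize() may hand back the same mutable object on every call, so a caller that wants to keep a row must copy it first. A minimal sketch of that pattern, assuming a hypothetical helper around an already-initialized Deserializer; it uses ObjectInspectorUtils.copyToStandardObject, the utility commonly used to take the "standard object" copy the javadoc refers to:

    import org.apache.hadoop.hive.serde2.Deserializer;
    import org.apache.hadoop.hive.serde2.SerDeException;
    import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils;
    import org.apache.hadoop.io.Writable;

    public class CopyDeserializedRow {
      // Hypothetical helper: copies the (possibly reused) deserialized row so the
      // caller can safely hold on to it across subsequent deserialize() calls.
      static Object deserializeAndCopy(Deserializer deserializer, Writable blob) throws SerDeException {
        Object reused = deserializer.deserialize(blob);
        return ObjectInspectorUtils.copyToStandardObject(reused, deserializer.getObjectInspector());
      }
    }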
* * @param blob diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroGenericRecordWritable.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroGenericRecordWritable.java index b554743..402a4ac 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroGenericRecordWritable.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroGenericRecordWritable.java @@ -93,8 +93,8 @@ public void write(DataOutput out) throws IOException { @Override public void readFields(DataInput in) throws IOException { - Schema schema = Schema.parse(in.readUTF()); - fileSchema = Schema.parse(in.readUTF()); + Schema schema = AvroSerdeUtils.getSchemaFor(in.readUTF()); + fileSchema = AvroSerdeUtils.getSchemaFor(in.readUTF()); recordReaderID = UID.read(in); record = new GenericData.Record(schema); binaryDecoder = DecoderFactory.defaultFactory().createBinaryDecoder((InputStream) in, binaryDecoder); diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java index 8c5cf3e..7c48e9b 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/AvroSerdeUtils.java @@ -29,7 +29,9 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.mapred.JobConf; +import java.io.File; import java.io.IOException; +import java.io.InputStream; import java.math.BigInteger; import java.net.URI; import java.net.URISyntaxException; @@ -67,7 +69,7 @@ public static Schema determineSchemaOrThrowException(Properties properties) throws IOException, AvroSerdeException { String schemaString = properties.getProperty(SCHEMA_LITERAL); if(schemaString != null && !schemaString.equals(SCHEMA_NONE)) - return Schema.parse(schemaString); + return AvroSerdeUtils.getSchemaFor(schemaString); // Try pulling directly from URL schemaString = properties.getProperty(SCHEMA_URL); @@ -78,7 +80,7 @@ public static Schema determineSchemaOrThrowException(Properties properties) Schema s = getSchemaFromFS(schemaString, new Configuration()); if (s == null) { //in case schema is not a file system - return Schema.parse(new URL(schemaString).openStream()); + return AvroSerdeUtils.getSchemaFor(new URL(schemaString).openStream()); } return s; } catch (IOException ioe) { @@ -123,7 +125,7 @@ protected static Schema getSchemaFromFS(String schemaFSUrl, } try { in = fs.open(new Path(schemaFSUrl)); - Schema s = Schema.parse(in); + Schema s = AvroSerdeUtils.getSchemaFor(in); return s; } finally { if(in != null) in.close(); @@ -194,4 +196,31 @@ public static HiveDecimal getHiveDecimalFromByteBuffer(ByteBuffer byteBuffer, in return dec; } + public static Schema getSchemaFor(String str) { + Schema.Parser parser = new Schema.Parser(); + Schema schema = parser.parse(str); + return schema; + } + + public static Schema getSchemaFor(File file) { + Schema.Parser parser = new Schema.Parser(); + Schema schema; + try { + schema = parser.parse(file); + } catch (IOException e) { + throw new RuntimeException("Failed to parse Avro schema from " + file.getName(), e); + } + return schema; + } + + public static Schema getSchemaFor(InputStream stream) { + Schema.Parser parser = new Schema.Parser(); + Schema schema; + try { + schema = parser.parse(stream); + } catch (IOException e) { + throw new RuntimeException("Failed to parse Avro schema", e); + } + return schema; + } } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaResolutionProblem.java 
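The AvroSerdeUtils.getSchemaFor overloads added above centralize schema parsing on Avro's Schema.Parser, replacing the direct Schema.parse calls in the surrounding hunks. An illustrative usage sketch; the record literal here is made up for the example:

    import org.apache.avro.Schema;
    import org.apache.hadoop.hive.serde2.avro.AvroSerdeUtils;

    public class GetSchemaForSketch {
      public static void main(String[] args) {
        // Parse an inline Avro schema through the new helper.
        Schema s = AvroSerdeUtils.getSchemaFor(
            "{\"type\":\"record\",\"name\":\"r\","
            + "\"fields\":[{\"name\":\"k\",\"type\":\"string\"}]}");
        System.out.println(s.getField("k").schema().getType()); // STRING
      }
    }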
b/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaResolutionProblem.java index 3dceb63..65f104d 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaResolutionProblem.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/SchemaResolutionProblem.java @@ -55,5 +55,5 @@ " }\n" + " ]\n" + "}"; - public final static Schema SIGNAL_BAD_SCHEMA = Schema.parse(sentinelString); + public final static Schema SIGNAL_BAD_SCHEMA = AvroSerdeUtils.getSchemaFor(sentinelString); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java b/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java index 497a49c..4169558 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/avro/TypeInfoToSchema.java @@ -38,7 +38,6 @@ */ public class TypeInfoToSchema { - private static final Schema.Parser PARSER = new Schema.Parser(); private long recordCounter = 0; /** @@ -139,7 +138,7 @@ private Schema createAvroPrimitive(TypeInfo typeInfo) { DecimalTypeInfo decimalTypeInfo = (DecimalTypeInfo) typeInfo; String precision = String.valueOf(decimalTypeInfo.precision()); String scale = String.valueOf(decimalTypeInfo.scale()); - schema = PARSER.parse("{" + + schema = AvroSerdeUtils.getSchemaFor("{" + "\"type\":\"bytes\"," + "\"logicalType\":\"decimal\"," + "\"precision\":" + precision + "," + diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java index 523ad7d..06a8c2f 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/binarysortable/BinarySortableSerDe.java @@ -776,7 +776,7 @@ static void serialize(ByteStream.Output buffer, Object o, ObjectInspector oi, // get the scale factor to turn big decimal into a decimal < 1 int factor = dec.precision() - dec.scale(); - factor = sign == 1 ? factor : -factor; + factor = sign != -1 ? 
factor : -factor; // convert the absolute big decimal to string dec.scaleByPowerOfTen(Math.abs(dec.scale())); diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryArray.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryArray.java index 55f96ee..4929f67 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryArray.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryArray.java @@ -149,7 +149,7 @@ private void parse() { if ((bytes[nullByteCur] & (1 << (i % 8))) != 0) { elementIsNull[i] = false; LazyBinaryUtils.checkObjectByteInfo(listEleObjectInspector, bytes, - lastElementByteEnd, recordInfo); + lastElementByteEnd, recordInfo, vInt); elementStart[i] = lastElementByteEnd + recordInfo.elementOffset; elementLength[i] = recordInfo.elementSize; lastElementByteEnd = elementStart[i] + elementLength[i]; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryMap.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryMap.java index 31ad78e..5e40cd5 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryMap.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryMap.java @@ -163,7 +163,7 @@ private void parse() { if ((bytes[nullByteCur] & (1 << ((i * 2) % 8))) != 0) { keyIsNull[i] = false; LazyBinaryUtils.checkObjectByteInfo(((MapObjectInspector) oi) - .getMapKeyObjectInspector(), bytes, lastElementByteEnd, recordInfo); + .getMapKeyObjectInspector(), bytes, lastElementByteEnd, recordInfo, vInt); keyStart[i] = lastElementByteEnd + recordInfo.elementOffset; keyLength[i] = recordInfo.elementSize; lastElementByteEnd = keyStart[i] + keyLength[i]; @@ -178,7 +178,7 @@ private void parse() { valueIsNull[i] = false; LazyBinaryUtils.checkObjectByteInfo(((MapObjectInspector) oi) .getMapValueObjectInspector(), bytes, lastElementByteEnd, - recordInfo); + recordInfo, vInt); valueStart[i] = lastElementByteEnd + recordInfo.elementOffset; valueLength[i] = recordInfo.elementSize; lastElementByteEnd = valueStart[i] + valueLength[i]; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryStruct.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryStruct.java index bd9e6cc..8819703 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryStruct.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryStruct.java @@ -27,6 +27,7 @@ import org.apache.hadoop.hive.serde2.StructObject; import org.apache.hadoop.hive.serde2.lazy.ByteArrayRef; import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.RecordInfo; +import org.apache.hadoop.hive.serde2.lazybinary.LazyBinaryUtils.VInt; import org.apache.hadoop.hive.serde2.lazybinary.objectinspector.LazyBinaryStructObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; @@ -96,7 +97,8 @@ public void init(ByteArrayRef bytes, int start, int length) { serializedSize = length; } - RecordInfo recordInfo = new LazyBinaryUtils.RecordInfo(); + final VInt vInt = new VInt(); + final RecordInfo recordInfo = new LazyBinaryUtils.RecordInfo(); boolean missingFieldWarned = false; boolean extraFieldWarned = false; @@ -138,7 +140,7 @@ private void parse() { if ((nullByte & (1 << (i % 8))) != 0) { fieldIsNull[i] = false; LazyBinaryUtils.checkObjectByteInfo(fieldRefs.get(i) - .getFieldObjectInspector(), bytes, lastFieldByteEnd, 
recordInfo); + .getFieldObjectInspector(), bytes, lastFieldByteEnd, recordInfo, vInt); fieldStart[i] = lastFieldByteEnd + recordInfo.elementOffset; fieldLength[i] = recordInfo.elementSize; lastFieldByteEnd = fieldStart[i] + fieldLength[i]; @@ -200,6 +202,7 @@ public Object getField(int fieldID) { } public static final class SingleFieldGetter { + private final VInt vInt = new VInt(); private final LazyBinaryStructObjectInspector soi; private final int fieldIndex; private final RecordInfo recordInfo = new LazyBinaryUtils.RecordInfo(); @@ -219,7 +222,7 @@ public void init(BinaryComparable src) { for (int i = 0; i <= fieldIndex; i++) { if ((nullByte & (1 << (i % 8))) != 0) { LazyBinaryUtils.checkObjectByteInfo(fieldRefs.get(i) - .getFieldObjectInspector(), fieldBytes, lastFieldByteEnd, recordInfo); + .getFieldObjectInspector(), fieldBytes, lastFieldByteEnd, recordInfo, vInt); fieldStart = lastFieldByteEnd + recordInfo.elementOffset; fieldLength = recordInfo.elementSize; lastFieldByteEnd = fieldStart + fieldLength; diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java index f7cfb36..155b267 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/lazybinary/LazyBinaryUtils.java @@ -125,13 +125,6 @@ public String toString() { } } - private static ThreadLocal vIntThreadLocal = new ThreadLocal() { - @Override - public VInt initialValue() { - return new VInt(); - } - }; - /** * Check a particular field and set its size and offset in bytes based on the * field type and the bytes arrays. @@ -154,8 +147,7 @@ public VInt initialValue() { * modify this byteinfo object and return it */ public static void checkObjectByteInfo(ObjectInspector objectInspector, - byte[] bytes, int offset, RecordInfo recordInfo) { - VInt vInt = vIntThreadLocal.get(); + byte[] bytes, int offset, RecordInfo recordInfo, VInt vInt) { Category category = objectInspector.getCategory(); switch (category) { case PRIMITIVE: diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantByteObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantByteObjectInspector.java index 7931021..3214e11 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantByteObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantByteObjectInspector.java @@ -48,6 +48,9 @@ public ByteWritable getWritableConstantValue() { @Override public int precision() { + if (value == null) { + return super.precision(); + } return BigDecimal.valueOf(value.get()).precision(); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveDecimalObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveDecimalObjectInspector.java index 5e3ad62..b87d1f8 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveDecimalObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantHiveDecimalObjectInspector.java @@ -58,11 +58,17 @@ public HiveDecimalWritable getWritableConstantValue() { @Override public int precision() { + if (value == null) { + return super.precision(); + } return 
value.getHiveDecimal().precision(); } @Override public int scale() { + if (value == null) { + return super.scale(); + } return value.getHiveDecimal().scale(); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantIntObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantIntObjectInspector.java index 18389a9..0a24c2c 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantIntObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantIntObjectInspector.java @@ -48,6 +48,9 @@ public IntWritable getWritableConstantValue() { @Override public int precision() { + if (value == null) { + return super.precision(); + } return BigDecimal.valueOf(value.get()).precision(); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantLongObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantLongObjectInspector.java index 52f8a26..1973d48 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantLongObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantLongObjectInspector.java @@ -48,6 +48,9 @@ public LongWritable getWritableConstantValue() { @Override public int precision() { + if (value == null) { + return super.precision(); + } return BigDecimal.valueOf(value.get()).precision(); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantShortObjectInspector.java b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantShortObjectInspector.java index 85e4f1d..2f7479a 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantShortObjectInspector.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/objectinspector/primitive/WritableConstantShortObjectInspector.java @@ -48,6 +48,9 @@ public ShortWritable getWritableConstantValue() { @Override public int precision() { + if (value == null) { + return super.precision(); + } return BigDecimal.valueOf(value.get()).precision(); } diff --git a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfo.java b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfo.java index e7f3f48..70dc181 100644 --- a/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfo.java +++ b/serde/src/java/org/apache/hadoop/hive/serde2/typeinfo/TypeInfo.java @@ -26,10 +26,11 @@ * Stores information about a type. Always use the TypeInfoFactory to create new * TypeInfo objects. * - * We support 5 categories of types: 1. Primitive objects (String, Number, etc) + * We support 8 categories of types: 1. Primitive objects (String, Number, etc) * 2. List objects (a list of objects of a single type) 3. Map objects (a map * from objects of one type to objects of another type) 4. Struct objects (a * list of fields with names and their own types) 5. Union objects + * 6. Decimal objects 7. Char objects 8. 
Varchar objects */ public abstract class TypeInfo implements Serializable { diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroDeserializer.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroDeserializer.java index 198bd24..3a33239 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroDeserializer.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroDeserializer.java @@ -55,7 +55,7 @@ public void canDeserializeVoidType() throws IOException, SerDeException { " {\"name\": \"isANull\", \"type\": \"null\"}\n" + " ]\n" + "}"; - Schema s = Schema.parse(schemaString); + Schema s = AvroSerdeUtils.getSchemaFor(schemaString); GenericData.Record record = new GenericData.Record(s); record.put("isANull", null); @@ -83,7 +83,7 @@ public void canDeserializeVoidType() throws IOException, SerDeException { @Test public void canDeserializeMapsWithPrimitiveKeys() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.MAP_WITH_PRIMITIVE_VALUE_TYPE); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.MAP_WITH_PRIMITIVE_VALUE_TYPE); GenericData.Record record = new GenericData.Record(s); Map m = new Hashtable(); @@ -129,7 +129,7 @@ public void canDeserializeMapsWithPrimitiveKeys() throws SerDeException, IOExcep @Test public void canDeserializeArrays() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.ARRAY_WITH_PRIMITIVE_ELEMENT_TYPE); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.ARRAY_WITH_PRIMITIVE_ELEMENT_TYPE); GenericData.Record record = new GenericData.Record(s); List list = new ArrayList(); @@ -187,7 +187,7 @@ public void canDeserializeArrays() throws SerDeException, IOException { @Test public void canDeserializeRecords() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.RECORD_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.RECORD_SCHEMA); GenericData.Record record = new GenericData.Record(s); GenericData.Record innerRecord = new GenericData.Record(s.getField("aRecord").schema()); innerRecord.put("int1", 42); @@ -246,7 +246,7 @@ private ResultPair(ObjectInspector oi, Object value, Object unionObject) { @Test public void canDeserializeUnions() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.UNION_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.UNION_SCHEMA); GenericData.Record record = new GenericData.Record(s); record.put("aUnion", "this is a string"); @@ -295,7 +295,7 @@ private ResultPair unionTester(Schema s, GenericData.Record record) @Test // Enums are one of two types we fudge for Hive. Enums go in, Strings come out. public void canDeserializeEnums() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.ENUM_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.ENUM_SCHEMA); GenericData.Record record = new GenericData.Record(s); record.put("baddies", new GenericData.EnumSymbol(s.getField("baddies").schema(),"DALEKS")); @@ -325,7 +325,7 @@ public void canDeserializeEnums() throws SerDeException, IOException { @Test // Fixed doesn't exist in Hive. Fixeds go in, lists of bytes go out. 
public void canDeserializeFixed() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.FIXED_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.FIXED_SCHEMA); GenericData.Record record = new GenericData.Record(s); byte [] bytes = "ANANCIENTBLUEBOX".getBytes(); @@ -361,7 +361,7 @@ public void canDeserializeFixed() throws SerDeException, IOException { @Test public void canDeserializeBytes() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.BYTES_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.BYTES_SCHEMA); GenericData.Record record = new GenericData.Record(s); byte [] bytes = "ANANCIENTBLUEBOX".getBytes(); @@ -400,7 +400,7 @@ public void canDeserializeBytes() throws SerDeException, IOException { @Test public void canDeserializeNullableTypes() throws IOException, SerDeException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.NULLABLE_STRING_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.NULLABLE_STRING_SCHEMA); GenericData.Record record = new GenericData.Record(s); record.put("nullableString", "this is a string"); @@ -413,7 +413,7 @@ public void canDeserializeNullableTypes() throws IOException, SerDeException { @Test public void canDeserializeNullableEnums() throws IOException, SerDeException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.NULLABLE_ENUM_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.NULLABLE_ENUM_SCHEMA); GenericData.Record record = new GenericData.Record(s); record.put("nullableEnum", new GenericData.EnumSymbol(AvroSerdeUtils.getOtherTypeFromNullableType(s.getField("nullableEnum").schema()), "CYBERMEN")); @@ -426,7 +426,8 @@ public void canDeserializeNullableEnums() throws IOException, SerDeException { @Test public void canDeserializeMapWithNullablePrimitiveValues() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.MAP_WITH_NULLABLE_PRIMITIVE_VALUE_TYPE_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator + .MAP_WITH_NULLABLE_PRIMITIVE_VALUE_TYPE_SCHEMA); GenericData.Record record = new GenericData.Record(s); Map m = new HashMap(); @@ -504,7 +505,7 @@ private void verifyNullableType(GenericData.Record record, Schema s, String fiel @Test public void verifyCaching() throws SerDeException, IOException { - Schema s = Schema.parse(TestAvroObjectInspectorGenerator.RECORD_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.RECORD_SCHEMA); GenericData.Record record = new GenericData.Record(s); GenericData.Record innerRecord = new GenericData.Record(s.getField("aRecord").schema()); innerRecord.put("int1", 42); @@ -541,7 +542,7 @@ public void verifyCaching() throws SerDeException, IOException { assertEquals(0, de.getReEncoderCache().size()); //Read the record with **different** record reader ID and **evolved** schema - Schema evolvedSchema = Schema.parse(s.toString()); + Schema evolvedSchema = AvroSerdeUtils.getSchemaFor(s.toString()); evolvedSchema.getField("aRecord").schema().addProp("Testing", "meaningless"); garw.setRecordReaderID(recordReaderID = new UID()); //New record reader ID row = diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroObjectInspectorGenerator.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroObjectInspectorGenerator.java index 76c1940..337b44e 100644 --- 
a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroObjectInspectorGenerator.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroObjectInspectorGenerator.java @@ -41,13 +41,13 @@ import org.junit.Test; public class TestAvroObjectInspectorGenerator { - private final TypeInfo STRING = TypeInfoFactory.getPrimitiveTypeInfo("string"); - private final TypeInfo INT = TypeInfoFactory.getPrimitiveTypeInfo("int"); - private final TypeInfo BOOLEAN = TypeInfoFactory.getPrimitiveTypeInfo("boolean"); - private final TypeInfo LONG = TypeInfoFactory.getPrimitiveTypeInfo("bigint"); - private final TypeInfo FLOAT = TypeInfoFactory.getPrimitiveTypeInfo("float"); - private final TypeInfo DOUBLE = TypeInfoFactory.getPrimitiveTypeInfo("double"); - private final TypeInfo VOID = TypeInfoFactory.getPrimitiveTypeInfo("void"); + private static final TypeInfo STRING = TypeInfoFactory.getPrimitiveTypeInfo("string"); + private static final TypeInfo INT = TypeInfoFactory.getPrimitiveTypeInfo("int"); + private static final TypeInfo BOOLEAN = TypeInfoFactory.getPrimitiveTypeInfo("boolean"); + private static final TypeInfo LONG = TypeInfoFactory.getPrimitiveTypeInfo("bigint"); + private static final TypeInfo FLOAT = TypeInfoFactory.getPrimitiveTypeInfo("float"); + private static final TypeInfo DOUBLE = TypeInfoFactory.getPrimitiveTypeInfo("double"); + private static final TypeInfo VOID = TypeInfoFactory.getPrimitiveTypeInfo("void"); // These schemata are used in other tests static public final String MAP_WITH_PRIMITIVE_VALUE_TYPE = "{\n" + @@ -265,7 +265,7 @@ public void failOnNonRecords() throws Exception { " \"symbols\" : [\"SPADES\", \"HEARTS\", \"DIAMONDS\", \"CLUBS\"]\n" + "}"; - Schema s = Schema.parse(nonRecordSchema); + Schema s = AvroSerdeUtils.getSchemaFor(nonRecordSchema); try { new AvroObjectInspectorGenerator(s); fail("Should not be able to handle non-record Avro types"); @@ -311,7 +311,7 @@ public void primitiveTypesWorkCorrectly() throws SerDeException { " }\n" + " ]\n" + "}"; - AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(Schema.parse(bunchOfPrimitives)); + AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(AvroSerdeUtils.getSchemaFor(bunchOfPrimitives)); String [] expectedColumnNames = {"aString", "anInt", "aBoolean", "aLong", "aFloat", "aDouble", "aNull"}; verifyColumnNames(expectedColumnNames, aoig.getColumnNames()); @@ -350,7 +350,7 @@ private void verifyColumnNames(String[] expectedColumnNames, List column @Test public void canHandleMapsWithPrimitiveValueTypes() throws SerDeException { - Schema s = Schema.parse(MAP_WITH_PRIMITIVE_VALUE_TYPE); + Schema s = AvroSerdeUtils.getSchemaFor(MAP_WITH_PRIMITIVE_VALUE_TYPE); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); verifyMap(aoig, "aMap"); } @@ -379,7 +379,7 @@ private void verifyMap(final AvroObjectInspectorGenerator aoig, final String fie @Test public void canHandleArrays() throws SerDeException { - Schema s = Schema.parse(ARRAY_WITH_PRIMITIVE_ELEMENT_TYPE); + Schema s = AvroSerdeUtils.getSchemaFor(ARRAY_WITH_PRIMITIVE_ELEMENT_TYPE); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names @@ -398,7 +398,7 @@ public void canHandleArrays() throws SerDeException { @Test public void canHandleRecords() throws SerDeException { - Schema s = Schema.parse(RECORD_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(RECORD_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names @@ -429,7 +429,7 @@ public 
void canHandleRecords() throws SerDeException { @Test public void canHandleUnions() throws SerDeException { - Schema s = Schema.parse(UNION_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(UNION_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names @@ -452,7 +452,7 @@ public void canHandleUnions() throws SerDeException { @Test // Enums are one of two Avro types that Hive doesn't have any native support for. public void canHandleEnums() throws SerDeException { - Schema s = Schema.parse(ENUM_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(ENUM_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); // Column names - we lose the enumness of this schema @@ -466,7 +466,7 @@ public void canHandleEnums() throws SerDeException { @Test // Hive has no concept of Avro's fixed type. Fixed -> arrays of bytes public void canHandleFixed() throws SerDeException { - Schema s = Schema.parse(FIXED_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(FIXED_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); @@ -483,7 +483,7 @@ public void canHandleFixed() throws SerDeException { @Test // Avro considers bytes primitive, Hive doesn't. Make them list of tinyint. public void canHandleBytes() throws SerDeException { - Schema s = Schema.parse(BYTES_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(BYTES_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); @@ -500,7 +500,7 @@ public void canHandleBytes() throws SerDeException { @Test // That Union[T, NULL] is converted to just T. public void convertsNullableTypes() throws SerDeException { - Schema s = Schema.parse(NULLABLE_STRING_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(NULLABLE_STRING_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); assertEquals(1, aoig.getColumnNames().size()); @@ -517,14 +517,14 @@ public void convertsNullableTypes() throws SerDeException { @Test // That Union[T, NULL] is converted to just T, within a Map public void convertsMapsWithNullablePrimitiveTypes() throws SerDeException { - Schema s = Schema.parse(MAP_WITH_NULLABLE_PRIMITIVE_VALUE_TYPE_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(MAP_WITH_NULLABLE_PRIMITIVE_VALUE_TYPE_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); verifyMap(aoig, "aMap"); } @Test // That Union[T, NULL] is converted to just T. public void convertsNullableEnum() throws SerDeException { - Schema s = Schema.parse(NULLABLE_ENUM_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(NULLABLE_ENUM_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); assertEquals(1, aoig.getColumnNames().size()); @@ -542,10 +542,10 @@ public void convertsNullableEnum() throws SerDeException { @Test public void objectInspectorsAreCached() throws SerDeException { // Verify that Hive is caching the object inspectors for us. 
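The caching check that follows parses the kitchen-sink schema twice and expects Hive to hand back cached object inspectors rather than fresh instances. A rough sketch of the idea, with assertSame used purely as an illustrative assertion (the test's real checks are in the hunk below):

  // Illustrative only: two independently parsed copies of the same schema should
  // resolve to the same cached ObjectInspector instance.
  Schema first = AvroSerdeUtils.getSchemaFor(KITCHEN_SINK_SCHEMA);
  Schema second = AvroSerdeUtils.getSchemaFor(KITCHEN_SINK_SCHEMA);
  AvroObjectInspectorGenerator genA = new AvroObjectInspectorGenerator(first);
  AvroObjectInspectorGenerator genB = new AvroObjectInspectorGenerator(second);
  assertSame(genA.getObjectInspector(), genB.getObjectInspector());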
- Schema s = Schema.parse(KITCHEN_SINK_SCHEMA); + Schema s = AvroSerdeUtils.getSchemaFor(KITCHEN_SINK_SCHEMA); AvroObjectInspectorGenerator aoig = new AvroObjectInspectorGenerator(s); - Schema s2 = Schema.parse(KITCHEN_SINK_SCHEMA); + Schema s2 = AvroSerdeUtils.getSchemaFor(KITCHEN_SINK_SCHEMA); AvroObjectInspectorGenerator aoig2 = new AvroObjectInspectorGenerator(s2); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerde.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerde.java index 072225d..803a987 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerde.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerde.java @@ -61,8 +61,8 @@ " ]\n" + "}"; - static final Schema originalSchema = Schema.parse(originalSchemaString); - static final Schema newSchema = Schema.parse(newSchemaString); + static final Schema originalSchema = AvroSerdeUtils.getSchemaFor(originalSchemaString); + static final Schema newSchema = AvroSerdeUtils.getSchemaFor(newSchemaString); @Test public void initializeDoesNotReuseSchemasFromConf() throws SerDeException { @@ -81,7 +81,7 @@ public void initializeDoesNotReuseSchemasFromConf() throws SerDeException { // Verify that the schema now within the configuration is the one passed // in via the properties - assertEquals(newSchema, Schema.parse(conf.get(AVRO_SERDE_SCHEMA))); + assertEquals(newSchema, AvroSerdeUtils.getSchemaFor(conf.get(AVRO_SERDE_SCHEMA))); } @Test diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerdeUtils.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerdeUtils.java index 67d5570..af236f7 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerdeUtils.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerdeUtils.java @@ -58,7 +58,7 @@ "}"; private void testField(String schemaString, String fieldName, boolean shouldBeNullable) { - Schema s = Schema.parse(schemaString); + Schema s = AvroSerdeUtils.getSchemaFor(schemaString); assertEquals(shouldBeNullable, isNullableType(s.getField(fieldName).schema())); } @@ -106,11 +106,11 @@ public void isNullableTypeIdentifiesNonUnionTypes() { @Test public void getTypeFromNullableTypePositiveCase() { - Schema s = Schema.parse(NULLABLE_UNION); + Schema s = AvroSerdeUtils.getSchemaFor(NULLABLE_UNION); Schema typeFromNullableType = getOtherTypeFromNullableType(s.getField("mayBeNull").schema()); assertEquals(Schema.Type.STRING, typeFromNullableType.getType()); - s = Schema.parse(NULLABLE_UNION2); + s = AvroSerdeUtils.getSchemaFor(NULLABLE_UNION2); typeFromNullableType = getOtherTypeFromNullableType(s.getField("mayBeNull").schema()); assertEquals(Schema.Type.STRING, typeFromNullableType.getType()); } @@ -126,7 +126,7 @@ public void determineSchemaFindsLiterals() throws Exception { String schema = TestAvroObjectInspectorGenerator.RECORD_SCHEMA; Properties props = new Properties(); props.put(AvroSerdeUtils.SCHEMA_LITERAL, schema); - Schema expected = Schema.parse(schema); + Schema expected = AvroSerdeUtils.getSchemaFor(schema); assertEquals(expected, AvroSerdeUtils.determineSchemaOrThrowException(props)); } @@ -163,7 +163,7 @@ public void noneOptionWorksForSpecifyingSchemas() throws IOException, AvroSerdeE try { s = determineSchemaOrThrowException(props); assertNotNull(s); - assertEquals(Schema.parse(TestAvroObjectInspectorGenerator.RECORD_SCHEMA), s); + assertEquals(AvroSerdeUtils.getSchemaFor(TestAvroObjectInspectorGenerator.RECORD_SCHEMA), s); } catch(AvroSerdeException 
he) { fail("Should have parsed schema literal, not thrown exception."); } @@ -197,7 +197,7 @@ public void determineSchemaCanReadSchemaFromHDFS() throws IOException, AvroSerde Schema schemaFromHDFS = AvroSerdeUtils.getSchemaFromFS(onHDFS, miniDfs.getFileSystem().getConf()); - Schema expectedSchema = Schema.parse(schemaString); + Schema expectedSchema = AvroSerdeUtils.getSchemaFor(schemaString); assertEquals(expectedSchema, schemaFromHDFS); } finally { if(miniDfs != null) miniDfs.shutdown(); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java index f8161da..b573f50 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestAvroSerializer.java @@ -53,7 +53,7 @@ private Schema buildSchema(String recordValues) { " \"fields\": [" + recordValues + " ] }"; - return Schema.parse(s); + return AvroSerdeUtils.getSchemaFor(s); } /** @@ -231,7 +231,7 @@ public void canSerializeUnions() throws SerDeException, IOException { public void canSerializeEnums() throws SerDeException, IOException { String type = "{\"type\": \"enum\", \"name\": \"enum1_values\", " + "\"symbols\":[\"BLUE\",\"RED\",\"GREEN\"]}"; - Schema schema = Schema.parse(type); + Schema schema = AvroSerdeUtils.getSchemaFor(type); String field = "{ \"name\":\"enum1\", \"type\": " + schema + " }"; for(enum1 e : enum1.values()) { GenericEnumSymbol symbol = new GenericData.EnumSymbol(schema, e.toString()); @@ -247,7 +247,7 @@ public void canSerializeNullableEnums() throws SerDeException, IOException { String type = "{\"type\": \"enum\", \"name\": \"enum1_values\",\n" + " \"namespace\": \"org.apache.hadoop.hive\",\n" + " \"symbols\":[\"BLUE\",\"RED\",\"GREEN\"]}"; - Schema schema = Schema.parse(type); + Schema schema = AvroSerdeUtils.getSchemaFor(type); String field = "{ \"name\":\"nullableenum\", \"type\": [\"null\", " + schema + "] }"; GenericEnumSymbol symbol = new GenericData.EnumSymbol(schema, enum1.BLUE.toString()); GenericRecord r = serializeAndDeserialize(field, "nullableenum", symbol); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestGenericAvroRecordWritable.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestGenericAvroRecordWritable.java index cf3b16c..fb13b47 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestGenericAvroRecordWritable.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestGenericAvroRecordWritable.java @@ -50,7 +50,7 @@ @Test public void writableContractIsImplementedCorrectly() throws IOException { - Schema schema = Schema.parse(schemaJSON); + Schema schema = AvroSerdeUtils.getSchemaFor(schemaJSON); GenericRecord gr = new GenericData.Record(schema); gr.put("first", "The"); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestSchemaReEncoder.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestSchemaReEncoder.java index 8dd6109..922ca7c 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestSchemaReEncoder.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestSchemaReEncoder.java @@ -27,6 +27,7 @@ import org.junit.Test; public class TestSchemaReEncoder { + @Test public void schemasCanAddFields() throws SerDeException { String original = "{\n" + @@ -56,8 +57,8 @@ public void schemasCanAddFields() throws SerDeException { " }\n" + " ]\n" + "}"; - Schema originalSchema = Schema.parse(original); - Schema evolvedSchema = 
Schema.parse(evolved); + Schema originalSchema = AvroSerdeUtils.getSchemaFor(original); + Schema evolvedSchema = AvroSerdeUtils.getSchemaFor(evolved); GenericRecord record = new GenericData.Record(originalSchema); record.put("text", "it is a far better thing I do, yadda, yadda"); @@ -97,8 +98,8 @@ public void schemasCanAddFields() throws SerDeException { " }\n" + " ]\n" + "}"; - Schema originalSchema2 = Schema.parse(original2); - Schema evolvedSchema2 = Schema.parse(evolved2); + Schema originalSchema2 = AvroSerdeUtils.getSchemaFor(original2); + Schema evolvedSchema2 = AvroSerdeUtils.getSchemaFor(evolved2); record = new GenericData.Record(originalSchema2); record.put("a", 19); diff --git a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestThatEvolvedSchemasActAsWeWant.java b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestThatEvolvedSchemasActAsWeWant.java index 4b8cc98..70613d8 100644 --- a/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestThatEvolvedSchemasActAsWeWant.java +++ b/serde/src/test/org/apache/hadoop/hive/serde2/avro/TestThatEvolvedSchemasActAsWeWant.java @@ -34,6 +34,7 @@ import static org.junit.Assert.assertTrue; public class TestThatEvolvedSchemasActAsWeWant { + @Test public void resolvedSchemasShouldReturnReaderSchema() throws IOException { // Need to verify that when reading a datum with an updated reader schema @@ -68,7 +69,7 @@ public void resolvedSchemasShouldReturnReaderSchema() throws IOException { " ]\n" + "}"; - Schema[] schemas = {Schema.parse(v0), Schema.parse(v1)}; + Schema[] schemas = {AvroSerdeUtils.getSchemaFor(v0), AvroSerdeUtils.getSchemaFor(v1)}; // Encode a schema with v0, write out. GenericRecord record = new GenericData.Record(schemas[0]); diff --git a/service/src/java/org/apache/hive/service/cli/CLIService.java b/service/src/java/org/apache/hive/service/cli/CLIService.java index 80d7b82..d2cdfc1 100644 --- a/service/src/java/org/apache/hive/service/cli/CLIService.java +++ b/service/src/java/org/apache/hive/service/cli/CLIService.java @@ -377,7 +377,7 @@ public OperationStatus getOperationStatus(OperationHandle opHandle) LOG.trace(opHandle + ": The background operation was cancelled", e); } catch (ExecutionException e) { // The background operation thread was aborted - LOG.trace(opHandle + ": The background operation was aborted", e); + LOG.warn(opHandle + ": The background operation was aborted", e); } catch (InterruptedException e) { // No op, this thread was interrupted // In this case, the call might return sooner than long polling timeout
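One change earlier in this patch is worth flagging for downstream callers: LazyBinaryUtils.checkObjectByteInfo no longer pulls a VInt out of a ThreadLocal and instead takes one from the caller, so each parse loop now allocates its own scratch objects once. A minimal sketch of the new calling convention, mirroring the LazyBinaryStruct change above (fieldOI, bytes and offset stand in for the caller's own state):

  // Caller-owned scratch objects replace the removed ThreadLocal<VInt>.
  final LazyBinaryUtils.VInt vInt = new LazyBinaryUtils.VInt();
  final LazyBinaryUtils.RecordInfo recordInfo = new LazyBinaryUtils.RecordInfo();
  LazyBinaryUtils.checkObjectByteInfo(fieldOI, bytes, offset, recordInfo, vInt);
  int fieldStart = offset + recordInfo.elementOffset;    // where the element's bytes begin
  int fieldLength = recordInfo.elementSize;               // how many bytes it occupies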