diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
index 5eb321c..7a2bee2 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java
@@ -57,6 +57,7 @@
 import org.apache.hadoop.hive.ql.udf.UDFCeil;
 import org.apache.hadoop.hive.ql.udf.UDFConv;
 import org.apache.hadoop.hive.ql.udf.UDFCos;
+import org.apache.hadoop.hive.ql.udf.generic.UDFCurrentDB;
 import org.apache.hadoop.hive.ql.udf.UDFDate;
 import org.apache.hadoop.hive.ql.udf.UDFDateAdd;
 import org.apache.hadoop.hive.ql.udf.UDFDateDiff;
@@ -305,6 +306,8 @@
     registerUDF("^", UDFOPBitXor.class, true);
     registerUDF("~", UDFOPBitNot.class, true);
 
+    registerGenericUDF("current_database", UDFCurrentDB.class);
+
     registerGenericUDF("isnull", GenericUDFOPNull.class);
     registerGenericUDF("isnotnull", GenericUDFOPNotNull.class);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java
index 1bfcee6..4403867 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/RowSchema.java
@@ -30,6 +30,7 @@
   private ArrayList<ColumnInfo> signature;
 
   public RowSchema() {
+    this.signature = new ArrayList<ColumnInfo>();
   }
 
   public RowSchema(RowSchema that) {
@@ -50,11 +51,13 @@ public void setSignature(ArrayList<ColumnInfo> signature) {
 
   @Override
   public String toString() {
-    StringBuilder sb = new StringBuilder('(');
-    if (signature != null) {
-      for (ColumnInfo col : signature) {
-        sb.append(col.toString());
+    StringBuilder sb = new StringBuilder();
+    sb.append('(');
+    for (ColumnInfo col: signature) {
+      if (sb.length() > 1) {
+        sb.append(',');
       }
+      sb.append(col.toString());
     }
     sb.append(')');
     return sb.toString();
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
index f63da09..be0e01a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/mr/ExecDriver.java
@@ -31,6 +31,8 @@
 import java.util.List;
 import java.util.Properties;
 
+import static org.apache.hadoop.hive.metastore.MetaStoreUtils.DEFAULT_DATABASE_NAME;
+
 import org.apache.commons.lang.StringUtils;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -568,6 +570,12 @@ protected void setInputAttributes(Configuration conf) {
     // Intentionally overwrites anything the user may have put here
     conf.setBoolean("hive.input.format.sorted", mWork.isInputFormatSorted());
+
+    String currentDB = SessionState.get().getCurrentDatabase();
+    if (currentDB == null) {
+      currentDB = DEFAULT_DATABASE_NAME;
+    }
+    conf.set("hive.current.database", currentDB);
   }
 
   public boolean mapStarted() {
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java
new file mode 100644
index 0000000..fd60fed
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/io/NullRowsInputFormat.java
@@ -0,0 +1,126 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.io;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.NullWritable;
+import org.apache.hadoop.mapred.InputFormat;
+import org.apache.hadoop.mapred.InputSplit;
+import org.apache.hadoop.mapred.JobConf;
+import org.apache.hadoop.mapred.JobConfigurable;
+import org.apache.hadoop.mapred.RecordReader;
+import org.apache.hadoop.mapred.Reporter;
+
+/**
+ * NullRowsInputFormat outputs null rows, at most MAX_ROW (100) of them.
+ */
+public class NullRowsInputFormat implements InputFormat<NullWritable, NullWritable>,
+    JobConfigurable {
+
+  static final int MAX_ROW = 100; // to prevent an infinite loop
+  static final Log LOG = LogFactory.getLog(NullRowsRecordReader.class.getName());
+
+  public static class DummyInputSplit implements InputSplit {
+    public DummyInputSplit() {
+    }
+
+    @Override
+    public long getLength() throws IOException {
+      return 1;
+    }
+
+    @Override
+    public String[] getLocations() throws IOException {
+      return new String[0];
+    }
+
+    @Override
+    public void readFields(DataInput arg0) throws IOException {
+    }
+
+    @Override
+    public void write(DataOutput arg0) throws IOException {
+    }
+
+  }
+
+  public static class NullRowsRecordReader implements RecordReader<NullWritable, NullWritable> {
+
+    private int counter;
+
+    public NullRowsRecordReader() {
+    }
+
+    @Override
+    public void close() throws IOException {
+    }
+
+    @Override
+    public NullWritable createKey() {
+      return NullWritable.get();
+    }
+
+    @Override
+    public NullWritable createValue() {
+      return NullWritable.get();
+    }
+
+    @Override
+    public long getPos() throws IOException {
+      return counter;
+    }
+
+    @Override
+    public float getProgress() throws IOException {
+      return (float) counter / MAX_ROW;
+    }
+
+    @Override
+    public boolean next(NullWritable arg0, NullWritable arg1) throws IOException {
+      if (counter++ < MAX_ROW) {
+        return true;
+      }
+      return false;
+    }
+  }
+
+  @Override
+  public RecordReader<NullWritable, NullWritable> getRecordReader(InputSplit arg0,
+      JobConf arg1, Reporter arg2) throws IOException {
+    return new NullRowsRecordReader();
+  }
+
+  @Override
+  public InputSplit[] getSplits(JobConf arg0, int arg1) throws IOException {
+    InputSplit[] ret = new InputSplit[1];
+    ret[0] = new DummyInputSplit();
+    LOG.info("Calculating splits");
+    return ret;
+  }
+
+  @Override
+  public void configure(JobConf job) {
+    LOG.info("Using null rows input format");
+  }
+
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java
index 8ce1c15..91a6d5a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/OneNullRowInputFormat.java
@@ -18,19 +18,11 @@
 
 package org.apache.hadoop.hive.ql.io;
 
-import java.io.DataInput;
-import java.io.DataOutput;
 import java.io.IOException;
-import java.util.List;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.hive.ql.plan.MapredWork;
 import org.apache.hadoop.io.NullWritable;
-import org.apache.hadoop.mapred.InputFormat;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobConfigurable;
 import org.apache.hadoop.mapred.RecordReader;
 import org.apache.hadoop.mapred.Reporter;
 
@@ -39,95 +31,34 @@
  * metadata only queries.
  *
  */
-public class OneNullRowInputFormat implements
-    InputFormat<NullWritable, NullWritable>, JobConfigurable {
-  private static final Log LOG = LogFactory.getLog(OneNullRowInputFormat.class
-      .getName());
-  MapredWork mrwork = null;
-  List<String> partitions;
-  long len;
-
-  static public class DummyInputSplit implements InputSplit {
-    public DummyInputSplit() {
-    }
-
-    @Override
-    public long getLength() throws IOException {
-      return 1;
-    }
-
-    @Override
-    public String[] getLocations() throws IOException {
-      return new String[0];
-    }
-
-    @Override
-    public void readFields(DataInput arg0) throws IOException {
-    }
-
-    @Override
-    public void write(DataOutput arg0) throws IOException {
-    }
+public class OneNullRowInputFormat extends NullRowsInputFormat {
 
+  @Override
+  public RecordReader<NullWritable, NullWritable> getRecordReader(InputSplit arg0,
+      JobConf arg1, Reporter arg2) throws IOException {
+    return new OneNullRowRecordReader();
   }
 
-  static public class OneNullRowRecordReader implements RecordReader<NullWritable, NullWritable> {
-    private boolean processed = false;
-    public OneNullRowRecordReader() {
-    }
-    @Override
-    public void close() throws IOException {
-    }
-
-    @Override
-    public NullWritable createKey() {
-      return NullWritable.get();
-    }
-
-    @Override
-    public NullWritable createValue() {
-      return NullWritable.get();
-    }
+  public static class OneNullRowRecordReader extends NullRowsRecordReader {
+    private boolean processed;
 
     @Override
     public long getPos() throws IOException {
-      return (processed ? 1 : 0);
+      return processed ? 1 : 0;
     }
 
     @Override
     public float getProgress() throws IOException {
-      return (float) (processed ? 1.0 : 0.0);
+      return processed ? 1.0f : 0f;
     }
 
     @Override
-    public boolean next(NullWritable arg0, NullWritable arg1) throws IOException {
-      if(processed) {
+    public boolean next(NullWritable key, NullWritable value) throws IOException {
+      if (processed) {
         return false;
-      } else {
-        processed = true;
-        return true;
       }
+      processed = true;
+      return true;
     }
-
-  }
-
-  @Override
-  public RecordReader getRecordReader(InputSplit arg0, JobConf arg1, Reporter arg2)
-      throws IOException {
-    return new OneNullRowRecordReader();
-  }
-
-  @Override
-  public InputSplit[] getSplits(JobConf arg0, int arg1) throws IOException {
-    InputSplit[] ret = new InputSplit[1];
-    ret[0] = new DummyInputSplit();
-    LOG.info("Calculating splits");
-    return ret;
   }
-
-  @Override
-  public void configure(JobConf job) {
-    LOG.info("Using one null row input format");
-  }
-
-}
\ No newline at end of file
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
index 2343a2c..a4cf51d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/HiveParser.g
@@ -1851,7 +1851,7 @@ regular_body
 selectStatement
    :
    selectClause
-   fromClause
+   fromClause?
    whereClause?
    groupByClause?
    havingClause?
@@ -1860,7 +1860,7 @@ selectStatement
    distributeByClause?
    sortByClause?
    window_clause?
-   limitClause? -> ^(TOK_QUERY fromClause ^(TOK_INSERT ^(TOK_DESTINATION ^(TOK_DIR TOK_TMP_FILE))
+   limitClause? -> ^(TOK_QUERY fromClause? ^(TOK_INSERT ^(TOK_DESTINATION ^(TOK_DIR TOK_TMP_FILE))
                      selectClause whereClause? groupByClause? havingClause? orderByClause? clusterByClause?
                      distributeByClause? sortByClause? window_clause? limitClause?))
    ;
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 0d9cc69..7a226ef 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hive.ql.parse;
 
+import java.io.IOException;
 import java.io.Serializable;
 import java.util.ArrayList;
 import java.util.HashMap;
@@ -37,6 +38,8 @@
 import org.antlr.runtime.tree.TreeWizard;
 import org.antlr.runtime.tree.TreeWizard.ContextVisitor;
 import org.apache.commons.lang.StringUtils;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hive.common.FileUtils;
 import org.apache.hadoop.hive.common.JavaUtils;
@@ -73,8 +76,10 @@
 import org.apache.hadoop.hive.ql.hooks.ReadEntity;
 import org.apache.hadoop.hive.ql.hooks.WriteEntity;
 import org.apache.hadoop.hive.ql.io.CombineHiveInputFormat;
+import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
 import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
 import org.apache.hadoop.hive.ql.io.RCFileInputFormat;
+import org.apache.hadoop.hive.ql.io.NullRowsInputFormat;
 import org.apache.hadoop.hive.ql.lib.DefaultGraphWalker;
 import org.apache.hadoop.hive.ql.lib.Dispatcher;
 import org.apache.hadoop.hive.ql.lib.GraphWalker;
@@ -161,6 +166,7 @@
 import org.apache.hadoop.hive.serde.serdeConstants;
 import org.apache.hadoop.hive.serde2.Deserializer;
 import org.apache.hadoop.hive.serde2.MetadataTypedColumnsetSerDe;
+import org.apache.hadoop.hive.serde2.NullStructSerDe;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe;
 import org.apache.hadoop.hive.serde2.objectinspector.ConstantObjectInspector;
@@ -172,6 +178,7 @@
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.mapred.InputFormat;
 
 /**
@@ -179,6 +186,9 @@
  */
 public class SemanticAnalyzer extends BaseSemanticAnalyzer {
+
+  private static final String DUMMY_ALIAS = "_dummy_alias";
+
   private HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner;
   private HashMap<TableScanOperator, PrunedPartitionList> opToPartList;
   private HashMap<String, Operator<? extends OperatorDesc>> topOps;
@@ -2066,10 +2076,6 @@ private Integer genColListRegex(String colRegex, String tabAlias,
         }
       }
     }
-    if (matched == 0) {
-      throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(sel));
-    }
-
     if (unparseTranslator.isEnabled()) {
       unparseTranslator.addTranslation(sel, replacementText.toString());
     }
@@ -8452,6 +8458,15 @@ public Operator genPlan(QB qb) throws SemanticException {
       aliasToOpInfo.put(alias, op);
     }
 
+    if (aliasToOpInfo.isEmpty()) {
+      qb.getMetaData().setSrcForAlias(DUMMY_ALIAS, getDummyTable());
+      TableScanOperator op = (TableScanOperator) genTablePlan(DUMMY_ALIAS, qb);
+      op.getConf().setRowLimit(1);
+      qb.addAlias(DUMMY_ALIAS);
+      qb.setTabAlias(DUMMY_ALIAS, DUMMY_ALIAS);
+      aliasToOpInfo.put(DUMMY_ALIAS, op);
+    }
+
     Operator srcOpInfo = null;
     Operator lastPTFOp = null;
 
@@ -8521,6 +8536,33 @@
     return bodyOpInfo;
   }
 
+  private Table getDummyTable() throws SemanticException {
+    Path dummyPath = createDummyFile();
+    Table desc = new Table("default", DUMMY_ALIAS);
+    desc.getTTable().getSd().setLocation(dummyPath.toString());
+    desc.getTTable().getSd().getSerdeInfo().setSerializationLib(NullStructSerDe.class.getName());
+    desc.setInputFormatClass(NullRowsInputFormat.class);
+    desc.setOutputFormatClass(HiveIgnoreKeyTextOutputFormat.class);
+    return desc;
+  }
+
+  // Add a dummy data file so the dummy table is not pruned away by CombineHiveInputFormat, etc.
+  private Path createDummyFile() throws SemanticException {
+    Path emptyScratchDirStr = new Path(ctx.getMRTmpFileURI(), "dummy_file");
+    FSDataOutputStream fout = null;
+    try {
+      FileSystem fs = emptyScratchDirStr.getFileSystem(conf);
+      fout = fs.create(emptyScratchDirStr);
+      fout.write(1);
+      fout.close();
+    } catch (IOException e) {
+      throw new SemanticException(e);
+    } finally {
+      IOUtils.closeStream(fout);
+    }
+    return emptyScratchDirStr.getParent();
+  }
+
   /**
    * Generates the operator DAG needed to implement lateral views and attaches
    * it to the TS operator.
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/UDFCurrentDB.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/UDFCurrentDB.java
new file mode 100644
index 0000000..f2d2685
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/UDFCurrentDB.java
@@ -0,0 +1,46 @@
+package org.apache.hadoop.hive.ql.udf.generic;
+
+import org.apache.hadoop.hive.ql.exec.Description;
+import org.apache.hadoop.hive.ql.exec.MapredContext;
+import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
+import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.session.SessionState;
+import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
+import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
+import org.apache.hadoop.io.Text;
+
+// deterministic within the scope of a single query
+@Description(name = "current_database",
+    value = "_FUNC_() - returns the name of the current database")
+public class UDFCurrentDB extends GenericUDF {
+
+  private MapredContext context;
+
+  @Override
+  public void configure(MapredContext context) {
+    this.context = context;
+  }
+
+  @Override
+  public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumentException {
+    String database;
+    if (context != null) {
+      database = context.getJobConf().get("hive.current.database");
+    } else {
+      database = SessionState.get().getCurrentDatabase();
+    }
+    return PrimitiveObjectInspectorFactory.getPrimitiveWritableConstantObjectInspector(
+        TypeInfoFactory.stringTypeInfo, new Text(database));
+  }
+
+  @Override
+  public Object evaluate(DeferredObject[] arguments) throws HiveException {
+    throw new IllegalStateException("never");
+  }
+
+  @Override
+  public String getDisplayString(String[] children) {
+    return "current_database()";
+  }
+}
diff --git ql/src/test/queries/clientpositive/select_dummy_source.q ql/src/test/queries/clientpositive/select_dummy_source.q
new file mode 100644
index 0000000..25a1a81
--- /dev/null
+++ ql/src/test/queries/clientpositive/select_dummy_source.q
@@ -0,0 +1,33 @@
+explain
+select 'a', 100;
+select 'a', 100;
+
+--evaluation
+explain
+select 1 + 1;
+select 1 + 1;
+
+-- explode (not possible for lateral view)
+explain
+select explode(array('a', 'b'));
+select explode(array('a', 'b'));
+
+set hive.fetch.task.conversion=more;
+
+explain
+select 'a', 100;
+select 'a', 100;
+
+explain
+select 1 + 1;
+select 1 + 1;
+
+explain
+select explode(array('a', 'b'));
+select explode(array('a', 'b'));
+
+-- subquery
+explain
+select 2 + 3,x
+from (select 1 + 2 x) X;
+select 2 + 3,x from (select 1 + 2 x) X;
+
diff --git ql/src/test/queries/clientpositive/udf_current_database.q ql/src/test/queries/clientpositive/udf_current_database.q
new file mode 100644
index 0000000..4aed498
--- /dev/null
+++ ql/src/test/queries/clientpositive/udf_current_database.q
@@ -0,0 +1,12 @@
+DESCRIBE FUNCTION current_database;
+
+explain
+select current_database();
+select current_database();
+
+create database xxx;
+use xxx;
+
+explain
+select current_database();
+select current_database();
diff --git ql/src/test/results/clientpositive/select_dummy_source.q.out ql/src/test/results/clientpositive/select_dummy_source.q.out
new file mode 100644
index 0000000..6233a60
--- /dev/null
+++ ql/src/test/results/clientpositive/select_dummy_source.q.out
@@ -0,0 +1,332 @@
+PREHOOK: query: explain
+select 'a', 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 'a', 100
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'a') (TOK_SELEXPR 100))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        _dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: 'a'
+                    type: string
+                    expr: 100
+                    type: int
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select 'a', 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select 'a', 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+a	100
+PREHOOK: query: --evaluation
+explain
+select 1 + 1
+PREHOOK: type: QUERY
+POSTHOOK: query: --evaluation
+explain
+select 1 + 1
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ 1 1)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        _dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: (1 + 1)
+                    type: int
+              outputColumnNames: _col0
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select 1 + 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select 1 + 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+2
+PREHOOK: query: -- explode (not possible for lateral view)
+explain
+select explode(array('a', 'b'))
+PREHOOK: type: QUERY
+POSTHOOK: query: -- explode (not possible for lateral view)
+explain
+select explode(array('a', 'b'))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION explode (TOK_FUNCTION array 'a' 'b'))))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        _dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: array('a','b')
+                    type: array<string>
+              outputColumnNames: _col0
+              UDTF Operator
+                function name: explode
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select explode(array('a', 'b'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select explode(array('a', 'b'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+a
+b
+PREHOOK: query: explain
+select 'a', 100
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 'a', 100
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR 'a') (TOK_SELEXPR 100))))
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: _dummy_alias
+          Row Limit Per Split: 1
+          Select Operator
+            expressions:
+                  expr: 'a'
+                  type: string
+                  expr: 100
+                  type: int
+            outputColumnNames: _col0, _col1
+            ListSink
+
+
+PREHOOK: query: select 'a', 100
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select 'a', 100
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+a	100
+PREHOOK: query: explain
+select 1 + 1
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select 1 + 1
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ 1 1)))))
+
+STAGE DEPENDENCIES:
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        TableScan
+          alias: _dummy_alias
+          Row Limit Per Split: 1
+          Select Operator
+            expressions:
+                  expr: (1 + 1)
+                  type: int
+            outputColumnNames: _col0
+            ListSink
+
+
+PREHOOK: query: select 1 + 1
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select 1 + 1
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+2
+PREHOOK: query: explain
+select explode(array('a', 'b'))
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select explode(array('a', 'b'))
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION explode (TOK_FUNCTION array 'a' 'b'))))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        _dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: array('a','b')
+                    type: array<string>
+              outputColumnNames: _col0
+              UDTF Operator
+                function name: explode
+                File Output Operator
+                  compressed: false
+                  GlobalTableId: 0
+                  table:
+                      input format: org.apache.hadoop.mapred.TextInputFormat
+                      output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                      serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select explode(array('a', 'b'))
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select explode(array('a', 'b'))
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+a
+b
+PREHOOK: query: -- subquery
+explain
+select 2 + 3,x
+from (select 1 + 2 x) X
+PREHOOK: type: QUERY
+POSTHOOK: query: -- subquery
+explain
+select 2 + 3,x
+from (select 1 + 2 x) X
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_FROM (TOK_SUBQUERY (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ 1 2) x)))) X)) (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (+ 2 3)) (TOK_SELEXPR (TOK_TABLE_OR_COL x)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        x:_dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: (2 + 3)
+                    type: int
+                    expr: (1 + 2)
+                    type: int
+              outputColumnNames: _col0, _col1
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select 2 + 3,x from (select 1 + 2 x) X
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select 2 + 3,x from (select 1 + 2 x) X
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+5	3
diff --git ql/src/test/results/clientpositive/show_functions.q.out ql/src/test/results/clientpositive/show_functions.q.out
index 57c9036..3400ac2 100644
--- ql/src/test/results/clientpositive/show_functions.q.out
+++ ql/src/test/results/clientpositive/show_functions.q.out
@@ -50,6 +50,7 @@ covar_pop
 covar_samp
 create_union
 cume_dist
+current_database
 date_add
 date_sub
 datediff
@@ -218,6 +219,7 @@ covar_pop
 covar_samp
 create_union
 cume_dist
+current_database
 PREHOOK: query: SHOW FUNCTIONS '.*e$'
 PREHOOK: type: SHOWFUNCTIONS
 POSTHOOK: query: SHOW FUNCTIONS '.*e$'
@@ -225,6 +227,7 @@ POSTHOOK: type: SHOWFUNCTIONS
 assert_true
 case
 coalesce
+current_database
 decode
 e
 encode
diff --git ql/src/test/results/clientpositive/udf_current_database.q.out ql/src/test/results/clientpositive/udf_current_database.q.out
new file mode 100644
index 0000000..1ff9038
--- /dev/null
+++ ql/src/test/results/clientpositive/udf_current_database.q.out
@@ -0,0 +1,109 @@
+PREHOOK: query: DESCRIBE FUNCTION current_database
+PREHOOK: type: DESCFUNCTION
+POSTHOOK: query: DESCRIBE FUNCTION current_database
+POSTHOOK: type: DESCFUNCTION
+current_database() - returns the name of the current database
+PREHOOK: query: explain
+select current_database()
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select current_database()
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION current_database)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        _dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: current_database()
+                    type: string
+              outputColumnNames: _col0
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select current_database()
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select current_database()
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+default
+PREHOOK: query: create database xxx
+PREHOOK: type: CREATEDATABASE
+POSTHOOK: query: create database xxx
+POSTHOOK: type: CREATEDATABASE
+PREHOOK: query: use xxx
+PREHOOK: type: SWITCHDATABASE
+POSTHOOK: query: use xxx
+POSTHOOK: type: SWITCHDATABASE
+PREHOOK: query: explain
+select current_database()
+PREHOOK: type: QUERY
+POSTHOOK: query: explain
+select current_database()
+POSTHOOK: type: QUERY
+ABSTRACT SYNTAX TREE:
+  (TOK_QUERY (TOK_INSERT (TOK_DESTINATION (TOK_DIR TOK_TMP_FILE)) (TOK_SELECT (TOK_SELEXPR (TOK_FUNCTION current_database)))))
+
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-0 is a root stage
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Alias -> Map Operator Tree:
+        _dummy_alias
+          TableScan
+            alias: _dummy_alias
+            Row Limit Per Split: 1
+            Select Operator
+              expressions:
+                    expr: current_database()
+                    type: string
+              outputColumnNames: _col0
+              File Output Operator
+                compressed: false
+                GlobalTableId: 0
+                table:
+                    input format: org.apache.hadoop.mapred.TextInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+
+
+PREHOOK: query: select current_database()
+PREHOOK: type: QUERY
+PREHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+POSTHOOK: query: select current_database()
+POSTHOOK: type: QUERY
+POSTHOOK: Input: default@_dummy_alias
+#### A masked pattern was here ####
+xxx
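
Reviewer note (not part of the patch): the current database name reaches map-side tasks through the job configuration. ExecDriver.setInputAttributes() writes the session's database into the "hive.current.database" key before the job is submitted, and UDFCurrentDB.initialize() reads the same key back through MapredContext.getJobConf() when running inside a task, falling back to SessionState on the client side. The snippet below is only an illustrative sketch of that hand-off using the plain Hadoop JobConf API; the class name and the "xxx" value are invented for the example and do not appear in the patch.

    import org.apache.hadoop.mapred.JobConf;

    public class CurrentDatabasePropagationSketch {
      public static void main(String[] args) {
        JobConf job = new JobConf();
        // What ExecDriver.setInputAttributes() does on the client before submitting the job.
        job.set("hive.current.database", "xxx");
        // What UDFCurrentDB.initialize() does via MapredContext.getJobConf() inside a task.
        String db = job.get("hive.current.database");
        System.out.println("current_database() would resolve to: " + db);
      }
    }

Because initialize() returns a constant ObjectInspector, the value is expected to be folded into the plan at compile time, which is why evaluate() simply throws.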