Index: contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFExplode2.java
===================================================================
--- contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFExplode2.java (revision 901960)
+++ contrib/src/java/org/apache/hadoop/hive/contrib/udtf/example/GenericUDTFExplode2.java (working copy)
@@ -22,7 +22,7 @@
 import java.util.List;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentException;
-import org.apache.hadoop.hive.ql.exec.description;
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF;
 import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector;
@@ -30,7 +30,7 @@
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector;
-@description(name = "explode2", value = "_FUNC_(a) - like explode, but outputs two identical columns (for "
+@Description(name = "explode2", value = "_FUNC_(a) - like explode, but outputs two identical columns (for "
     + "testing purposes)")
 public class GenericUDTFExplode2 extends GenericUDTF {
Index: contrib/src/java/org/apache/hadoop/hive/contrib/genericudf/example/GenericUDFDBOutput.java
===================================================================
--- contrib/src/java/org/apache/hadoop/hive/contrib/genericudf/example/GenericUDFDBOutput.java (revision 901960)
+++ contrib/src/java/org/apache/hadoop/hive/contrib/genericudf/example/GenericUDFDBOutput.java (working copy)
@@ -25,7 +25,7 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException;
-import org.apache.hadoop.hive.ql.exec.description;
+import org.apache.hadoop.hive.ql.exec.Description;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
 import org.apache.hadoop.hive.ql.udf.UDFType;
 import org.apache.hadoop.hive.ql.udf.generic.GenericUDF;
@@ -51,7 +51,7 @@
 * Use hive's ADD JAR feature to add your JDBC Driver to the distributed cache,
 * otherwise GenericUDFDBoutput will fail.
 */
-@description(name = "dboutput", value = "_FUNC_(jdbcstring,username,password,preparedstatement,[arguments]) - sends data to a jdbc driver", extended = "argument 0 is the JDBC connection string\n"
+@Description(name = "dboutput", value = "_FUNC_(jdbcstring,username,password,preparedstatement,[arguments]) - sends data to a jdbc driver", extended = "argument 0 is the JDBC connection string\n"
     + "argument 1 is the user name\n"
     + "argument 2 is the password\n"
     + "argument 3 is an SQL query to be used in the PreparedStatement\n"
Index: ql/src/test/org/apache/hadoop/hive/scripts/extracturl.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/scripts/extracturl.java (revision 901960)
+++ ql/src/test/org/apache/hadoop/hive/scripts/extracturl.java (working copy)
@@ -1,50 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.
- * You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hive.scripts;
-
-import java.io.BufferedReader;
-import java.io.InputStreamReader;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-public class extracturl {
-
-  protected static final Pattern pattern = Pattern.compile(
-      "link",
-      Pattern.CASE_INSENSITIVE);
-  static InputStreamReader converter = new InputStreamReader(System.in);
-  static BufferedReader in = new BufferedReader(converter);
-
-  public static void main(String[] args) {
-    String input;
-    try {
-      while ((input = in.readLine()) != null) {
-        Matcher m = pattern.matcher(input);
-
-        while (m.find()) {
-          String url = input.substring(m.start(1), m.end(1));
-          System.out.println(url + "\t" + "1");
-        }
-      }
-    } catch (Exception e) {
-      e.printStackTrace();
-      System.exit(1);
-    }
-  }
-}
Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java
===================================================================
--- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (revision 901960)
+++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExecDriver.java (working copy)
@@ -37,18 +37,18 @@
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc;
-import org.apache.hadoop.hive.ql.plan.extractDesc;
-import org.apache.hadoop.hive.ql.plan.fileSinkDesc;
-import org.apache.hadoop.hive.ql.plan.filterDesc;
-import org.apache.hadoop.hive.ql.plan.mapredWork;
-import org.apache.hadoop.hive.ql.plan.reduceSinkDesc;
-import org.apache.hadoop.hive.ql.plan.scriptDesc;
-import org.apache.hadoop.hive.ql.plan.selectDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExtractDesc;
+import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
+import org.apache.hadoop.hive.ql.plan.FilterDesc;
+import org.apache.hadoop.hive.ql.plan.MapredWork;
+import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
+import org.apache.hadoop.hive.ql.plan.ScriptDesc;
+import org.apache.hadoop.hive.ql.plan.SelectDesc;
 import org.apache.hadoop.hive.serde.Constants;
 import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;
 import org.apache.hadoop.mapred.TextInputFormat;
@@ -127,7 +127,7 @@
     }
   }
-  mapredWork mr;
+  MapredWork mr;
   protected void setUp() {
     mr = PlanUtils.getMapRedWork();
@@ -157,39 +157,39 @@
     }
   }
-  private filterDesc getTestFilterDesc(String column) {
-    ArrayList children1 = new ArrayList();
-    children1.add(new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo,
+  private FilterDesc getTestFilterDesc(String column) {
+    ArrayList
children1 = new ArrayList(); + children1.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, column, "", false)); - exprNodeDesc lhs = new exprNodeGenericFuncDesc( + ExprNodeDesc lhs = new ExprNodeGenericFuncDesc( TypeInfoFactory.doubleTypeInfo, FunctionRegistry.getFunctionInfo( Constants.DOUBLE_TYPE_NAME).getGenericUDF(), children1); - ArrayList children2 = new ArrayList(); - children2.add(new exprNodeConstantDesc(TypeInfoFactory.longTypeInfo, Long + ArrayList children2 = new ArrayList(); + children2.add(new ExprNodeConstantDesc(TypeInfoFactory.longTypeInfo, Long .valueOf(100))); - exprNodeDesc rhs = new exprNodeGenericFuncDesc( + ExprNodeDesc rhs = new ExprNodeGenericFuncDesc( TypeInfoFactory.doubleTypeInfo, FunctionRegistry.getFunctionInfo( Constants.DOUBLE_TYPE_NAME).getGenericUDF(), children2); - ArrayList children3 = new ArrayList(); + ArrayList children3 = new ArrayList(); children3.add(lhs); children3.add(rhs); - exprNodeDesc desc = new exprNodeGenericFuncDesc( + ExprNodeDesc desc = new ExprNodeGenericFuncDesc( TypeInfoFactory.booleanTypeInfo, FunctionRegistry.getFunctionInfo("<") .getGenericUDF(), children3); - return new filterDesc(desc, false); + return new FilterDesc(desc, false); } @SuppressWarnings("unchecked") private void populateMapPlan1(Table src) { mr.setNumReduceTasks(Integer.valueOf(0)); - Operator op2 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op2 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapplan1.out", Utilities.defaultTd, true)); - Operator op1 = OperatorFactory.get(getTestFilterDesc("key"), + Operator op1 = OperatorFactory.get(getTestFilterDesc("key"), op2); Utilities.addMapWork(mr, src, "a", op1); @@ -199,15 +199,15 @@ private void populateMapPlan2(Table src) { mr.setNumReduceTasks(Integer.valueOf(0)); - Operator op3 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op3 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapplan2.out", Utilities.defaultTd, false)); - Operator op2 = OperatorFactory.get(new scriptDesc("/bin/cat", + Operator op2 = OperatorFactory.get(new ScriptDesc("/bin/cat", PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "key,value"), TextRecordWriter.class, PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "key,value"), TextRecordReader.class), op3); - Operator op1 = OperatorFactory.get(getTestFilterDesc("key"), + Operator op1 = OperatorFactory.get(getTestFilterDesc("key"), op2); Utilities.addMapWork(mr, src, "a", op1); @@ -222,7 +222,7 @@ outputColumns.add("_col" + i); } // map-side work - Operator op1 = OperatorFactory.get(PlanUtils + Operator op1 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")), Utilities.makeList(getStringColumn("value")), outputColumns, true, -1, 1, -1)); @@ -232,10 +232,10 @@ mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo()); // reduce side work - Operator op3 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op3 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapredplan1.out", Utilities.defaultTd, false)); - Operator op2 = OperatorFactory.get(new extractDesc( + Operator op2 = OperatorFactory.get(new ExtractDesc( getStringColumn(Utilities.ReduceField.VALUE.toString())), op3); mr.setReducer(op2); @@ -249,7 +249,7 @@ outputColumns.add("_col" + i); } // map-side work - Operator op1 = OperatorFactory.get(PlanUtils + Operator op1 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")), Utilities .makeList(getStringColumn("key"), getStringColumn("value")), @@ -260,12 +260,12 @@ 
mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo()); // reduce side work - Operator op4 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op4 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapredplan2.out", Utilities.defaultTd, false)); - Operator op3 = OperatorFactory.get(getTestFilterDesc("0"), op4); + Operator op3 = OperatorFactory.get(getTestFilterDesc("0"), op4); - Operator op2 = OperatorFactory.get(new extractDesc( + Operator op2 = OperatorFactory.get(new ExtractDesc( getStringColumn(Utilities.ReduceField.VALUE.toString())), op3); mr.setReducer(op2); @@ -283,7 +283,7 @@ outputColumns.add("_col" + i); } // map-side work - Operator op1 = OperatorFactory.get(PlanUtils + Operator op1 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")), Utilities.makeList(getStringColumn("value")), outputColumns, true, Byte.valueOf((byte) 0), 1, -1)); @@ -292,7 +292,7 @@ mr.setKeyDesc(op1.getConf().getKeySerializeInfo()); mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo()); - Operator op2 = OperatorFactory.get(PlanUtils + Operator op2 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("key")), Utilities.makeList(getStringColumn("key")), outputColumns, true, Byte.valueOf((byte) 1), Integer.MAX_VALUE, -1)); @@ -301,13 +301,13 @@ mr.getTagToValueDesc().add(op2.getConf().getValueSerializeInfo()); // reduce side work - Operator op4 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op4 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapredplan3.out", Utilities.defaultTd, false)); - Operator op5 = OperatorFactory.get(new selectDesc(Utilities + Operator op5 = OperatorFactory.get(new SelectDesc(Utilities .makeList(getStringColumn(Utilities.ReduceField.ALIAS.toString()), - new exprNodeFieldDesc(TypeInfoFactory.stringTypeInfo, - new exprNodeColumnDesc(TypeInfoFactory + new ExprNodeFieldDesc(TypeInfoFactory.stringTypeInfo, + new ExprNodeColumnDesc(TypeInfoFactory .getListTypeInfo(TypeInfoFactory.stringTypeInfo), Utilities.ReduceField.VALUE.toString(), "", false), "0", false)), outputColumns), op4); @@ -324,17 +324,17 @@ for (int i = 0; i < 2; i++) { outputColumns.add("_col" + i); } - Operator op1 = OperatorFactory.get(PlanUtils + Operator op1 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")), Utilities.makeList(getStringColumn("tkey"), getStringColumn("tvalue")), outputColumns, false, -1, 1, -1)); - Operator op0 = OperatorFactory.get(new scriptDesc("/bin/cat", + Operator op0 = OperatorFactory.get(new ScriptDesc("/bin/cat", PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "key,value"), TextRecordWriter.class, PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "tkey,tvalue"), TextRecordReader.class), op1); - Operator op4 = OperatorFactory.get(new selectDesc(Utilities + Operator op4 = OperatorFactory.get(new SelectDesc(Utilities .makeList(getStringColumn("key"), getStringColumn("value")), outputColumns), op0); @@ -343,17 +343,17 @@ mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo()); // reduce side work - Operator op3 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op3 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapredplan4.out", Utilities.defaultTd, false)); - Operator op2 = OperatorFactory.get(new extractDesc( + Operator op2 = OperatorFactory.get(new ExtractDesc( getStringColumn(Utilities.ReduceField.VALUE.toString())), op3); mr.setReducer(op2); } - public static exprNodeColumnDesc getStringColumn(String columnName) { 
- return new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, columnName, + public static ExprNodeColumnDesc getStringColumn(String columnName) { + return new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, columnName, "", false); } @@ -366,12 +366,12 @@ for (int i = 0; i < 2; i++) { outputColumns.add("_col" + i); } - Operator op0 = OperatorFactory.get(PlanUtils + Operator op0 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("0")), Utilities .makeList(getStringColumn("0"), getStringColumn("1")), outputColumns, false, -1, 1, -1)); - Operator op4 = OperatorFactory.get(new selectDesc(Utilities + Operator op4 = OperatorFactory.get(new SelectDesc(Utilities .makeList(getStringColumn("key"), getStringColumn("value")), outputColumns), op0); @@ -380,10 +380,10 @@ mr.getTagToValueDesc().add(op0.getConf().getValueSerializeInfo()); // reduce side work - Operator op3 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op3 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapredplan5.out", Utilities.defaultTd, false)); - Operator op2 = OperatorFactory.get(new extractDesc( + Operator op2 = OperatorFactory.get(new ExtractDesc( getStringColumn(Utilities.ReduceField.VALUE.toString())), op3); mr.setReducer(op2); @@ -398,18 +398,18 @@ for (int i = 0; i < 2; i++) { outputColumns.add("_col" + i); } - Operator op1 = OperatorFactory.get(PlanUtils + Operator op1 = OperatorFactory.get(PlanUtils .getReduceSinkDesc(Utilities.makeList(getStringColumn("tkey")), Utilities.makeList(getStringColumn("tkey"), getStringColumn("tvalue")), outputColumns, false, -1, 1, -1)); - Operator op0 = OperatorFactory.get(new scriptDesc( + Operator op0 = OperatorFactory.get(new ScriptDesc( "\'/bin/cat\'", PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "tkey,tvalue"), TextRecordWriter.class, PlanUtils .getDefaultTableDesc("" + Utilities.tabCode, "tkey,tvalue"), TextRecordReader.class), op1); - Operator op4 = OperatorFactory.get(new selectDesc(Utilities + Operator op4 = OperatorFactory.get(new SelectDesc(Utilities .makeList(getStringColumn("key"), getStringColumn("value")), outputColumns), op0); @@ -418,12 +418,12 @@ mr.getTagToValueDesc().add(op1.getConf().getValueSerializeInfo()); // reduce side work - Operator op3 = OperatorFactory.get(new fileSinkDesc(tmpdir + Operator op3 = OperatorFactory.get(new FileSinkDesc(tmpdir + "mapredplan6.out", Utilities.defaultTd, false)); - Operator op2 = OperatorFactory.get(getTestFilterDesc("0"), op3); + Operator op2 = OperatorFactory.get(getTestFilterDesc("0"), op3); - Operator op5 = OperatorFactory.get(new extractDesc( + Operator op5 = OperatorFactory.get(new ExtractDesc( getStringColumn(Utilities.ReduceField.VALUE.toString())), op2); mr.setReducer(op5); Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestExpressionEvaluator.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestExpressionEvaluator.java (revision 901960) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestExpressionEvaluator.java (working copy) @@ -24,10 +24,10 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import 
org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -94,7 +94,7 @@ public void testExprNodeColumnEvaluator() throws Throwable { try { // get a evaluator for a simple field expression - exprNodeDesc exprDesc = new exprNodeColumnDesc(colaType, "cola", "", + ExprNodeDesc exprDesc = new ExprNodeColumnDesc(colaType, "cola", "", false); ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(exprDesc); @@ -112,16 +112,16 @@ } } - private static exprNodeDesc getListIndexNode(exprNodeDesc node, int index) { - return getListIndexNode(node, new exprNodeConstantDesc(index)); + private static ExprNodeDesc getListIndexNode(ExprNodeDesc node, int index) { + return getListIndexNode(node, new ExprNodeConstantDesc(index)); } - private static exprNodeDesc getListIndexNode(exprNodeDesc node, - exprNodeDesc index) { - ArrayList children = new ArrayList(2); + private static ExprNodeDesc getListIndexNode(ExprNodeDesc node, + ExprNodeDesc index) { + ArrayList children = new ArrayList(2); children.add(node); children.add(index); - return new exprNodeGenericFuncDesc(((ListTypeInfo) node.getTypeInfo()) + return new ExprNodeGenericFuncDesc(((ListTypeInfo) node.getTypeInfo()) .getListElementTypeInfo(), FunctionRegistry.getGenericUDFForIndex(), children); } @@ -129,13 +129,13 @@ public void testExprNodeFuncEvaluator() throws Throwable { try { // get a evaluator for a string concatenation expression - exprNodeDesc col1desc = new exprNodeColumnDesc(col1Type, "col1", "", + ExprNodeDesc col1desc = new ExprNodeColumnDesc(col1Type, "col1", "", false); - exprNodeDesc coladesc = new exprNodeColumnDesc(colaType, "cola", "", + ExprNodeDesc coladesc = new ExprNodeColumnDesc(colaType, "cola", "", false); - exprNodeDesc col11desc = getListIndexNode(col1desc, 1); - exprNodeDesc cola0desc = getListIndexNode(coladesc, 0); - exprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc col11desc = getListIndexNode(col1desc, 1); + ExprNodeDesc cola0desc = getListIndexNode(coladesc, 0); + ExprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", col11desc, cola0desc); ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(func1); @@ -154,10 +154,10 @@ public void testExprNodeConversionEvaluator() throws Throwable { try { // get a evaluator for a string concatenation expression - exprNodeDesc col1desc = new exprNodeColumnDesc(col1Type, "col1", "", + ExprNodeDesc col1desc = new ExprNodeColumnDesc(col1Type, "col1", "", false); - exprNodeDesc col11desc = getListIndexNode(col1desc, 1); - exprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc col11desc = getListIndexNode(col1desc, 1); + ExprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc(Constants.DOUBLE_TYPE_NAME, col11desc); ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(func1); @@ -199,15 +199,15 @@ int basetimes = 100000; measureSpeed("1 + 2", basetimes * 100, ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor.getFuncExprNodeDesc( - "+", new exprNodeConstantDesc(1), new exprNodeConstantDesc(2))), + "+", new ExprNodeConstantDesc(1), new ExprNodeConstantDesc(2))), r, Integer.valueOf(1 + 2)); measureSpeed("1 + 2 - 3", basetimes * 100, 
ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("-", TypeCheckProcFactory.DefaultExprProcessor - .getFuncExprNodeDesc("+", new exprNodeConstantDesc(1), - new exprNodeConstantDesc(2)), - new exprNodeConstantDesc(3))), r, Integer.valueOf(1 + 2 - 3)); + .getFuncExprNodeDesc("+", new ExprNodeConstantDesc(1), + new ExprNodeConstantDesc(2)), + new ExprNodeConstantDesc(3))), r, Integer.valueOf(1 + 2 - 3)); measureSpeed("1 + 2 - 3 + 4", basetimes * 100, ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("+", @@ -215,25 +215,25 @@ .getFuncExprNodeDesc("-", TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("+", - new exprNodeConstantDesc(1), - new exprNodeConstantDesc(2)), - new exprNodeConstantDesc(3)), - new exprNodeConstantDesc(4))), r, Integer + new ExprNodeConstantDesc(1), + new ExprNodeConstantDesc(2)), + new ExprNodeConstantDesc(3)), + new ExprNodeConstantDesc(4))), r, Integer .valueOf(1 + 2 - 3 + 4)); measureSpeed("concat(\"1\", \"2\")", basetimes * 100, ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor - .getFuncExprNodeDesc("concat", new exprNodeConstantDesc("1"), - new exprNodeConstantDesc("2"))), r, "12"); + .getFuncExprNodeDesc("concat", new ExprNodeConstantDesc("1"), + new ExprNodeConstantDesc("2"))), r, "12"); measureSpeed("concat(concat(\"1\", \"2\"), \"3\")", basetimes * 100, ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", - new exprNodeConstantDesc("1"), - new exprNodeConstantDesc("2")), - new exprNodeConstantDesc("3"))), r, "123"); + new ExprNodeConstantDesc("1"), + new ExprNodeConstantDesc("2")), + new ExprNodeConstantDesc("3"))), r, "123"); measureSpeed("concat(concat(concat(\"1\", \"2\"), \"3\"), \"4\")", basetimes * 100, ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor @@ -242,18 +242,18 @@ .getFuncExprNodeDesc("concat", TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", - new exprNodeConstantDesc("1"), - new exprNodeConstantDesc("2")), - new exprNodeConstantDesc("3")), - new exprNodeConstantDesc("4"))), r, "1234"); - exprNodeDesc constant1 = new exprNodeConstantDesc(1); - exprNodeDesc constant2 = new exprNodeConstantDesc(2); + new ExprNodeConstantDesc("1"), + new ExprNodeConstantDesc("2")), + new ExprNodeConstantDesc("3")), + new ExprNodeConstantDesc("4"))), r, "1234"); + ExprNodeDesc constant1 = new ExprNodeConstantDesc(1); + ExprNodeDesc constant2 = new ExprNodeConstantDesc(2); measureSpeed("concat(col1[1], cola[1])", basetimes * 10, ExprNodeEvaluatorFactory .get(TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", getListIndexNode( - new exprNodeColumnDesc(col1Type, "col1", "", false), - constant1), getListIndexNode(new exprNodeColumnDesc( + new ExprNodeColumnDesc(col1Type, "col1", "", false), + constant1), getListIndexNode(new ExprNodeColumnDesc( colaType, "cola", "", false), constant1))), r, "1b"); measureSpeed("concat(concat(col1[1], cola[1]), col1[2])", basetimes * 10, ExprNodeEvaluatorFactory @@ -261,11 +261,11 @@ .getFuncExprNodeDesc("concat", TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", getListIndexNode( - new exprNodeColumnDesc(col1Type, "col1", "", + new ExprNodeColumnDesc(col1Type, "col1", "", false), constant1), getListIndexNode( - new exprNodeColumnDesc(colaType, "cola", "", + new ExprNodeColumnDesc(colaType, "cola", "", 
false), constant1)), getListIndexNode( - new exprNodeColumnDesc(col1Type, "col1", "", false), + new ExprNodeColumnDesc(col1Type, "col1", "", false), constant2))), r, "1b2"); measureSpeed( "concat(concat(concat(col1[1], cola[1]), col1[2]), cola[2])", @@ -276,14 +276,14 @@ .getFuncExprNodeDesc("concat", TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", - getListIndexNode(new exprNodeColumnDesc( + getListIndexNode(new ExprNodeColumnDesc( col1Type, "col1", "", false), constant1), getListIndexNode( - new exprNodeColumnDesc(colaType, + new ExprNodeColumnDesc(colaType, "cola", "", false), constant1)), - getListIndexNode(new exprNodeColumnDesc(col1Type, + getListIndexNode(new ExprNodeColumnDesc(col1Type, "col1", "", false), constant2)), - getListIndexNode(new exprNodeColumnDesc(colaType, "cola", + getListIndexNode(new ExprNodeColumnDesc(colaType, "cola", "", false), constant2))), r, "1b2c"); } catch (Throwable e) { Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java (revision 901960) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestPlan.java (working copy) @@ -26,12 +26,12 @@ import junit.framework.TestCase; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; import org.apache.hadoop.mapred.JobConf; @@ -44,15 +44,15 @@ try { // initialize a complete map reduce configuration - exprNodeDesc expr1 = new exprNodeColumnDesc( + ExprNodeDesc expr1 = new ExprNodeColumnDesc( TypeInfoFactory.stringTypeInfo, F1, "", false); - exprNodeDesc expr2 = new exprNodeColumnDesc( + ExprNodeDesc expr2 = new ExprNodeColumnDesc( TypeInfoFactory.stringTypeInfo, F2, "", false); - exprNodeDesc filterExpr = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc filterExpr = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("==", expr1, expr2); - filterDesc filterCtx = new filterDesc(filterExpr, false); - Operator op = OperatorFactory.get(filterDesc.class); + FilterDesc filterCtx = new FilterDesc(filterExpr, false); + Operator op = OperatorFactory.get(FilterDesc.class); op.setConf(filterCtx); ArrayList aliasList = new ArrayList(); @@ -60,15 +60,15 @@ LinkedHashMap> pa = new LinkedHashMap>(); pa.put("/tmp/testfolder", aliasList); - tableDesc tblDesc = Utilities.defaultTd; - partitionDesc partDesc = new partitionDesc(tblDesc, null); - LinkedHashMap pt = new LinkedHashMap(); + TableDesc tblDesc = Utilities.defaultTd; + PartitionDesc partDesc = new PartitionDesc(tblDesc, null); + LinkedHashMap pt = new LinkedHashMap(); pt.put("/tmp/testfolder", partDesc); LinkedHashMap> ao = new LinkedHashMap>(); ao.put("a", op); - mapredWork mrwork = new mapredWork(); + MapredWork mrwork = new MapredWork(); mrwork.setPathToAliases(pa); mrwork.setPathToPartitionInfo(pt); 
mrwork.setAliasToWork(ao); @@ -83,7 +83,7 @@ JobConf job = new JobConf(TestPlan.class); job.set("fs.default.name", "file:///"); Utilities.setMapRedWork(job, mrwork); - mapredWork mrwork2 = Utilities.getMapRedWork(job); + MapredWork mrwork2 = Utilities.getMapRedWork(job); Utilities.clearMapRedWork(job); // over here we should have some checks of the deserialized object against Index: ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (revision 901960) +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestOperators.java (working copy) @@ -29,15 +29,15 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.collectDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.scriptDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.CollectDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.ScriptDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; @@ -87,21 +87,21 @@ public void testBaseFilterOperator() throws Throwable { try { System.out.println("Testing Filter Operator"); - exprNodeDesc col0 = TestExecDriver.getStringColumn("col0"); - exprNodeDesc col1 = TestExecDriver.getStringColumn("col1"); - exprNodeDesc col2 = TestExecDriver.getStringColumn("col2"); - exprNodeDesc zero = new exprNodeConstantDesc("0"); - exprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc col0 = TestExecDriver.getStringColumn("col0"); + ExprNodeDesc col1 = TestExecDriver.getStringColumn("col1"); + ExprNodeDesc col2 = TestExecDriver.getStringColumn("col2"); + ExprNodeDesc zero = new ExprNodeConstantDesc("0"); + ExprNodeDesc func1 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc(">", col2, col1); - exprNodeDesc func2 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc func2 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("==", col0, zero); - exprNodeDesc func3 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc func3 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("and", func1, func2); assert (func3 != null); - filterDesc filterCtx = new filterDesc(func3, false); + FilterDesc filterCtx = new FilterDesc(func3, false); // Configuration - Operator op = OperatorFactory.get(filterDesc.class); + Operator op = OperatorFactory.get(FilterDesc.class); op.setConf(filterCtx); // runtime initialization @@ -137,32 +137,32 @@ try { System.out.println("Testing FileSink Operator"); // col1 - exprNodeDesc 
exprDesc1 = TestExecDriver.getStringColumn("col1"); + ExprNodeDesc exprDesc1 = TestExecDriver.getStringColumn("col1"); // col2 - exprNodeDesc expr1 = TestExecDriver.getStringColumn("col0"); - exprNodeDesc expr2 = new exprNodeConstantDesc("1"); - exprNodeDesc exprDesc2 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc expr1 = TestExecDriver.getStringColumn("col0"); + ExprNodeDesc expr2 = new ExprNodeConstantDesc("1"); + ExprNodeDesc exprDesc2 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", expr1, expr2); // select operator to project these two columns - ArrayList earr = new ArrayList(); + ArrayList earr = new ArrayList(); earr.add(exprDesc1); earr.add(exprDesc2); ArrayList outputCols = new ArrayList(); for (int i = 0; i < earr.size(); i++) { outputCols.add("_col" + i); } - selectDesc selectCtx = new selectDesc(earr, outputCols); - Operator op = OperatorFactory.get(selectDesc.class); + SelectDesc selectCtx = new SelectDesc(earr, outputCols); + Operator op = OperatorFactory.get(SelectDesc.class); op.setConf(selectCtx); // fileSinkOperator to dump the output of the select - // fileSinkDesc fsd = new fileSinkDesc ("file:///tmp" + File.separator + + // FileSinkDesc fsd = new FileSinkDesc ("file:///tmp" + File.separator + // System.getProperty("user.name") + File.separator + // "TestFileSinkOperator", // Utilities.defaultTd, false); - // Operator flop = OperatorFactory.getAndMakeChild(fsd, op); + // Operator flop = OperatorFactory.getAndMakeChild(fsd, op); op.initialize(new JobConf(TestOperators.class), new ObjectInspector[] { r[0].oi }); @@ -185,37 +185,37 @@ try { System.out.println("Testing Script Operator"); // col1 - exprNodeDesc exprDesc1 = TestExecDriver.getStringColumn("col1"); + ExprNodeDesc exprDesc1 = TestExecDriver.getStringColumn("col1"); // col2 - exprNodeDesc expr1 = TestExecDriver.getStringColumn("col0"); - exprNodeDesc expr2 = new exprNodeConstantDesc("1"); - exprNodeDesc exprDesc2 = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc expr1 = TestExecDriver.getStringColumn("col0"); + ExprNodeDesc expr2 = new ExprNodeConstantDesc("1"); + ExprNodeDesc exprDesc2 = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("concat", expr1, expr2); // select operator to project these two columns - ArrayList earr = new ArrayList(); + ArrayList earr = new ArrayList(); earr.add(exprDesc1); earr.add(exprDesc2); ArrayList outputCols = new ArrayList(); for (int i = 0; i < earr.size(); i++) { outputCols.add("_col" + i); } - selectDesc selectCtx = new selectDesc(earr, outputCols); - Operator op = OperatorFactory.get(selectDesc.class); + SelectDesc selectCtx = new SelectDesc(earr, outputCols); + Operator op = OperatorFactory.get(SelectDesc.class); op.setConf(selectCtx); // scriptOperator to echo the output of the select - tableDesc scriptOutput = PlanUtils.getDefaultTableDesc("" + TableDesc scriptOutput = PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "a,b"); - tableDesc scriptInput = PlanUtils.getDefaultTableDesc("" + TableDesc scriptInput = PlanUtils.getDefaultTableDesc("" + Utilities.tabCode, "a,b"); - scriptDesc sd = new scriptDesc("cat", scriptOutput, + ScriptDesc sd = new ScriptDesc("cat", scriptOutput, TextRecordWriter.class, scriptInput, TextRecordReader.class); - Operator sop = OperatorFactory.getAndMakeChild(sd, op); + Operator sop = OperatorFactory.getAndMakeChild(sd, op); // Collect operator to observe the output of the script - collectDesc cd = new collectDesc(Integer.valueOf(10)); + CollectDesc cd = new 
CollectDesc(Integer.valueOf(10)); CollectOperator cdop = (CollectOperator) OperatorFactory.getAndMakeChild( cd, sop); @@ -270,25 +270,25 @@ // initialize pathToTableInfo // Default: treat the table as a single column "col" - tableDesc td = Utilities.defaultTd; - partitionDesc pd = new partitionDesc(td, null); - LinkedHashMap pathToPartitionInfo = new LinkedHashMap(); + TableDesc td = Utilities.defaultTd; + PartitionDesc pd = new PartitionDesc(td, null); + LinkedHashMap pathToPartitionInfo = new LinkedHashMap(); pathToPartitionInfo.put("/testDir", pd); // initialize aliasToWork - collectDesc cd = new collectDesc(Integer.valueOf(1)); + CollectDesc cd = new CollectDesc(Integer.valueOf(1)); CollectOperator cdop1 = (CollectOperator) OperatorFactory - .get(collectDesc.class); + .get(CollectDesc.class); cdop1.setConf(cd); CollectOperator cdop2 = (CollectOperator) OperatorFactory - .get(collectDesc.class); + .get(CollectDesc.class); cdop2.setConf(cd); LinkedHashMap> aliasToWork = new LinkedHashMap>(); aliasToWork.put("a", cdop1); aliasToWork.put("b", cdop2); - // initialize mapredWork - mapredWork mrwork = new mapredWork(); + // initialize MapredWork + MapredWork mrwork = new MapredWork(); mrwork.setPathToAliases(pathToAliases); mrwork.setPathToPartitionInfo(pathToPartitionInfo); mrwork.setAliasToWork(aliasToWork); Index: ql/src/test/org/apache/hadoop/hive/ql/udf/UDFTestLength.java =================================================================== --- ql/src/test/org/apache/hadoop/hive/ql/udf/UDFTestLength.java (revision 901960) +++ ql/src/test/org/apache/hadoop/hive/ql/udf/UDFTestLength.java (working copy) @@ -36,4 +36,4 @@ result.set(s.toString().length()); return result; } -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRProcContext.java (working copy) @@ -35,7 +35,7 @@ import org.apache.hadoop.hive.ql.hooks.WriteEntity; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.ParseContext; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; /** * Processor Context for creating map reduce task. 
Walk the tree in a DFS manner @@ -95,12 +95,12 @@ public static class GenMRUnionCtx { Task uTask; List taskTmpDir; - List tt_desc; + List tt_desc; public GenMRUnionCtx() { uTask = null; taskTmpDir = new ArrayList(); - tt_desc = new ArrayList(); + tt_desc = new ArrayList(); } public Task getUTask() { @@ -119,18 +119,18 @@ return taskTmpDir; } - public void addTTDesc(tableDesc tt_desc) { + public void addTTDesc(TableDesc tt_desc) { this.tt_desc.add(tt_desc); } - public List getTTDesc() { + public List getTTDesc() { return tt_desc; } } public static class GenMRMapJoinCtx { String taskTmpDir; - tableDesc tt_desc; + TableDesc tt_desc; Operator rootMapJoinOp; MapJoinOperator oldMapJoin; @@ -147,7 +147,7 @@ * @param rootMapJoinOp * @param oldMapJoin */ - public GenMRMapJoinCtx(String taskTmpDir, tableDesc tt_desc, + public GenMRMapJoinCtx(String taskTmpDir, TableDesc tt_desc, Operator rootMapJoinOp, MapJoinOperator oldMapJoin) { this.taskTmpDir = taskTmpDir; @@ -164,11 +164,11 @@ return taskTmpDir; } - public void setTTDesc(tableDesc tt_desc) { + public void setTTDesc(TableDesc tt_desc) { this.tt_desc = tt_desc; } - public tableDesc getTTDesc() { + public TableDesc getTTDesc() { return tt_desc; } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRUnion1.java (working copy) @@ -45,10 +45,10 @@ import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; /** * Processor for the rule - TableScan followed by Union @@ -125,7 +125,7 @@ Operator parent = union.getParentOperators().get( pos); - mapredWork uPlan = null; + MapredWork uPlan = null; // union is encountered for the first time if (uCtxTask == null) { @@ -136,7 +136,7 @@ ctx.setUnionTask(union, uCtxTask); } else { uTask = uCtxTask.getUTask(); - uPlan = (mapredWork) uTask.getWork(); + uPlan = (MapredWork) uTask.getWork(); } // If there is a mapjoin at position 'pos' @@ -145,19 +145,19 @@ assert mjOp != null; GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(mjOp); assert mjCtx != null; - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); String taskTmpDir = mjCtx.getTaskTmpDir(); - tableDesc tt_desc = mjCtx.getTTDesc(); + TableDesc tt_desc = mjCtx.getTTDesc(); assert plan.getPathToAliases().get(taskTmpDir) == null; plan.getPathToAliases().put(taskTmpDir, new ArrayList()); plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir); plan.getPathToPartitionInfo().put(taskTmpDir, - new partitionDesc(tt_desc, null)); + new PartitionDesc(tt_desc, null)); plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp()); } - tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils + TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils .getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol")); // generate the temporary file @@ -174,7 +174,7 @@ 
// Create a file sink operator for this file name Operator fs_op = OperatorFactory.get( - new fileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar( + new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar( HiveConf.ConfVars.COMPRESSINTERMEDIATE)), parent.getSchema()); assert parent.getChildOperators().size() == 1; @@ -189,7 +189,7 @@ // If it is map-only task, add the files to be processed if (uPrsCtx.getMapOnlySubq(pos) && uPrsCtx.getRootTask(pos)) { GenMapRedUtils.setTaskPlan(ctx.getCurrAliasId(), ctx.getCurrTopOp(), - (mapredWork) currTask.getWork(), false, ctx); + (MapredWork) currTask.getWork(), false, ctx); } ctx.setCurrTask(uTask); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcFactory.java (working copy) @@ -52,14 +52,14 @@ import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.aggregationDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.groupByDesc; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.mapJoinDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; /** * Factory for generating the different node processors used by ColumnPruner. @@ -74,7 +74,7 @@ Object... nodeOutputs) throws SemanticException { FilterOperator op = (FilterOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; - exprNodeDesc condn = op.getConf().getPredicate(); + ExprNodeDesc condn = op.getConf().getPredicate(); // get list of columns used in the filter List cl = condn.getCols(); // merge it with the downstream col list @@ -102,16 +102,16 @@ GroupByOperator op = (GroupByOperator) nd; ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; List colLists = new ArrayList(); - groupByDesc conf = op.getConf(); - ArrayList keys = conf.getKeys(); - for (exprNodeDesc key : keys) { + GroupByDesc conf = op.getConf(); + ArrayList keys = conf.getKeys(); + for (ExprNodeDesc key : keys) { colLists = Utilities.mergeUniqElems(colLists, key.getCols()); } - ArrayList aggrs = conf.getAggregators(); - for (aggregationDesc aggr : aggrs) { - ArrayList params = aggr.getParameters(); - for (exprNodeDesc param : params) { + ArrayList aggrs = conf.getAggregators(); + for (AggregationDesc aggr : aggrs) { + ArrayList params = aggr.getParameters(); + for (ExprNodeDesc param : params) { colLists = Utilities.mergeUniqElems(colLists, param.getCols()); } } @@ -155,7 +155,7 @@ /** * The Node Processor for Column Pruning on Table Scan Operators. It will - * store needed columns in tableScanDesc. + * store needed columns in TableScanDesc. 
*/ public static class ColumnPrunerTableScanProc implements NodeProcessor { public Object process(Node nd, Stack stack, NodeProcessorCtx ctx, @@ -197,15 +197,15 @@ HashMap, OpParseContext> opToParseCtxMap = cppCtx .getOpToParseCtxMap(); RowResolver redSinkRR = opToParseCtxMap.get(op).getRR(); - reduceSinkDesc conf = op.getConf(); + ReduceSinkDesc conf = op.getConf(); List> childOperators = op .getChildOperators(); List> parentOperators = op .getParentOperators(); List colLists = new ArrayList(); - ArrayList keys = conf.getKeyCols(); - for (exprNodeDesc key : keys) { + ArrayList keys = conf.getKeyCols(); + for (ExprNodeDesc key : keys) { colLists = Utilities.mergeUniqElems(colLists, key.getCols()); } @@ -222,9 +222,9 @@ flags[i] = false; } if (childJoinCols != null && childJoinCols.size() > 0) { - Map exprMap = op.getColumnExprMap(); + Map exprMap = op.getColumnExprMap(); for (String childCol : childJoinCols) { - exprNodeDesc desc = exprMap.get(childCol); + ExprNodeDesc desc = exprMap.get(childCol); int index = conf.getValueCols().indexOf(desc); flags[index] = true; String[] nm = redSinkRR.reverseLookup(childCol); @@ -241,8 +241,8 @@ } else { // Reduce Sink contains the columns needed - no need to aggregate from // children - ArrayList vals = conf.getValueCols(); - for (exprNodeDesc val : vals) { + ArrayList vals = conf.getValueCols(); + for (ExprNodeDesc val : vals) { colLists = Utilities.mergeUniqElems(colLists, val.getCols()); } } @@ -290,7 +290,7 @@ } cols = cppCtx.genColLists(op); - selectDesc conf = op.getConf(); + SelectDesc conf = op.getConf(); // The input to the select does not matter. Go over the expressions // and return the ones which have a marked column cppCtx.getPrunedColLists().put(op, @@ -301,16 +301,16 @@ } // do we need to prune the select operator? - List originalColList = op.getConf().getColList(); + List originalColList = op.getConf().getColList(); List columns = new ArrayList(); - for (exprNodeDesc expr : originalColList) { + for (ExprNodeDesc expr : originalColList) { Utilities.mergeUniqElems(columns, expr.getCols()); } // by now, 'prunedCols' are columns used by child operators, and 'columns' // are columns used by this select operator. 
ArrayList originalOutputColumnNames = conf.getOutputColumnNames(); if (cols.size() < originalOutputColumnNames.size()) { - ArrayList newColList = new ArrayList(); + ArrayList newColList = new ArrayList(); ArrayList newOutputColumnNames = new ArrayList(); Vector rs_oldsignature = op.getSchema().getSignature(); Vector rs_newsignature = new Vector(); @@ -370,8 +370,8 @@ private static boolean[] getPruneReduceSinkOpRetainFlags( List retainedParentOpOutputCols, ReduceSinkOperator reduce) { - reduceSinkDesc reduceConf = reduce.getConf(); - java.util.ArrayList originalValueEval = reduceConf + ReduceSinkDesc reduceConf = reduce.getConf(); + java.util.ArrayList originalValueEval = reduceConf .getValueCols(); boolean[] flags = new boolean[originalValueEval.size()]; for (int i = 0; i < originalValueEval.size(); i++) { @@ -394,18 +394,18 @@ private static void pruneReduceSinkOperator(boolean[] retainFlags, ReduceSinkOperator reduce, ColumnPrunerProcCtx cppCtx) throws SemanticException { - reduceSinkDesc reduceConf = reduce.getConf(); - Map oldMap = reduce.getColumnExprMap(); - Map newMap = new HashMap(); + ReduceSinkDesc reduceConf = reduce.getConf(); + Map oldMap = reduce.getColumnExprMap(); + Map newMap = new HashMap(); Vector sig = new Vector(); RowResolver oldRR = cppCtx.getOpToParseCtxMap().get(reduce).getRR(); RowResolver newRR = new RowResolver(); ArrayList originalValueOutputColNames = reduceConf .getOutputValueColumnNames(); - java.util.ArrayList originalValueEval = reduceConf + java.util.ArrayList originalValueEval = reduceConf .getValueCols(); ArrayList newOutputColNames = new ArrayList(); - java.util.ArrayList newValueEval = new ArrayList(); + java.util.ArrayList newValueEval = new ArrayList(); for (int i = 0; i < retainFlags.length; i++) { if (retainFlags[i]) { newValueEval.add(originalValueEval.get(i)); @@ -423,7 +423,7 @@ } } - ArrayList keyCols = reduceConf.getKeyCols(); + ArrayList keyCols = reduceConf.getKeyCols(); List keys = new ArrayList(); RowResolver parResover = cppCtx.getOpToParseCtxMap().get( reduce.getParentOperators().get(0)).getRR(); @@ -444,7 +444,7 @@ reduce.getSchema().setSignature(sig); reduceConf.setOutputValueColumnNames(newOutputColNames); reduceConf.setValueCols(newValueEval); - tableDesc newValueTable = PlanUtils.getReduceValueTableDesc(PlanUtils + TableDesc newValueTable = PlanUtils.getReduceValueTableDesc(PlanUtils .getFieldSchemasFromColumnList(reduceConf.getValueCols(), newOutputColNames, 0, "")); reduceConf.setValueSerializeInfo(newValueTable); @@ -495,8 +495,8 @@ } private static void pruneJoinOperator(NodeProcessorCtx ctx, - CommonJoinOperator op, joinDesc conf, - Map columnExprMap, + CommonJoinOperator op, JoinDesc conf, + Map columnExprMap, Map> retainMap, boolean mapJoin) throws SemanticException { ColumnPrunerProcCtx cppCtx = (ColumnPrunerProcCtx) ctx; @@ -516,11 +516,11 @@ RowResolver newJoinRR = new RowResolver(); ArrayList outputCols = new ArrayList(); Vector rs = new Vector(); - Map newColExprMap = new HashMap(); + Map newColExprMap = new HashMap(); for (int i = 0; i < conf.getOutputColumnNames().size(); i++) { String internalName = conf.getOutputColumnNames().get(i); - exprNodeDesc desc = columnExprMap.get(internalName); + ExprNodeDesc desc = columnExprMap.get(internalName); Byte tag = conf.getReversedExprs().get(internalName); if (!childColLists.contains(internalName)) { int index = conf.getExprs().get(tag).indexOf(desc); @@ -545,30 +545,30 @@ if (mapJoin) { // regenerate the valueTableDesc - List valueTableDescs = new ArrayList(); + List 
valueTableDescs = new ArrayList(); for (int pos = 0; pos < op.getParentOperators().size(); pos++) { - List valueCols = conf.getExprs() + List valueCols = conf.getExprs() .get(new Byte((byte) pos)); StringBuilder keyOrder = new StringBuilder(); for (int i = 0; i < valueCols.size(); i++) { keyOrder.append("+"); } - tableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils + TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils .getFieldSchemasFromColumnList(valueCols, "mapjoinvalue")); valueTableDescs.add(valueTableDesc); } - ((mapJoinDesc) conf).setValueTblDescs(valueTableDescs); + ((MapJoinDesc) conf).setValueTblDescs(valueTableDescs); - Set>> exprs = ((mapJoinDesc) conf) + Set>> exprs = ((MapJoinDesc) conf) .getKeys().entrySet(); - Iterator>> iters = exprs.iterator(); + Iterator>> iters = exprs.iterator(); while (iters.hasNext()) { - Map.Entry> entry = iters.next(); - List lists = entry.getValue(); + Map.Entry> entry = iters.next(); + List lists = entry.getValue(); for (int j = 0; j < lists.size(); j++) { - exprNodeDesc desc = lists.get(j); + ExprNodeDesc desc = lists.get(j); Byte tag = entry.getKey(); List cols = prunedColLists.get(tag); cols = Utilities.mergeUniqElems(cols, desc.getCols()); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink1.java (working copy) @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.mapredWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; /** * Processor for the rule - table scan followed by reduce sink @@ -58,7 +58,7 @@ .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(stack.get(stack.size() - 2)); Task currTask = mapredCtx.getCurrTask(); - mapredWork currPlan = (mapredWork) currTask.getWork(); + MapredWork currPlan = (MapredWork) currTask.getWork(); Operator currTopOp = mapredCtx.getCurrTopOp(); String currAliasId = mapredCtx.getCurrAliasId(); Operator reducer = op.getChildOperators().get(0); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink3.java (working copy) @@ -33,7 +33,7 @@ import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.mapredWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; /** * Processor for the rule - union followed by reduce sink @@ -71,7 +71,7 @@ .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); Task currTask = mapredCtx.getCurrTask(); - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); HashMap, Task> opTaskMap = ctx .getOpTaskMap(); Task opMapTask = opTaskMap.get(reducer); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java =================================================================== --- 
ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRRedSink4.java (working copy) @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.optimizer.GenMRProcContext.GenMapRedCtx; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.mapredWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; /** * Processor for the rule - map join followed by reduce sink @@ -63,7 +63,7 @@ .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); Task currTask = mapredCtx.getCurrTask(); - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); HashMap, Task> opTaskMap = ctx .getOpTaskMap(); Task opMapTask = opTaskMap.get(reducer); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/SamplePruner.java (working copy) @@ -41,8 +41,8 @@ import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.filterDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc.sampleDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; /** * The transformation step that does sample pruning. @@ -116,7 +116,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { FilterOperator filOp = (FilterOperator) nd; - filterDesc filOpDesc = filOp.getConf(); + FilterDesc filOpDesc = filOp.getConf(); sampleDesc sampleDescr = filOpDesc.getSampleDescr(); if ((sampleDescr == null) || !sampleDescr.getInputPruning()) { Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMRFileSink1.java (working copy) @@ -50,17 +50,17 @@ import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles; import org.apache.hadoop.hive.ql.plan.ConditionalWork; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.extractDesc; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.loadFileDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.moveWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; -import org.apache.hadoop.hive.ql.plan.tableScanDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExtractDesc; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.LoadFileDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import 
org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.ConditionalResolverMergeFiles.ConditionalResolverMergeFilesCtx; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -105,10 +105,10 @@ // merge for a map-only job // or for a map-reduce job if ((parseCtx.getConf().getBoolVar( - HiveConf.ConfVars.HIVEMERGEMAPFILES) && (((mapredWork) currTask + HiveConf.ConfVars.HIVEMERGEMAPFILES) && (((MapredWork) currTask .getWork()).getReducer() == null)) || (parseCtx.getConf().getBoolVar( - HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((mapredWork) currTask + HiveConf.ConfVars.HIVEMERGEMAPREDFILES) && (((MapredWork) currTask .getWork()).getReducer() != null))) { chDir = true; } @@ -132,35 +132,35 @@ RowSchema fsRS = fsOp.getSchema(); // create a reduce Sink operator - key is the first column - ArrayList keyCols = new ArrayList(); + ArrayList keyCols = new ArrayList(); keyCols.add(TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("rand")); - ArrayList valueCols = new ArrayList(); + ArrayList valueCols = new ArrayList(); for (ColumnInfo ci : fsRS.getSignature()) { - valueCols.add(new exprNodeColumnDesc(ci.getType(), ci.getInternalName(), + valueCols.add(new ExprNodeColumnDesc(ci.getType(), ci.getInternalName(), ci.getTabAlias(), ci.getIsPartitionCol())); } // create a dummy tableScan operator Operator ts_op = OperatorFactory.get( - tableScanDesc.class, fsRS); + TableScanDesc.class, fsRS); ArrayList outputColumns = new ArrayList(); for (int i = 0; i < valueCols.size(); i++) { outputColumns.add(SemanticAnalyzer.getColumnInternalName(i)); } - reduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc( - new ArrayList(), valueCols, outputColumns, false, -1, -1, + ReduceSinkDesc rsDesc = PlanUtils.getReduceSinkDesc( + new ArrayList(), valueCols, outputColumns, false, -1, -1, -1); OperatorFactory.getAndMakeChild(rsDesc, fsRS, ts_op); - mapredWork cplan = GenMapRedUtils.getMapRedWork(); + MapredWork cplan = GenMapRedUtils.getMapRedWork(); ParseContext parseCtx = ctx.getParseCtx(); Task mergeTask = TaskFactory.get(cplan, parseCtx .getConf()); - fileSinkDesc fsConf = fsOp.getConf(); + FileSinkDesc fsConf = fsOp.getConf(); // Add the extract operator to get the value fields RowResolver out_rwsch = new RowResolver(); @@ -174,19 +174,19 @@ pos = Integer.valueOf(pos.intValue() + 1); } - Operator extract = OperatorFactory.getAndMakeChild(new extractDesc( - new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, + Operator extract = OperatorFactory.getAndMakeChild(new ExtractDesc( + new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, Utilities.ReduceField.VALUE.toString(), "", false)), new RowSchema( out_rwsch.getColumnInfos())); - tableDesc ts = (tableDesc) fsConf.getTableInfo().clone(); + TableDesc ts = (TableDesc) fsConf.getTableInfo().clone(); fsConf .getTableInfo() .getProperties() .remove( org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_PARTITION_COLUMNS); FileSinkOperator newOutput = (FileSinkOperator) OperatorFactory - .getAndMakeChild(new fileSinkDesc(finalName, ts, parseCtx.getConf() + .getAndMakeChild(new FileSinkDesc(finalName, ts, parseCtx.getConf() .getBoolVar(HiveConf.ConfVars.COMPRESSRESULT)), fsRS, extract); cplan.setReducer(extract); @@ -195,10 +195,10 @@ cplan.getPathToAliases().put(fsConf.getDirName(), aliases); cplan.getAliasToWork().put(fsConf.getDirName(), ts_op); 
cplan.getPathToPartitionInfo().put(fsConf.getDirName(), - new partitionDesc(fsConf.getTableInfo(), null)); + new PartitionDesc(fsConf.getTableInfo(), null)); cplan.setNumReduceTasks(-1); - moveWork dummyMv = new moveWork(null, null, null, new loadFileDesc(fsOp + MoveWork dummyMv = new MoveWork(null, null, null, new LoadFileDesc(fsOp .getConf().getDirName(), finalName, true, null, null), false); Task dummyMergeTask = TaskFactory.get(dummyMv, ctx .getConf()); @@ -234,7 +234,7 @@ List> mvTasks, FileSinkOperator fsOp) { // find the move task for (Task mvTsk : mvTasks) { - moveWork mvWork = (moveWork) mvTsk.getWork(); + MoveWork mvWork = (MoveWork) mvTsk.getWork(); String srcDir = null; if (mvWork.getLoadFileWork() != null) { srcDir = mvWork.getLoadFileWork().getSourceDir(); @@ -315,14 +315,14 @@ assert (!seenOps.contains(currTopOp)); seenOps.add(currTopOp); GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, - (mapredWork) currTask.getWork(), false, ctx); + (MapredWork) currTask.getWork(), false, ctx); opTaskMap.put(null, currTask); rootTasks.add(currTask); } else { if (!seenOps.contains(currTopOp)) { seenOps.add(currTopOp); GenMapRedUtils.setTaskPlan(currAliasId, currTopOp, - (mapredWork) mapTask.getWork(), false, ctx); + (MapredWork) mapTask.getWork(), false, ctx); } // mapTask and currTask should be merged by and join/union operator // (e.g., GenMRUnion1j) which has multiple topOps. @@ -347,15 +347,15 @@ if (currMapJoinOp != null) { opTaskMap.put(null, currTask); GenMRMapJoinCtx mjCtx = ctx.getMapJoinCtx(currMapJoinOp); - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); String taskTmpDir = mjCtx.getTaskTmpDir(); - tableDesc tt_desc = mjCtx.getTTDesc(); + TableDesc tt_desc = mjCtx.getTTDesc(); assert plan.getPathToAliases().get(taskTmpDir) == null; plan.getPathToAliases().put(taskTmpDir, new ArrayList()); plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir); plan.getPathToPartitionInfo().put(taskTmpDir, - new partitionDesc(tt_desc, null)); + new PartitionDesc(tt_desc, null)); plan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp()); return dest; } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpWalkerCtx.java (working copy) @@ -22,7 +22,7 @@ import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** * Context class for operator tree walker for partition pruner. 
@@ -35,17 +35,17 @@
    * Map from tablescan operator to partition pruning predicate that is
    * initialized from the ParseContext
    */
-  private final HashMap<TableScanOperator, exprNodeDesc> opToPartPruner;
+  private final HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner;
 
   /**
    * Constructor
    */
-  public OpWalkerCtx(HashMap<TableScanOperator, exprNodeDesc> opToPartPruner) {
+  public OpWalkerCtx(HashMap<TableScanOperator, ExprNodeDesc> opToPartPruner) {
     this.opToPartPruner = opToPartPruner;
     hasNonPartCols = false;
   }
 
-  public HashMap<TableScanOperator, exprNodeDesc> getOpToPartPruner() {
+  public HashMap<TableScanOperator, ExprNodeDesc> getOpToPartPruner() {
     return opToPartPruner;
   }
 
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (revision 901960)
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java (working copy)
@@ -48,9 +48,9 @@
 import org.apache.hadoop.hive.ql.parse.ParseContext;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.SemanticException;
-import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeDesc;
-import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
+import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector;
 import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory;
 import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector;
@@ -109,25 +109,25 @@
    * @param expr
    *          the pruner expression for the table
    */
-  public static boolean onlyContainsPartnCols(Table tab, exprNodeDesc expr) {
+  public static boolean onlyContainsPartnCols(Table tab, ExprNodeDesc expr) {
     if (!tab.isPartitioned() || (expr == null)) {
       return true;
     }
 
-    if (expr instanceof exprNodeColumnDesc) {
-      String colName = ((exprNodeColumnDesc) expr).getColumn();
+    if (expr instanceof ExprNodeColumnDesc) {
+      String colName = ((ExprNodeColumnDesc) expr).getColumn();
       return tab.isPartitionKey(colName);
     }
 
     // It cannot contain a non-deterministic function
-    if ((expr instanceof exprNodeGenericFuncDesc)
-        && !FunctionRegistry.isDeterministic(((exprNodeGenericFuncDesc) expr)
+    if ((expr instanceof ExprNodeGenericFuncDesc)
+        && !FunctionRegistry.isDeterministic(((ExprNodeGenericFuncDesc) expr)
         .getGenericUDF())) {
       return false;
     }
 
     // All columns of the expression must be partitioned columns
-    List<exprNodeDesc> children = expr.getChildren();
+    List<ExprNodeDesc> children = expr.getChildren();
     if (children != null) {
       for (int i = 0; i < children.size(); i++) {
         if (!onlyContainsPartnCols(tab, children.get(i))) {
@@ -155,7 +155,7 @@
    *          pruner condition.
    * @throws HiveException
    */
-  public static PrunedPartitionList prune(Table tab, exprNodeDesc prunerExpr,
+  public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
       HiveConf conf, String alias,
       Map<String, PrunedPartitionList> prunedPartitionsMap) throws HiveException {
 
@@ -271,17 +271,17 @@
   /**
    * Whether the expression contains a column node or not.
    */
-  public static boolean hasColumnExpr(exprNodeDesc desc) {
+  public static boolean hasColumnExpr(ExprNodeDesc desc) {
     // Return false for null
     if (desc == null) {
       return false;
     }
-    // Return true for exprNodeColumnDesc
-    if (desc instanceof exprNodeColumnDesc) {
+    // Return true for ExprNodeColumnDesc
+    if (desc instanceof ExprNodeColumnDesc) {
       return true;
     }
     // Return true in case one of the children is column expr.
- List children = desc.getChildren(); + List children = desc.getChildren(); if (children != null) { for (int i = 0; i < children.size(); i++) { if (hasColumnExpr(children.get(i))) { Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/ExprProcFactory.java (working copy) @@ -36,12 +36,12 @@ import org.apache.hadoop.hive.ql.lib.Rule; import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; /** * Expression processor factory for partition pruning. Each processor tries to @@ -60,14 +60,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { - exprNodeDesc newcd = null; - exprNodeColumnDesc cd = (exprNodeColumnDesc) nd; + ExprNodeDesc newcd = null; + ExprNodeColumnDesc cd = (ExprNodeColumnDesc) nd; ExprProcCtx epc = (ExprProcCtx) procCtx; if (cd.getTabAlias().equalsIgnoreCase(epc.getTabAlias()) && cd.getIsParititonCol()) { newcd = cd.clone(); } else { - newcd = new exprNodeConstantDesc(cd.getTypeInfo(), null); + newcd = new ExprNodeConstantDesc(cd.getTypeInfo(), null); epc.setHasNonPartCols(true); } @@ -87,8 +87,8 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { - exprNodeDesc newfd = null; - exprNodeGenericFuncDesc fd = (exprNodeGenericFuncDesc) nd; + ExprNodeDesc newfd = null; + ExprNodeGenericFuncDesc fd = (ExprNodeGenericFuncDesc) nd; boolean unknown = false; @@ -106,24 +106,24 @@ } else { // If any child is null, set unknown to true for (Object child : nodeOutputs) { - exprNodeDesc child_nd = (exprNodeDesc) child; - if (child_nd instanceof exprNodeConstantDesc - && ((exprNodeConstantDesc) child_nd).getValue() == null) { + ExprNodeDesc child_nd = (ExprNodeDesc) child; + if (child_nd instanceof ExprNodeConstantDesc + && ((ExprNodeConstantDesc) child_nd).getValue() == null) { unknown = true; } } } if (unknown) { - newfd = new exprNodeConstantDesc(fd.getTypeInfo(), null); + newfd = new ExprNodeConstantDesc(fd.getTypeInfo(), null); } else { // Create the list of children - ArrayList children = new ArrayList(); + ArrayList children = new ArrayList(); for (Object child : nodeOutputs) { - children.add((exprNodeDesc) child); + children.add((ExprNodeDesc) child); } // Create a copy of the function descriptor - newfd = new exprNodeGenericFuncDesc(fd.getTypeInfo(), fd + newfd = new ExprNodeGenericFuncDesc(fd.getTypeInfo(), fd .getGenericUDF(), children); } @@ -138,14 +138,14 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws SemanticException { - exprNodeFieldDesc fnd = (exprNodeFieldDesc) nd; + ExprNodeFieldDesc fnd = (ExprNodeFieldDesc) nd; boolean unknown = false; int idx = 0; - exprNodeDesc left_nd = null; + ExprNodeDesc left_nd = null; for (Object child : nodeOutputs) { - exprNodeDesc child_nd = (exprNodeDesc) child; - if (child_nd instanceof exprNodeConstantDesc - && ((exprNodeConstantDesc) child_nd).getValue() == null) { + ExprNodeDesc child_nd = (ExprNodeDesc) child; + if (child_nd instanceof ExprNodeConstantDesc + && ((ExprNodeConstantDesc) child_nd).getValue() == null) { unknown = true; } left_nd = child_nd; @@ -153,11 +153,11 @@ assert (idx == 0); - exprNodeDesc newnd = null; + ExprNodeDesc newnd = null; if (unknown) { - newnd = new exprNodeConstantDesc(fnd.getTypeInfo(), null); + newnd = new ExprNodeConstantDesc(fnd.getTypeInfo(), null); } else { - newnd = new exprNodeFieldDesc(fnd.getTypeInfo(), left_nd, fnd + newnd = new ExprNodeFieldDesc(fnd.getTypeInfo(), left_nd, fnd .getFieldName(), fnd.getIsList()); } return newnd; @@ -167,17 +167,17 @@ /** * Processor for constants and null expressions. For such expressions the - * processor simply clones the exprNodeDesc and returns it. + * processor simply clones the ExprNodeDesc and returns it. */ public static class DefaultExprProcessor implements NodeProcessor { @Override public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { - if (nd instanceof exprNodeConstantDesc) { - return ((exprNodeConstantDesc) nd).clone(); - } else if (nd instanceof exprNodeNullDesc) { - return ((exprNodeNullDesc) nd).clone(); + if (nd instanceof ExprNodeConstantDesc) { + return ((ExprNodeConstantDesc) nd).clone(); + } else if (nd instanceof ExprNodeNullDesc) { + return ((ExprNodeNullDesc) nd).clone(); } assert (false); @@ -214,7 +214,7 @@ * has a non partition column * @throws SemanticException */ - public static exprNodeDesc genPruner(String tabAlias, exprNodeDesc pred, + public static ExprNodeDesc genPruner(String tabAlias, ExprNodeDesc pred, boolean hasNonPartCols) throws SemanticException { // Create the walker, the rules dispatcher and the context. 
ExprProcCtx pprCtx = new ExprProcCtx(tabAlias); @@ -224,12 +224,12 @@ // generates the plan from the operator tree Map exprRules = new LinkedHashMap(); exprRules.put( - new RuleRegExp("R1", exprNodeColumnDesc.class.getName() + "%"), + new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + "%"), getColumnProcessor()); exprRules.put( - new RuleRegExp("R2", exprNodeFieldDesc.class.getName() + "%"), + new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + "%"), getFieldProcessor()); - exprRules.put(new RuleRegExp("R5", exprNodeGenericFuncDesc.class.getName() + exprRules.put(new RuleRegExp("R5", ExprNodeGenericFuncDesc.class.getName() + "%"), getGenericFuncProcessor()); // The dispatcher fires the processor corresponding to the closest matching @@ -245,8 +245,8 @@ egw.startWalking(startNodes, outputMap); hasNonPartCols = pprCtx.getHasNonPartCols(); - // Get the exprNodeDesc corresponding to the first start node; - return (exprNodeDesc) outputMap.get(pred); + // Get the ExprNodeDesc corresponding to the first start node; + return (ExprNodeDesc) outputMap.get(pred); } } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/OpProcFactory.java (working copy) @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.parse.TypeCheckProcFactory; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** * Operator factory for partition pruning processing of operator graph We find @@ -81,12 +81,12 @@ } // Otherwise this is not a sampling predicate and we need to - exprNodeDesc predicate = fop.getConf().getPredicate(); + ExprNodeDesc predicate = fop.getConf().getPredicate(); String alias = top.getConf().getAlias(); // Generate the partition pruning predicate boolean hasNonPartCols = false; - exprNodeDesc ppr_pred = ExprProcFactory.genPruner(alias, predicate, + ExprNodeDesc ppr_pred = ExprProcFactory.genPruner(alias, predicate, hasNonPartCols); owc.addHasNonPartCols(hasNonPartCols); @@ -96,10 +96,10 @@ return null; } - private void addPruningPred(Map opToPPR, - TableScanOperator top, exprNodeDesc new_ppr_pred) { - exprNodeDesc old_ppr_pred = opToPPR.get(top); - exprNodeDesc ppr_pred = null; + private void addPruningPred(Map opToPPR, + TableScanOperator top, ExprNodeDesc new_ppr_pred) { + ExprNodeDesc old_ppr_pred = opToPPR.get(top); + ExprNodeDesc ppr_pred = null; if (old_ppr_pred != null) { // or the old_ppr_pred and the new_ppr_pred ppr_pred = TypeCheckProcFactory.DefaultExprProcessor Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java (working copy) @@ -56,16 +56,16 @@ import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.mapJoinDesc; -import org.apache.hadoop.hive.ql.plan.mapredLocalWork; -import 
org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; -import org.apache.hadoop.hive.ql.plan.tableScanDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc.sampleDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.MapredLocalWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; /** * General utility common functions for the Processor to convert operator into @@ -94,14 +94,14 @@ .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); Task currTask = mapredCtx.getCurrTask(); - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); HashMap, Task> opTaskMap = opProcCtx .getOpTaskMap(); Operator currTopOp = opProcCtx.getCurrTopOp(); opTaskMap.put(reducer, currTask); plan.setReducer(reducer); - reduceSinkDesc desc = op.getConf(); + ReduceSinkDesc desc = op.getConf(); plan.setNumReduceTasks(desc.getNumReducers()); @@ -148,7 +148,7 @@ GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get( parentPos)); Task currTask = mapredCtx.getCurrTask(); - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); HashMap, Task> opTaskMap = opProcCtx .getOpTaskMap(); Operator currTopOp = opProcCtx.getCurrTopOp(); @@ -169,7 +169,7 @@ if (reducer.getClass() == JoinOperator.class) { plan.setNeedsTagging(true); } - reduceSinkDesc desc = (reduceSinkDesc) op.getConf(); + ReduceSinkDesc desc = (ReduceSinkDesc) op.getConf(); plan.setNumReduceTasks(desc.getNumReducers()); } else { opTaskMap.put(op, currTask); @@ -178,7 +178,7 @@ if (!readInputUnion) { GenMRMapJoinCtx mjCtx = opProcCtx.getMapJoinCtx(currMapJoinOp); String taskTmpDir; - tableDesc tt_desc; + TableDesc tt_desc; Operator rootOp; if (mjCtx.getOldMapJoin() == null) { @@ -200,7 +200,7 @@ opProcCtx.setCurrMapJoinOp(null); } else { - mapJoinDesc desc = (mapJoinDesc) op.getConf(); + MapJoinDesc desc = (MapJoinDesc) op.getConf(); // The map is overloaded to keep track of mapjoins also opTaskMap.put(op, currTask); @@ -237,13 +237,13 @@ .getMapCurrCtx(); GenMapRedCtx mapredCtx = mapCurrCtx.get(op.getParentOperators().get(0)); Task currTask = mapredCtx.getCurrTask(); - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); HashMap, Task> opTaskMap = opProcCtx .getOpTaskMap(); opTaskMap.put(reducer, currTask); plan.setReducer(reducer); - reduceSinkDesc desc = op.getConf(); + ReduceSinkDesc desc = op.getConf(); plan.setNumReduceTasks(desc.getNumReducers()); @@ -260,14 +260,14 @@ */ public static void initUnionPlan(GenMRProcContext opProcCtx, Task currTask, boolean local) { - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); UnionOperator currUnionOp = opProcCtx.getCurrUnionOp(); assert currUnionOp != null; GenMRUnionCtx uCtx = opProcCtx.getUnionTask(currUnionOp); assert uCtx != null; List taskTmpDirLst = uCtx.getTaskTmpDir(); - List tt_descLst = uCtx.getTTDesc(); 
+ List tt_descLst = uCtx.getTTDesc(); assert !taskTmpDirLst.isEmpty() && !tt_descLst.isEmpty(); assert taskTmpDirLst.size() == tt_descLst.size(); int size = taskTmpDirLst.size(); @@ -275,12 +275,12 @@ for (int pos = 0; pos < size; pos++) { String taskTmpDir = taskTmpDirLst.get(pos); - tableDesc tt_desc = tt_descLst.get(pos); + TableDesc tt_desc = tt_descLst.get(pos); if (plan.getPathToAliases().get(taskTmpDir) == null) { plan.getPathToAliases().put(taskTmpDir, new ArrayList()); plan.getPathToAliases().get(taskTmpDir).add(taskTmpDir); plan.getPathToPartitionInfo().put(taskTmpDir, - new partitionDesc(tt_desc, null)); + new PartitionDesc(tt_desc, null)); plan.getAliasToWork().put(taskTmpDir, currUnionOp); } } @@ -305,7 +305,7 @@ GenMRProcContext opProcCtx, int pos, boolean split, boolean readMapJoinData, boolean readUnionData) throws SemanticException { Task currTask = task; - mapredWork plan = (mapredWork) currTask.getWork(); + MapredWork plan = (MapredWork) currTask.getWork(); Operator currTopOp = opProcCtx.getCurrTopOp(); List> parTasks = null; @@ -334,7 +334,7 @@ seenOps.add(currTopOp); boolean local = false; if (pos != -1) { - local = (pos == ((mapJoinDesc) op.getConf()).getPosBigTable()) ? false + local = (pos == ((MapJoinDesc) op.getConf()).getPosBigTable()) ? false : true; } setTaskPlan(currAliasId, currTopOp, plan, local, opProcCtx); @@ -352,7 +352,7 @@ // obtained from the old map join MapJoinOperator oldMapJoin = mjCtx.getOldMapJoin(); String taskTmpDir = null; - tableDesc tt_desc = null; + TableDesc tt_desc = null; Operator rootOp = null; if (oldMapJoin == null) { @@ -398,7 +398,7 @@ public static void splitPlan(ReduceSinkOperator op, GenMRProcContext opProcCtx) throws SemanticException { // Generate a new task - mapredWork cplan = getMapRedWork(); + MapredWork cplan = getMapRedWork(); ParseContext parseCtx = opProcCtx.getParseCtx(); Task redTask = TaskFactory.get(cplan, parseCtx .getConf()); @@ -406,7 +406,7 @@ // Add the reducer cplan.setReducer(reducer); - reduceSinkDesc desc = op.getConf(); + ReduceSinkDesc desc = op.getConf(); cplan.setNumReduceTasks(new Integer(desc.getNumReducers())); @@ -420,7 +420,7 @@ } /** - * set the current task in the mapredWork + * set the current task in the MapredWork * * @param alias_id * current alias @@ -434,16 +434,16 @@ * processing context */ public static void setTaskPlan(String alias_id, - Operator topOp, mapredWork plan, boolean local, + Operator topOp, MapredWork plan, boolean local, GenMRProcContext opProcCtx) throws SemanticException { ParseContext parseCtx = opProcCtx.getParseCtx(); Set inputs = opProcCtx.getInputs(); ArrayList partDir = new ArrayList(); - ArrayList partDesc = new ArrayList(); + ArrayList partDesc = new ArrayList(); Path tblDir = null; - tableDesc tblDesc = null; + TableDesc tblDesc = null; PrunedPartitionList partsList = null; @@ -465,7 +465,7 @@ parts = partsList.getConfirmedPartns(); parts.addAll(partsList.getUnknownPartns()); - partitionDesc aliasPartnDesc = null; + PartitionDesc aliasPartnDesc = null; try { if (parts.isEmpty()) { if (!partsList.getDeniedPartns().isEmpty()) { @@ -482,7 +482,7 @@ // The table does not have any partitions if (aliasPartnDesc == null) { - aliasPartnDesc = new partitionDesc(Utilities.getTableDesc(parseCtx + aliasPartnDesc = new PartitionDesc(Utilities.getTableDesc(parseCtx .getTopToTable().get(topOp)), null); } @@ -532,14 +532,14 @@ } Iterator iterPath = partDir.iterator(); - Iterator iterPartnDesc = partDesc.iterator(); + Iterator iterPartnDesc = partDesc.iterator(); if (!local) { 
while (iterPath.hasNext()) { assert iterPartnDesc.hasNext(); String path = iterPath.next().toString(); - partitionDesc prtDesc = iterPartnDesc.next(); + PartitionDesc prtDesc = iterPartnDesc.next(); // Add the path to alias mapping if (plan.getPathToAliases().get(path) == null) { @@ -554,11 +554,11 @@ plan.getAliasToWork().put(alias_id, topOp); } else { // populate local work if needed - mapredLocalWork localPlan = plan.getMapLocalWork(); + MapredLocalWork localPlan = plan.getMapLocalWork(); if (localPlan == null) { - localPlan = new mapredLocalWork( + localPlan = new MapredLocalWork( new LinkedHashMap>(), - new LinkedHashMap()); + new LinkedHashMap()); } assert localPlan.getAliasToWork().get(alias_id) == null; @@ -568,18 +568,18 @@ localPlan.getAliasToFetchWork() .put( alias_id, - new fetchWork(fetchWork.convertPathToStringArray(partDir), + new FetchWork(FetchWork.convertPathToStringArray(partDir), partDesc)); } else { localPlan.getAliasToFetchWork().put(alias_id, - new fetchWork(tblDir.toString(), tblDesc)); + new FetchWork(tblDir.toString(), tblDesc)); } plan.setMapLocalWork(localPlan); } } /** - * set the current task in the mapredWork + * set the current task in the MapredWork * * @param alias * current alias @@ -593,29 +593,29 @@ * table descriptor */ public static void setTaskPlan(String path, String alias, - Operator topOp, mapredWork plan, boolean local, - tableDesc tt_desc) throws SemanticException { + Operator topOp, MapredWork plan, boolean local, + TableDesc tt_desc) throws SemanticException { if (!local) { if (plan.getPathToAliases().get(path) == null) { plan.getPathToAliases().put(path, new ArrayList()); } plan.getPathToAliases().get(path).add(alias); - plan.getPathToPartitionInfo().put(path, new partitionDesc(tt_desc, null)); + plan.getPathToPartitionInfo().put(path, new PartitionDesc(tt_desc, null)); plan.getAliasToWork().put(alias, topOp); } else { // populate local work if needed - mapredLocalWork localPlan = plan.getMapLocalWork(); + MapredLocalWork localPlan = plan.getMapLocalWork(); if (localPlan == null) { - localPlan = new mapredLocalWork( + localPlan = new MapredLocalWork( new LinkedHashMap>(), - new LinkedHashMap()); + new LinkedHashMap()); } assert localPlan.getAliasToWork().get(alias) == null; assert localPlan.getAliasToFetchWork().get(alias) == null; localPlan.getAliasToWork().put(alias, topOp); - localPlan.getAliasToFetchWork().put(alias, new fetchWork(alias, tt_desc)); + localPlan.getAliasToFetchWork().put(alias, new FetchWork(alias, tt_desc)); plan.setMapLocalWork(localPlan); } } @@ -628,7 +628,7 @@ * @param topOp * current top operator in the path */ - public static void setKeyAndValueDesc(mapredWork plan, + public static void setKeyAndValueDesc(MapredWork plan, Operator topOp) { if (topOp == null) { return; @@ -638,7 +638,7 @@ ReduceSinkOperator rs = (ReduceSinkOperator) topOp; plan.setKeyDesc(rs.getConf().getKeySerializeInfo()); int tag = Math.max(0, rs.getConf().getTag()); - List tagToSchema = plan.getTagToValueDesc(); + List tagToSchema = plan.getTagToValueDesc(); while (tag + 1 > tagToSchema.size()) { tagToSchema.add(null); } @@ -659,13 +659,13 @@ * * @return the new plan */ - public static mapredWork getMapRedWork() { - mapredWork work = new mapredWork(); + public static MapredWork getMapRedWork() { + MapredWork work = new MapredWork(); work.setPathToAliases(new LinkedHashMap>()); - work.setPathToPartitionInfo(new LinkedHashMap()); + work.setPathToPartitionInfo(new LinkedHashMap()); work .setAliasToWork(new LinkedHashMap>()); - 
work.setTagToValueDesc(new ArrayList()); + work.setTagToValueDesc(new ArrayList()); work.setReducer(null); return work; } @@ -720,13 +720,13 @@ String taskTmpDir = baseCtx.getMRTmpFileURI(); Operator parent = op.getParentOperators().get(posn); - tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils + TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils .getFieldSchemasFromRowSchema(parent.getSchema(), "temporarycol")); // Create a file sink operator for this file name boolean compressIntermediate = parseCtx.getConf().getBoolVar( HiveConf.ConfVars.COMPRESSINTERMEDIATE); - fileSinkDesc desc = new fileSinkDesc(taskTmpDir, tt_desc, + FileSinkDesc desc = new FileSinkDesc(taskTmpDir, tt_desc, compressIntermediate); if (compressIntermediate) { desc.setCompressCodec(parseCtx.getConf().getVar( @@ -753,7 +753,7 @@ // create a dummy tableScan operator on top of op Operator ts_op = putOpInsertMap(OperatorFactory - .get(tableScanDesc.class, parent.getSchema()), null, parseCtx); + .get(TableScanDesc.class, parent.getSchema()), null, parseCtx); childOpList = new ArrayList>(); childOpList.add(op); @@ -765,7 +765,7 @@ mapCurrCtx.put(ts_op, new GenMapRedCtx(childTask, null, null)); String streamDesc = taskTmpDir; - mapredWork cplan = (mapredWork) childTask.getWork(); + MapredWork cplan = (MapredWork) childTask.getWork(); if (setReducer) { Operator reducer = op.getChildOperators().get(0); @@ -829,7 +829,7 @@ Task uTask = null; union.getParentOperators().get(pos); - mapredWork uPlan = null; + MapredWork uPlan = null; // union is encountered for the first time if (uCtxTask == null) { @@ -840,7 +840,7 @@ ctx.setUnionTask(union, uCtxTask); } else { uTask = uCtxTask.getUTask(); - uPlan = (mapredWork) uTask.getWork(); + uPlan = (MapredWork) uTask.getWork(); } // If there is a mapjoin at position 'pos' @@ -851,7 +851,7 @@ uPlan.getPathToAliases().put(taskTmpDir, new ArrayList()); uPlan.getPathToAliases().get(taskTmpDir).add(taskTmpDir); uPlan.getPathToPartitionInfo().put(taskTmpDir, - new partitionDesc(mjCtx.getTTDesc(), null)); + new PartitionDesc(mjCtx.getTTDesc(), null)); uPlan.getAliasToWork().put(taskTmpDir, mjCtx.getRootMapJoinOp()); } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinFactory.java (working copy) @@ -43,9 +43,9 @@ import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.TableDesc; /** * Operator factory for MapJoin processing @@ -83,7 +83,7 @@ GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get( pos)); Task currTask = mapredCtx.getCurrTask(); - mapredWork currPlan = (mapredWork) currTask.getWork(); + MapredWork currPlan = (MapredWork) currTask.getWork(); Operator currTopOp = mapredCtx.getCurrTopOp(); String currAliasId = mapredCtx.getCurrAliasId(); Operator reducer = mapJoin; @@ -126,7 +126,7 @@ MapJoinOperator mapJoin = (MapJoinOperator) nd; GenMRProcContext opProcCtx = (GenMRProcContext) procCtx; - mapredWork 
cplan = GenMapRedUtils.getMapRedWork(); + MapredWork cplan = GenMapRedUtils.getMapRedWork(); ParseContext parseCtx = opProcCtx.getParseCtx(); Task redTask = TaskFactory.get(cplan, parseCtx .getConf()); @@ -212,11 +212,11 @@ ctx.setMapJoinCtx(mapJoin, mjCtx); } - mapredWork mjPlan = GenMapRedUtils.getMapRedWork(); + MapredWork mjPlan = GenMapRedUtils.getMapRedWork(); Task mjTask = TaskFactory.get(mjPlan, parseCtx .getConf()); - tableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils + TableDesc tt_desc = PlanUtils.getIntermediateFileTableDesc(PlanUtils .getFieldSchemasFromRowSchema(mapJoin.getSchema(), "temporarycol")); // generate the temporary file @@ -232,7 +232,7 @@ // Create a file sink operator for this file name Operator fs_op = OperatorFactory.get( - new fileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar( + new FileSinkDesc(taskTmpDir, tt_desc, parseCtx.getConf().getBoolVar( HiveConf.ConfVars.COMPRESSINTERMEDIATE)), mapJoin.getSchema()); assert mapJoin.getChildOperators().size() == 1; @@ -288,7 +288,7 @@ GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get( pos)); Task currTask = mapredCtx.getCurrTask(); - mapredWork currPlan = (mapredWork) currTask.getWork(); + MapredWork currPlan = (MapredWork) currTask.getWork(); mapredCtx.getCurrAliasId(); Operator reducer = mapJoin; HashMap, Task> opTaskMap = ctx @@ -348,7 +348,7 @@ GenMapRedCtx mapredCtx = mapCurrCtx.get(mapJoin.getParentOperators().get( pos)); Task currTask = mapredCtx.getCurrTask(); - mapredWork currPlan = (mapredWork) currTask.getWork(); + MapredWork currPlan = (MapredWork) currTask.getWork(); Operator reducer = mapJoin; HashMap, Task> opTaskMap = ctx .getOpTaskMap(); Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GroupByOptimizer.java (working copy) @@ -50,13 +50,13 @@ import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.PrunedPartitionList; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc; -import org.apache.hadoop.hive.ql.plan.groupByDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; /** @@ -130,7 +130,7 @@ throws SemanticException { // if this is not a HASH groupby, return - if (curr.getConf().getMode() != groupByDesc.Mode.HASH) { + if (curr.getConf().getMode() != GroupByDesc.Mode.HASH) { return; } @@ -140,23 +140,23 @@ } boolean bucketGroupBy = true; - groupByDesc desc = curr.getConf(); - List groupByKeys = new LinkedList(); + GroupByDesc desc = curr.getConf(); + List groupByKeys = new LinkedList(); groupByKeys.addAll(desc.getKeys()); // 
compute groupby columns from groupby keys List groupByCols = new ArrayList(); while (groupByKeys.size() > 0) { - exprNodeDesc node = groupByKeys.remove(0); - if (node instanceof exprNodeColumnDesc) { + ExprNodeDesc node = groupByKeys.remove(0); + if (node instanceof ExprNodeColumnDesc) { groupByCols.addAll(node.getCols()); - } else if ((node instanceof exprNodeConstantDesc) - || (node instanceof exprNodeNullDesc)) { + } else if ((node instanceof ExprNodeConstantDesc) + || (node instanceof ExprNodeNullDesc)) { // nothing - } else if (node instanceof exprNodeFieldDesc) { - groupByKeys.add(0, ((exprNodeFieldDesc) node).getDesc()); + } else if (node instanceof ExprNodeFieldDesc) { + groupByKeys.add(0, ((ExprNodeFieldDesc) node).getDesc()); continue; - } else if (node instanceof exprNodeGenericFuncDesc) { - exprNodeGenericFuncDesc udfNode = ((exprNodeGenericFuncDesc) node); + } else if (node instanceof ExprNodeGenericFuncDesc) { + ExprNodeGenericFuncDesc udfNode = ((ExprNodeGenericFuncDesc) node); GenericUDF udf = udfNode.getGenericUDF(); if (!FunctionRegistry.isDeterministic(udf)) { return; Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/unionproc/UnionProcessor.java (working copy) @@ -37,7 +37,7 @@ /** * Implementation of the union processor. This can be enhanced later on. * Currently, it does the following: Identify if both the subqueries of UNION - * are map-only. Store that fact in the unionDesc/UnionOperator. If either of + * are map-only. Store that fact in the UnionDesc/UnionOperator. If either of * the sub-query involves a map-reduce job, a FS is introduced on top of the * UNION. This can be later optimized to clone all the operators above the * UNION. 
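A minimal usage sketch of the renamed plan classes, as they appear throughout the hunks above. It is not part of the patch: it assumes hive-exec from this revision is on the classpath, and the class name PlanNamingSketch is invented for illustration; only the Hive types and methods already shown in the diff are used.

import org.apache.hadoop.hive.ql.optimizer.GenMapRedUtils;
import org.apache.hadoop.hive.ql.plan.MapredWork;

public class PlanNamingSketch {
  public static void main(String[] args) {
    // Before this patch the plan class was spelled "mapredWork".
    // GenMapRedUtils.getMapRedWork() returns a freshly initialized plan,
    // as the GenMapRedUtils hunk above shows.
    MapredWork work = GenMapRedUtils.getMapRedWork();
    // A map-only plan, mirroring what GenMRFileSink1 sets for its merge task.
    work.setNumReduceTasks(Integer.valueOf(-1));
    // The path-to-alias map starts out empty.
    System.out.println(work.getPathToAliases());
  }
}
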
Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/GenMRSkewJoinProcessor.java (working copy) @@ -45,16 +45,16 @@ import org.apache.hadoop.hive.ql.plan.ConditionalResolverSkewJoin; import org.apache.hadoop.hive.ql.plan.ConditionalWork; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.mapJoinDesc; -import org.apache.hadoop.hive.ql.plan.mapredLocalWork; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; -import org.apache.hadoop.hive.ql.plan.tableScanDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.MapredLocalWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; @@ -109,14 +109,14 @@ String baseTmpDir = parseCtx.getContext().getMRTmpFileURI(); - joinDesc joinDescriptor = joinOp.getConf(); - Map> joinValues = joinDescriptor.getExprs(); + JoinDesc JoinDescriptor = joinOp.getConf(); + Map> joinValues = JoinDescriptor.getExprs(); int numAliases = joinValues.size(); Map bigKeysDirMap = new HashMap(); Map> smallKeysDirMap = new HashMap>(); Map skewJoinJobResultsDir = new HashMap(); - Byte[] tags = joinDescriptor.getTagOrder(); + Byte[] tags = JoinDescriptor.getTagOrder(); for (int i = 0; i < numAliases; i++) { Byte alias = tags[i]; String bigKeysDir = getBigKeysDir(baseTmpDir, alias); @@ -132,29 +132,29 @@ alias)); } - joinDescriptor.setHandleSkewJoin(true); - joinDescriptor.setBigKeysDirMap(bigKeysDirMap); - joinDescriptor.setSmallKeysDirMap(smallKeysDirMap); - joinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), + JoinDescriptor.setHandleSkewJoin(true); + JoinDescriptor.setBigKeysDirMap(bigKeysDirMap); + JoinDescriptor.setSmallKeysDirMap(smallKeysDirMap); + JoinDescriptor.setSkewKeyDefinition(HiveConf.getIntVar(parseCtx.getConf(), HiveConf.ConfVars.HIVESKEWJOINKEY)); Map> bigKeysDirToTaskMap = new HashMap>(); List listWorks = new ArrayList(); List> listTasks = new ArrayList>(); - mapredWork currPlan = (mapredWork) currTask.getWork(); + MapredWork currPlan = (MapredWork) currTask.getWork(); - tableDesc keyTblDesc = (tableDesc) currPlan.getKeyDesc().clone(); + TableDesc keyTblDesc = (TableDesc) currPlan.getKeyDesc().clone(); List joinKeys = Utilities .getColumnNames(keyTblDesc.getProperties()); List joinKeyTypes = Utilities.getColumnTypes(keyTblDesc .getProperties()); - Map tableDescList = new HashMap(); - Map> newJoinValues = new HashMap>(); - Map> newJoinKeys = new HashMap>(); - List newJoinValueTblDesc = new ArrayList();// 
used for + Map TableDescList = new HashMap(); + Map> newJoinValues = new HashMap>(); + Map> newJoinKeys = new HashMap>(); + List newJoinValueTblDesc = new ArrayList();// used for // create - // mapJoinDesc, + // MapJoinDesc, // should // be in // order @@ -165,19 +165,19 @@ for (int i = 0; i < numAliases; i++) { Byte alias = tags[i]; - List valueCols = joinValues.get(alias); + List valueCols = joinValues.get(alias); String colNames = ""; String colTypes = ""; int columnSize = valueCols.size(); - List newValueExpr = new ArrayList(); - List newKeyExpr = new ArrayList(); + List newValueExpr = new ArrayList(); + List newKeyExpr = new ArrayList(); boolean first = true; for (int k = 0; k < columnSize; k++) { TypeInfo type = valueCols.get(k).getTypeInfo(); String newColName = i + "_VALUE_" + k; // any name, it does not matter. newValueExpr - .add(new exprNodeColumnDesc(type, newColName, "" + i, false)); + .add(new ExprNodeColumnDesc(type, newColName, "" + i, false)); if (!first) { colNames = colNames + ","; colTypes = colTypes + ","; @@ -196,14 +196,14 @@ first = false; colNames = colNames + joinKeys.get(k); colTypes = colTypes + joinKeyTypes.get(k); - newKeyExpr.add(new exprNodeColumnDesc(TypeInfoFactory + newKeyExpr.add(new ExprNodeColumnDesc(TypeInfoFactory .getPrimitiveTypeInfo(joinKeyTypes.get(k)), joinKeys.get(k), "" + i, false)); } newJoinValues.put(alias, newValueExpr); newJoinKeys.put(alias, newKeyExpr); - tableDescList.put(alias, Utilities.getTableDesc(colNames, colTypes)); + TableDescList.put(alias, Utilities.getTableDesc(colNames, colTypes)); // construct value table Desc String valueColNames = ""; @@ -223,13 +223,13 @@ valueColNames, valueColTypes)); } - joinDescriptor.setSkewKeysValuesTables(tableDescList); - joinDescriptor.setKeyTableDesc(keyTblDesc); + JoinDescriptor.setSkewKeysValuesTables(TableDescList); + JoinDescriptor.setKeyTableDesc(keyTblDesc); for (int i = 0; i < numAliases - 1; i++) { Byte src = tags[i]; - mapredWork newPlan = PlanUtils.getMapRedWork(); - mapredWork clonePlan = null; + MapredWork newPlan = PlanUtils.getMapRedWork(); + MapredWork clonePlan = null; try { String xmlPlan = currPlan.toXML(); StringBuffer sb = new StringBuffer(xmlPlan); @@ -243,7 +243,7 @@ Operator[] parentOps = new TableScanOperator[tags.length]; for (int k = 0; k < tags.length; k++) { Operator ts = OperatorFactory.get( - tableScanDesc.class, (RowSchema) null); + TableScanDesc.class, (RowSchema) null); parentOps[k] = ts; } Operator tblScan_op = parentOps[i]; @@ -254,7 +254,7 @@ String bigKeyDirPath = bigKeysDirMap.get(src); newPlan.getPathToAliases().put(bigKeyDirPath, aliases); newPlan.getAliasToWork().put(alias, tblScan_op); - partitionDesc part = new partitionDesc(tableDescList.get(src), null); + PartitionDesc part = new PartitionDesc(TableDescList.get(src), null); newPlan.getPathToPartitionInfo().put(bigKeyDirPath, part); newPlan.getAliasToPartnInfo().put(alias, part); @@ -262,16 +262,16 @@ assert reducer instanceof JoinOperator; JoinOperator cloneJoinOp = (JoinOperator) reducer; - mapJoinDesc mapJoinDescriptor = new mapJoinDesc(newJoinKeys, keyTblDesc, - newJoinValues, newJoinValueTblDesc, joinDescriptor - .getOutputColumnNames(), i, joinDescriptor.getConds()); - mapJoinDescriptor.setNoOuterJoin(joinDescriptor.isNoOuterJoin()); - mapJoinDescriptor.setTagOrder(tags); - mapJoinDescriptor.setHandleSkewJoin(false); + MapJoinDesc MapJoinDescriptor = new MapJoinDesc(newJoinKeys, keyTblDesc, + newJoinValues, newJoinValueTblDesc, JoinDescriptor + .getOutputColumnNames(), i, 
JoinDescriptor.getConds()); + MapJoinDescriptor.setNoOuterJoin(JoinDescriptor.isNoOuterJoin()); + MapJoinDescriptor.setTagOrder(tags); + MapJoinDescriptor.setHandleSkewJoin(false); - mapredLocalWork localPlan = new mapredLocalWork( + MapredLocalWork localPlan = new MapredLocalWork( new LinkedHashMap>(), - new LinkedHashMap()); + new LinkedHashMap()); Map smallTblDirs = smallKeysDirMap.get(src); for (int j = 0; j < numAliases; j++) { @@ -283,14 +283,14 @@ localPlan.getAliasToWork().put(small_alias.toString(), tblScan_op2); Path tblDir = new Path(smallTblDirs.get(small_alias)); localPlan.getAliasToFetchWork().put(small_alias.toString(), - new fetchWork(tblDir.toString(), tableDescList.get(small_alias))); + new FetchWork(tblDir.toString(), TableDescList.get(small_alias))); } newPlan.setMapLocalWork(localPlan); // construct a map join and set it as the child operator of tblScan_op MapJoinOperator mapJoinOp = (MapJoinOperator) OperatorFactory - .getAndMakeChild(mapJoinDescriptor, (RowSchema) null, parentOps); + .getAndMakeChild(MapJoinDescriptor, (RowSchema) null, parentOps); // change the children of the original join operator to point to the map // join operator List> childOps = cloneJoinOp @@ -377,4 +377,4 @@ + UNDERLINE + srcTblBigTbl + UNDERLINE + srcTblSmallTbl; } -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/SkewJoinResolver.java (working copy) @@ -37,7 +37,7 @@ import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.parse.ParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.mapredWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; /** * An implementation of PhysicalPlanResolver. It iterator each task with a rule @@ -73,7 +73,7 @@ Task task = (Task) nd; if (!task.isMapRedTask() || task instanceof ConditionalTask - || ((mapredWork) task.getWork()).getReducer() == null) { + || ((MapredWork) task.getWork()).getReducer() == null) { return null; } @@ -92,7 +92,7 @@ // iterator the reducer operator tree ArrayList topNodes = new ArrayList(); - topNodes.add(((mapredWork) task.getWork()).getReducer()); + topNodes.add(((MapredWork) task.getWork()).getReducer()); ogw.startWalking(topNodes, null); return null; } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ColumnPrunerProcCtx.java (working copy) @@ -31,8 +31,8 @@ import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; /** * This class implements the processor context for Column Pruner. 
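A second small sketch, again not part of the patch: the class name ColumnCollectSketch and the sample column names are invented. It shows how the renamed ExprNodeDesc expressions report the columns they reference, mirroring the getColsFromSelectExpr loop in the ColumnPrunerProcCtx hunk below.

import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ColumnCollectSketch {
  public static void main(String[] args) {
    List<ExprNodeDesc> exprList = new ArrayList<ExprNodeDesc>();
    // Two column references; "key", "value" and the alias "src" are sample names.
    exprList.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "key", "src", false));
    exprList.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "value", "src", false));

    List<String> cols = new ArrayList<String>();
    for (ExprNodeDesc expr : exprList) {
      // Same idiom as the pruner code: merge the columns each expression
      // references, keeping each name only once.
      cols = Utilities.mergeUniqElems(cols, expr.getCols());
    }
    System.out.println(cols); // expected: [key, value]
  }
}
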
@@ -111,9 +111,9 @@ */ public List getColsFromSelectExpr(SelectOperator op) { List cols = new ArrayList(); - selectDesc conf = op.getConf(); - ArrayList exprList = conf.getColList(); - for (exprNodeDesc expr : exprList) { + SelectDesc conf = op.getConf(); + ArrayList exprList = conf.getColList(); + for (ExprNodeDesc expr : exprList) { cols = Utilities.mergeUniqElems(cols, expr.getCols()); } return cols; @@ -132,14 +132,14 @@ public List getSelectColsFromChildren(SelectOperator op, List colList) { List cols = new ArrayList(); - selectDesc conf = op.getConf(); + SelectDesc conf = op.getConf(); if (conf.isSelStarNoCompute()) { cols.addAll(colList); return cols; } - ArrayList selectExprs = conf.getColList(); + ArrayList selectExprs = conf.getColList(); // The colList is the output columns used by child operators, they are // different @@ -148,7 +148,7 @@ ArrayList outputColumnNames = conf.getOutputColumnNames(); for (int i = 0; i < outputColumnNames.size(); i++) { if (colList.contains(outputColumnNames.get(i))) { - exprNodeDesc expr = selectExprs.get(i); + ExprNodeDesc expr = selectExprs.get(i); cols = Utilities.mergeUniqElems(cols, expr.getCols()); } } Index: ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/MapJoinProcessor.java (working copy) @@ -52,13 +52,13 @@ import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.mapJoinDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; /** * Implementation of one of the rule-based map join optimization. 
User passes @@ -99,17 +99,17 @@ private MapJoinOperator convertMapJoin(ParseContext pctx, JoinOperator op, QBJoinTree joinTree, int mapJoinPos) throws SemanticException { // outer join cannot be performed on a table which is being cached - joinDesc desc = op.getConf(); - org.apache.hadoop.hive.ql.plan.joinCond[] condns = desc.getConds(); - for (org.apache.hadoop.hive.ql.plan.joinCond condn : condns) { - if (condn.getType() == joinDesc.FULL_OUTER_JOIN) { + JoinDesc desc = op.getConf(); + org.apache.hadoop.hive.ql.plan.JoinCondDesc[] condns = desc.getConds(); + for (org.apache.hadoop.hive.ql.plan.JoinCondDesc condn : condns) { + if (condn.getType() == JoinDesc.FULL_OUTER_JOIN) { throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); } - if ((condn.getType() == joinDesc.LEFT_OUTER_JOIN) + if ((condn.getType() == JoinDesc.LEFT_OUTER_JOIN) && (condn.getLeft() != mapJoinPos)) { throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); } - if ((condn.getType() == joinDesc.RIGHT_OUTER_JOIN) + if ((condn.getType() == JoinDesc.RIGHT_OUTER_JOIN) && (condn.getRight() != mapJoinPos)) { throw new SemanticException(ErrorMsg.NO_OUTER_MAPJOIN.getMsg()); } @@ -118,8 +118,8 @@ RowResolver oldOutputRS = pctx.getOpParseCtx().get(op).getRR(); RowResolver outputRS = new RowResolver(); ArrayList outputColumnNames = new ArrayList(); - Map> keyExprMap = new HashMap>(); - Map> valueExprMap = new HashMap>(); + Map> keyExprMap = new HashMap>(); + Map> valueExprMap = new HashMap>(); // Walk over all the sources (which are guaranteed to be reduce sink // operators). @@ -129,7 +129,7 @@ List> parentOps = op.getParentOperators(); List> newParentOps = new ArrayList>(); List> oldReduceSinkParentOps = new ArrayList>(); - Map colExprMap = new HashMap(); + Map colExprMap = new HashMap(); // found a source which is not to be stored in memory if (leftSrc != null) { // assert mapJoinPos == 0; @@ -162,9 +162,9 @@ for (pos = 0; pos < newParentOps.size(); pos++) { ReduceSinkOperator oldPar = (ReduceSinkOperator) oldReduceSinkParentOps .get(pos); - reduceSinkDesc rsconf = oldPar.getConf(); + ReduceSinkDesc rsconf = oldPar.getConf(); Byte tag = (byte) rsconf.getTag(); - List keys = rsconf.getKeyCols(); + List keys = rsconf.getKeyCols(); keyExprMap.put(tag, keys); } @@ -173,7 +173,7 @@ RowResolver inputRS = pGraphContext.getOpParseCtx().get( newParentOps.get(pos)).getRR(); - List values = new ArrayList(); + List values = new ArrayList(); Iterator keysIter = inputRS.getTableNames().iterator(); while (keysIter.hasNext()) { @@ -190,7 +190,7 @@ String outputCol = oldValueInfo.getInternalName(); if (outputRS.get(key, field) == null) { outputColumnNames.add(outputCol); - exprNodeDesc colDesc = new exprNodeColumnDesc(valueInfo.getType(), + ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(), valueInfo.getInternalName(), valueInfo.getTabAlias(), valueInfo .getIsPartitionCol()); values.add(colDesc); @@ -205,7 +205,7 @@ valueExprMap.put(new Byte((byte) pos), values); } - org.apache.hadoop.hive.ql.plan.joinCond[] joinCondns = op.getConf() + org.apache.hadoop.hive.ql.plan.JoinCondDesc[] joinCondns = op.getConf() .getConds(); Operator[] newPar = new Operator[newParentOps.size()]; @@ -214,32 +214,32 @@ newPar[pos++] = o; } - List keyCols = keyExprMap.get(new Byte((byte) 0)); + List keyCols = keyExprMap.get(new Byte((byte) 0)); StringBuilder keyOrder = new StringBuilder(); for (int i = 0; i < keyCols.size(); i++) { keyOrder.append("+"); } - tableDesc keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils + TableDesc 
keyTableDesc = PlanUtils.getMapJoinKeyTableDesc(PlanUtils .getFieldSchemasFromColumnList(keyCols, "mapjoinkey")); - List valueTableDescs = new ArrayList(); + List valueTableDescs = new ArrayList(); for (pos = 0; pos < newParentOps.size(); pos++) { - List valueCols = valueExprMap.get(new Byte((byte) pos)); + List valueCols = valueExprMap.get(new Byte((byte) pos)); keyOrder = new StringBuilder(); for (int i = 0; i < valueCols.size(); i++) { keyOrder.append("+"); } - tableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils + TableDesc valueTableDesc = PlanUtils.getMapJoinValueTableDesc(PlanUtils .getFieldSchemasFromColumnList(valueCols, "mapjoinvalue")); valueTableDescs.add(valueTableDesc); } MapJoinOperator mapJoinOp = (MapJoinOperator) putOpInsertMap( - OperatorFactory.getAndMakeChild(new mapJoinDesc(keyExprMap, + OperatorFactory.getAndMakeChild(new MapJoinDesc(keyExprMap, keyTableDesc, valueExprMap, valueTableDescs, outputColumnNames, mapJoinPos, joinCondns), new RowSchema(outputRS.getColumnInfos()), newPar), outputRS); @@ -273,18 +273,18 @@ // mapJoin later on RowResolver inputRR = pctx.getOpParseCtx().get(input).getRR(); - ArrayList exprs = new ArrayList(); + ArrayList exprs = new ArrayList(); ArrayList outputs = new ArrayList(); List outputCols = input.getConf().getOutputColumnNames(); RowResolver outputRS = new RowResolver(); - Map colExprMap = new HashMap(); + Map colExprMap = new HashMap(); for (int i = 0; i < outputCols.size(); i++) { String internalName = outputCols.get(i); String[] nm = inputRR.reverseLookup(internalName); ColumnInfo valueInfo = inputRR.get(nm[0], nm[1]); - exprNodeDesc colDesc = new exprNodeColumnDesc(valueInfo.getType(), + ExprNodeDesc colDesc = new ExprNodeColumnDesc(valueInfo.getType(), valueInfo.getInternalName(), nm[0], valueInfo.getIsPartitionCol()); exprs.add(colDesc); outputs.add(internalName); @@ -293,7 +293,7 @@ colExprMap.put(internalName, colDesc); } - selectDesc select = new selectDesc(exprs, outputs, false); + SelectDesc select = new SelectDesc(exprs, outputs, false); SelectOperator sel = (SelectOperator) putOpInsertMap( OperatorFactory.getAndMakeChild(select, new RowSchema(inputRR Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerInfo.java (working copy) @@ -30,7 +30,7 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; import org.apache.hadoop.hive.ql.parse.RowResolver; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; /** * Context for Expression Walker for determining predicate pushdown candidates @@ -48,12 +48,12 @@ /** alias that this expression refers to */ public String alias = null; /** new expr for this expression. 
*/ - public exprNodeDesc convertedExpr = null; + public ExprNodeDesc convertedExpr = null; public ExprInfo() { } - public ExprInfo(boolean isCandidate, String alias, exprNodeDesc replacedNode) { + public ExprInfo(boolean isCandidate, String alias, ExprNodeDesc replacedNode) { this.isCandidate = isCandidate; this.alias = alias; convertedExpr = replacedNode; @@ -70,18 +70,18 @@ * and the information for each node is the value which is used while walking * the tree by its parent */ - private final Map> pushdownPreds; + private final Map> pushdownPreds; /** * Values the expression sub-trees (predicates) that can be pushed down for * root expression tree. Since there can be more than one alias in an * expression tree, this is a map from the alias to predicates. */ - private final Map exprInfoMap; + private final Map exprInfoMap; private boolean isDeterministic = true; public ExprWalkerInfo() { - pushdownPreds = new HashMap>(); - exprInfoMap = new HashMap(); + pushdownPreds = new HashMap>(); + exprInfoMap = new HashMap(); } public ExprWalkerInfo(Operator op, @@ -89,8 +89,8 @@ this.op = op; this.toRR = toRR; - pushdownPreds = new HashMap>(); - exprInfoMap = new HashMap(); + pushdownPreds = new HashMap>(); + exprInfoMap = new HashMap(); } /** @@ -111,7 +111,7 @@ * @return converted expression for give node. If there is none then returns * null. */ - public exprNodeDesc getConvertedNode(Node nd) { + public ExprNodeDesc getConvertedNode(Node nd) { ExprInfo ei = exprInfoMap.get(nd); if (ei == null) { return null; @@ -127,7 +127,7 @@ * @param newNode * new node */ - public void addConvertedNode(exprNodeDesc oldNode, exprNodeDesc newNode) { + public void addConvertedNode(ExprNodeDesc oldNode, ExprNodeDesc newNode) { ExprInfo ei = exprInfoMap.get(oldNode); if (ei == null) { ei = new ExprInfo(); @@ -143,7 +143,7 @@ * @param expr * @return true or false */ - public boolean isCandidate(exprNodeDesc expr) { + public boolean isCandidate(ExprNodeDesc expr) { ExprInfo ei = exprInfoMap.get(expr); if (ei == null) { return false; @@ -158,7 +158,7 @@ * @param b * can */ - public void setIsCandidate(exprNodeDesc expr, boolean b) { + public void setIsCandidate(ExprNodeDesc expr, boolean b) { ExprInfo ei = exprInfoMap.get(expr); if (ei == null) { ei = new ExprInfo(); @@ -173,7 +173,7 @@ * @param expr * @return The alias of the expression */ - public String getAlias(exprNodeDesc expr) { + public String getAlias(ExprNodeDesc expr) { ExprInfo ei = exprInfoMap.get(expr); if (ei == null) { return null; @@ -187,7 +187,7 @@ * @param expr * @param alias */ - public void addAlias(exprNodeDesc expr, String alias) { + public void addAlias(ExprNodeDesc expr, String alias) { if (alias == null) { return; } @@ -205,10 +205,10 @@ * * @param expr */ - public void addFinalCandidate(exprNodeDesc expr) { + public void addFinalCandidate(ExprNodeDesc expr) { String alias = getAlias(expr); if (pushdownPreds.get(alias) == null) { - pushdownPreds.put(alias, new ArrayList()); + pushdownPreds.put(alias, new ArrayList()); } pushdownPreds.get(alias).add(expr.clone()); } @@ -220,7 +220,7 @@ * * @return the map of alias to a list of pushdown predicates */ - public Map> getFinalCandidates() { + public Map> getFinalCandidates() { return pushdownPreds; } @@ -234,9 +234,9 @@ if (ewi == null) { return; } - for (Entry> e : ewi.getFinalCandidates() + for (Entry> e : ewi.getFinalCandidates() .entrySet()) { - List predList = pushdownPreds.get(e.getKey()); + List predList = pushdownPreds.get(e.getKey()); if (predList != null) { 
predList.addAll(e.getValue()); } else { Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/ExprWalkerProcFactory.java (working copy) @@ -37,10 +37,10 @@ import org.apache.hadoop.hive.ql.lib.RuleRegExp; import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; /** * Expression factory for predicate pushdown processing. Each processor @@ -58,7 +58,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... nodeOutputs) throws SemanticException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; - exprNodeColumnDesc colref = (exprNodeColumnDesc) nd; + ExprNodeColumnDesc colref = (ExprNodeColumnDesc) nd; RowResolver toRR = ctx.getToRR(); Operator op = ctx.getOp(); String[] colAlias = toRR.reverseLookup(colref.getColumn()); @@ -66,7 +66,7 @@ if (op.getColumnExprMap() != null) { // replace the output expression with the input expression so that // parent op can understand this expression - exprNodeDesc exp = op.getColumnExprMap().get(colref.getColumn()); + ExprNodeDesc exp = op.getColumnExprMap().get(colref.getColumn()); if (exp == null) { // means that expression can't be pushed either because it is value in // group by @@ -95,12 +95,12 @@ Object... nodeOutputs) throws SemanticException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; String alias = null; - exprNodeFieldDesc expr = (exprNodeFieldDesc) nd; + ExprNodeFieldDesc expr = (ExprNodeFieldDesc) nd; boolean isCandidate = true; assert (nd.getChildren().size() == 1); - exprNodeDesc ch = (exprNodeDesc) nd.getChildren().get(0); - exprNodeDesc newCh = ctx.getConvertedNode(ch); + ExprNodeDesc ch = (ExprNodeDesc) nd.getChildren().get(0); + ExprNodeDesc newCh = ctx.getConvertedNode(ch); if (newCh != null) { expr.setDesc(newCh); ch = newCh; @@ -138,7 +138,7 @@ Object... nodeOutputs) throws SemanticException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; String alias = null; - exprNodeGenericFuncDesc expr = (exprNodeGenericFuncDesc) nd; + ExprNodeGenericFuncDesc expr = (ExprNodeGenericFuncDesc) nd; if (!FunctionRegistry.isDeterministic(expr.getGenericUDF())) { // this GenericUDF can't be pushed down @@ -149,8 +149,8 @@ boolean isCandidate = true; for (int i = 0; i < nd.getChildren().size(); i++) { - exprNodeDesc ch = (exprNodeDesc) nd.getChildren().get(i); - exprNodeDesc newCh = ctx.getConvertedNode(ch); + ExprNodeDesc ch = (ExprNodeDesc) nd.getChildren().get(i); + ExprNodeDesc newCh = ctx.getConvertedNode(ch); if (newCh != null) { expr.getChildExprs().set(i, newCh); ch = newCh; @@ -189,7 +189,7 @@ public Object process(Node nd, Stack stack, NodeProcessorCtx procCtx, Object... 
nodeOutputs) throws SemanticException { ExprWalkerInfo ctx = (ExprWalkerInfo) procCtx; - ctx.setIsCandidate((exprNodeDesc) nd, true); + ctx.setIsCandidate((ExprNodeDesc) nd, true); return true; } } @@ -211,9 +211,9 @@ } public static ExprWalkerInfo extractPushdownPreds(OpWalkerInfo opContext, - Operator op, exprNodeDesc pred) + Operator op, ExprNodeDesc pred) throws SemanticException { - List preds = new ArrayList(); + List preds = new ArrayList(); preds.add(pred); return extractPushdownPreds(opContext, op, preds); } @@ -230,7 +230,7 @@ * @throws SemanticException */ public static ExprWalkerInfo extractPushdownPreds(OpWalkerInfo opContext, - Operator op, List preds) + Operator op, List preds) throws SemanticException { // Create the walker, the rules dispatcher and the context. ExprWalkerInfo exprContext = new ExprWalkerInfo(op, opContext @@ -241,12 +241,12 @@ // generates the plan from the operator tree Map exprRules = new LinkedHashMap(); exprRules.put( - new RuleRegExp("R1", exprNodeColumnDesc.class.getName() + "%"), + new RuleRegExp("R1", ExprNodeColumnDesc.class.getName() + "%"), getColumnProcessor()); exprRules.put( - new RuleRegExp("R2", exprNodeFieldDesc.class.getName() + "%"), + new RuleRegExp("R2", ExprNodeFieldDesc.class.getName() + "%"), getFieldProcessor()); - exprRules.put(new RuleRegExp("R3", exprNodeGenericFuncDesc.class.getName() + exprRules.put(new RuleRegExp("R3", ExprNodeGenericFuncDesc.class.getName() + "%"), getGenericFuncProcessor()); // The dispatcher fires the processor corresponding to the closest matching @@ -256,8 +256,8 @@ GraphWalker egw = new DefaultGraphWalker(disp); List startNodes = new ArrayList(); - List clonedPreds = new ArrayList(); - for (exprNodeDesc node : preds) { + List clonedPreds = new ArrayList(); + for (ExprNodeDesc node : preds) { clonedPreds.add(node.clone()); } startNodes.addAll(clonedPreds); @@ -265,7 +265,7 @@ egw.startWalking(startNodes, null); // check the root expression for final candidates - for (exprNodeDesc pred : clonedPreds) { + for (ExprNodeDesc pred : clonedPreds) { extractFinalCandidates(pred, exprContext); } return exprContext; @@ -275,7 +275,7 @@ * Walks through the top AND nodes and determine which of them are final * candidates */ - private static void extractFinalCandidates(exprNodeDesc expr, + private static void extractFinalCandidates(ExprNodeDesc expr, ExprWalkerInfo ctx) { if (ctx.isCandidate(expr)) { ctx.addFinalCandidate(expr); @@ -286,7 +286,7 @@ // If the operator is AND, we need to determine if any of the children are // final candidates. 
for (Node ch : expr.getChildren()) { - extractFinalCandidates((exprNodeDesc) ch, ctx); + extractFinalCandidates((ExprNodeDesc) ch, ctx); } } Index: ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/ppd/OpProcFactory.java (working copy) @@ -42,11 +42,11 @@ import org.apache.hadoop.hive.ql.parse.OpParseContext; import org.apache.hadoop.hive.ql.parse.RowResolver; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc; -import org.apache.hadoop.hive.ql.plan.joinCond; -import org.apache.hadoop.hive.ql.plan.joinDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.JoinCondDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; /** @@ -114,7 +114,7 @@ + ((Operator) nd).getIdentifier() + ")"); OpWalkerInfo owi = (OpWalkerInfo) procCtx; Operator op = (Operator) nd; - exprNodeDesc predicate = (((FilterOperator) nd).getConf()).getPredicate(); + ExprNodeDesc predicate = (((FilterOperator) nd).getConf()).getPredicate(); // get pushdown predicates for this operator's predicate ExprWalkerInfo ewi = ExprWalkerProcFactory.extractPushdownPreds(owi, op, predicate); @@ -181,17 +181,17 @@ int loj = Integer.MAX_VALUE; int roj = -1; boolean oj = false; - joinCond[] conds = op.getConf().getConds(); + JoinCondDesc[] conds = op.getConf().getConds(); Map> posToAliasMap = op.getPosToAliasMap(); - for (joinCond jc : conds) { - if (jc.getType() == joinDesc.FULL_OUTER_JOIN) { + for (JoinCondDesc jc : conds) { + if (jc.getType() == JoinDesc.FULL_OUTER_JOIN) { oj = true; break; - } else if (jc.getType() == joinDesc.LEFT_OUTER_JOIN) { + } else if (jc.getType() == JoinDesc.LEFT_OUTER_JOIN) { if (jc.getLeft() < loj) { loj = jc.getLeft(); } - } else if (jc.getType() == joinDesc.RIGHT_OUTER_JOIN) { + } else if (jc.getType() == JoinDesc.RIGHT_OUTER_JOIN) { if (jc.getRight() > roj) { roj = jc.getRight(); } @@ -264,11 +264,11 @@ * @param ewi */ protected void logExpr(Node nd, ExprWalkerInfo ewi) { - for (Entry> e : ewi.getFinalCandidates() + for (Entry> e : ewi.getFinalCandidates() .entrySet()) { LOG.info("Pushdown Predicates of " + nd.getName() + " For Alias : " + e.getKey()); - for (exprNodeDesc n : e.getValue()) { + for (ExprNodeDesc n : e.getValue()) { LOG.info("\t" + n.getExprString()); } } @@ -308,7 +308,7 @@ if (ewi == null) { ewi = new ExprWalkerInfo(); } - for (Entry> e : childPreds + for (Entry> e : childPreds .getFinalCandidates().entrySet()) { if (ignoreAliases || aliases == null || aliases.contains(e.getKey()) || e.getKey() == null) { @@ -334,9 +334,9 @@ RowResolver inputRR = owi.getRowResolver(op); // combine all predicates into a single expression - List preds = null; - exprNodeDesc condn = null; - Iterator> iterator = pushDownPreds.getFinalCandidates() + List preds = null; + ExprNodeDesc condn = null; + Iterator> iterator = pushDownPreds.getFinalCandidates() .values().iterator(); while (iterator.hasNext()) { preds = iterator.next(); @@ -347,10 +347,10 @@ } for (; i < preds.size(); i++) { - List children = new ArrayList(2); + List 
children = new ArrayList(2); children.add(condn); children.add(preds.get(i)); - condn = new exprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, + condn = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo, FunctionRegistry.getGenericUDFForAnd(), children); } } @@ -363,8 +363,8 @@ List> originalChilren = op .getChildOperators(); op.setChildOperators(null); - Operator output = OperatorFactory.getAndMakeChild( - new filterDesc(condn, false), new RowSchema(inputRR.getColumnInfos()), + Operator output = OperatorFactory.getAndMakeChild( + new FilterDesc(condn, false), new RowSchema(inputRR.getColumnInfos()), op); output.setChildOperators(originalChilren); for (Operator ch : originalChilren) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeNullEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeNullEvaluator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeNullEvaluator.java (working copy) @@ -19,7 +19,7 @@ package org.apache.hadoop.hive.ql.exec; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -27,9 +27,9 @@ // change the void to the first matching argument public class ExprNodeNullEvaluator extends ExprNodeEvaluator { - protected exprNodeNullDesc expr; + protected ExprNodeNullDesc expr; - public ExprNodeNullEvaluator(exprNodeNullDesc expr) { + public ExprNodeNullEvaluator(ExprNodeNullDesc expr) { this.expr = expr; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionTask.java (working copy) @@ -26,8 +26,8 @@ import org.apache.hadoop.hive.ql.QueryPlan; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.plan.FunctionWork; -import org.apache.hadoop.hive.ql.plan.createFunctionDesc; -import org.apache.hadoop.hive.ql.plan.dropFunctionDesc; +import org.apache.hadoop.hive.ql.plan.CreateFunctionDesc; +import org.apache.hadoop.hive.ql.plan.DropFunctionDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFResolver; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; @@ -53,39 +53,39 @@ @Override public int execute() { - createFunctionDesc createFunctionDesc = work.getCreateFunctionDesc(); - if (createFunctionDesc != null) { - return createFunction(createFunctionDesc); + CreateFunctionDesc CreateFunctionDesc = work.getCreateFunctionDesc(); + if (CreateFunctionDesc != null) { + return createFunction(CreateFunctionDesc); } - dropFunctionDesc dropFunctionDesc = work.getDropFunctionDesc(); - if (dropFunctionDesc != null) { - return dropFunction(dropFunctionDesc); + DropFunctionDesc DropFunctionDesc = work.getDropFunctionDesc(); + if (DropFunctionDesc != null) { + return dropFunction(DropFunctionDesc); } return 0; } - private int createFunction(createFunctionDesc createFunctionDesc) { + private int createFunction(CreateFunctionDesc CreateFunctionDesc) { try { - Class udfClass = getUdfClass(createFunctionDesc); + Class udfClass = getUdfClass(CreateFunctionDesc); if 
(UDF.class.isAssignableFrom(udfClass)) { - FunctionRegistry.registerTemporaryUDF(createFunctionDesc + FunctionRegistry.registerTemporaryUDF(CreateFunctionDesc .getFunctionName(), (Class) udfClass, false); return 0; } else if (GenericUDF.class.isAssignableFrom(udfClass)) { - FunctionRegistry.registerTemporaryGenericUDF(createFunctionDesc + FunctionRegistry.registerTemporaryGenericUDF(CreateFunctionDesc .getFunctionName(), (Class) udfClass); return 0; } else if (GenericUDTF.class.isAssignableFrom(udfClass)) { - FunctionRegistry.registerTemporaryGenericUDTF(createFunctionDesc + FunctionRegistry.registerTemporaryGenericUDTF(CreateFunctionDesc .getFunctionName(), (Class) udfClass); return 0; } else if (UDAF.class.isAssignableFrom(udfClass)) { - FunctionRegistry.registerTemporaryUDAF(createFunctionDesc + FunctionRegistry.registerTemporaryUDAF(CreateFunctionDesc .getFunctionName(), (Class) udfClass); return 0; } else if (GenericUDAFResolver.class.isAssignableFrom(udfClass)) { - FunctionRegistry.registerTemporaryGenericUDAF(createFunctionDesc + FunctionRegistry.registerTemporaryGenericUDAF(CreateFunctionDesc .getFunctionName(), (GenericUDAFResolver) ReflectionUtils .newInstance(udfClass, null)); return 0; @@ -98,9 +98,9 @@ } } - private int dropFunction(dropFunctionDesc dropFunctionDesc) { + private int dropFunction(DropFunctionDesc DropFunctionDesc) { try { - FunctionRegistry.unregisterTemporaryUDF(dropFunctionDesc + FunctionRegistry.unregisterTemporaryUDF(DropFunctionDesc .getFunctionName()); return 0; } catch (HiveException e) { @@ -110,7 +110,7 @@ } @SuppressWarnings("unchecked") - private Class getUdfClass(createFunctionDesc desc) + private Class getUdfClass(CreateFunctionDesc desc) throws ClassNotFoundException { return Class.forName(desc.getClassName(), true, JavaUtils.getClassLoader()); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/JoinOperator.java (working copy) @@ -28,7 +28,7 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.joinDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; @@ -36,7 +36,7 @@ /** * Join operator implementation. 
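[Editor's aside, not part of the patch] The FunctionTask hunks above only rename createFunctionDesc/dropFunctionDesc to CreateFunctionDesc/DropFunctionDesc; the registration logic itself is untouched. As a minimal sketch of what that logic amounts to for a plain UDF — the class name com.example.MyUpperUDF and the function name "my_upper" are hypothetical placeholders, and only the registry calls visible in the hunk are used; assume the surrounding method declares the checked exceptions:

// Resolve the class exactly as getUdfClass() does, then register it as a temporary UDF.
Class<?> udfClass = Class.forName("com.example.MyUpperUDF", true, JavaUtils.getClassLoader());
if (UDF.class.isAssignableFrom(udfClass)) {
  FunctionRegistry.registerTemporaryUDF("my_upper", (Class<? extends UDF>) udfClass, false);
}
// DROP TEMPORARY FUNCTION later takes the matching path through dropFunction():
FunctionRegistry.unregisterTemporaryUDF("my_upper");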
*/ -public class JoinOperator extends CommonJoinOperator implements +public class JoinOperator extends CommonJoinOperator implements Serializable { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchTask.java (working copy) @@ -26,8 +26,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.DriverContext; import org.apache.hadoop.hive.ql.QueryPlan; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; @@ -39,7 +39,7 @@ /** * FetchTask implementation **/ -public class FetchTask extends Task implements Serializable { +public class FetchTask extends Task implements Serializable { private static final long serialVersionUID = 1L; private int maxRows = 100; @@ -83,9 +83,9 @@ } /** - * Return the tableDesc of the fetchWork + * Return the TableDesc of the FetchWork */ - public tableDesc getTblDesc() { + public TableDesc getTblDesc() { return work.getTblDesc(); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantEvaluator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeConstantEvaluator.java (working copy) @@ -19,7 +19,7 @@ package org.apache.hadoop.hive.ql.exec; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector.PrimitiveCategory; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; @@ -27,11 +27,11 @@ public class ExprNodeConstantEvaluator extends ExprNodeEvaluator { - protected exprNodeConstantDesc expr; + protected ExprNodeConstantDesc expr; transient ObjectInspector writableObjectInspector; transient Object writableValue; - public ExprNodeConstantEvaluator(exprNodeConstantDesc expr) { + public ExprNodeConstantEvaluator(ExprNodeConstantDesc expr) { this.expr = expr; PrimitiveCategory pc = ((PrimitiveTypeInfo) expr.getTypeInfo()) .getPrimitiveCategory(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Description.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Description.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Description.java (revision 0) @@ -0,0 +1,31 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.exec; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +@Retention(RetentionPolicy.RUNTIME) +public @interface Description { + String value() default "_FUNC_ is undocumented"; + + String extended() default ""; + + String name() default ""; +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluatorResolver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluatorResolver.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/UDAFEvaluatorResolver.java (working copy) @@ -43,4 +43,4 @@ Class getEvaluatorClass(List argClasses) throws AmbiguousMethodException; -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeColumnEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeColumnEvaluator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeColumnEvaluator.java (working copy) @@ -19,7 +19,7 @@ package org.apache.hadoop.hive.ql.exec; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; @@ -29,12 +29,12 @@ */ public class ExprNodeColumnEvaluator extends ExprNodeEvaluator { - protected exprNodeColumnDesc expr; + protected ExprNodeColumnDesc expr; transient StructObjectInspector[] inspectors; transient StructField[] fields; - public ExprNodeColumnEvaluator(exprNodeColumnDesc expr) { + public ExprNodeColumnEvaluator(ExprNodeColumnDesc expr) { this.expr = expr; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java (working copy) @@ -34,8 +34,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -82,7 +82,7 @@ private int skewKeyDefinition = -1; private Map skewKeysTableObjectInspector = null; private Map tblSerializers = null; - private Map tblDesc = null; + private Map tblDesc = null; private Map bigKeysExistingMap = null; @@ -92,7 +92,7 @@ private final CommonJoinOperator joinOp; private final int numAliases; - private final joinDesc 
conf; + private final JoinDesc conf; public SkewJoinHandler(CommonJoinOperator joinOp) { this.joinOp = joinOp; @@ -102,7 +102,7 @@ public void initiliaze(Configuration hconf) { this.hconf = hconf; - joinDesc desc = joinOp.getConf(); + JoinDesc desc = joinOp.getConf(); skewKeyDefinition = desc.getSkewKeyDefinition(); skewKeysTableObjectInspector = new HashMap( numAliases); @@ -123,7 +123,7 @@ for (int k = 0; k < keyFieldSize; k++) { skewTableKeyInspectors.add(keyFields.get(k).getFieldObjectInspector()); } - tableDesc joinKeyDesc = desc.getKeyTableDesc(); + TableDesc joinKeyDesc = desc.getKeyTableDesc(); List keyColNames = Utilities.getColumnNames(joinKeyDesc .getProperties()); StructObjectInspector structTblKeyInpector = ObjectInspectorFactory @@ -140,7 +140,7 @@ break; } - tableDesc valTblDesc = joinOp.getSpillTableDesc(alias); + TableDesc valTblDesc = joinOp.getSpillTableDesc(alias); List valColNames = new ArrayList(); if (valTblDesc != null) { valColNames = Utilities.getColumnNames(valTblDesc.getProperties()); @@ -156,7 +156,7 @@ skewKeysTableObjectInspector.put((byte) i, structTblInpector); } - // reset rowcontainer's serde, objectinspector, and tableDesc. + // reset rowcontainer's serde, objectinspector, and TableDesc. for (int i = 0; i < numAliases; i++) { Byte alias = conf.getTagOrder()[i]; RowContainer> rc = joinOp.storage.get(Byte Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapOperator.java (working copy) @@ -32,8 +32,8 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -49,7 +49,7 @@ * different from regular operators in that it starts off by processing a * Writable data structure from a Table (instead of a Hive Object). 
**/ -public class MapOperator extends Operator implements Serializable { +public class MapOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; @@ -168,18 +168,18 @@ * @param mrwork * @throws HiveException */ - public void initializeAsRoot(Configuration hconf, mapredWork mrwork) + public void initializeAsRoot(Configuration hconf, MapredWork mrwork) throws HiveException { setConf(mrwork); setChildren(hconf); initialize(hconf, null); } - private static MapOpCtx initObjectInspector(mapredWork conf, + private static MapOpCtx initObjectInspector(MapredWork conf, Configuration hconf, String onefile) throws HiveException, ClassNotFoundException, InstantiationException, IllegalAccessException, SerDeException { - partitionDesc td = conf.getPathToPartitionInfo().get(onefile); + PartitionDesc td = conf.getPathToPartitionInfo().get(onefile); LinkedHashMap partSpec = td.getPartSpec(); Properties tblProps = td.getProperties(); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeEvaluatorFactory.java (working copy) @@ -18,41 +18,41 @@ package org.apache.hadoop.hive.ql.exec; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; public class ExprNodeEvaluatorFactory { public ExprNodeEvaluatorFactory() { } - public static ExprNodeEvaluator get(exprNodeDesc desc) { + public static ExprNodeEvaluator get(ExprNodeDesc desc) { // Constant node - if (desc instanceof exprNodeConstantDesc) { - return new ExprNodeConstantEvaluator((exprNodeConstantDesc) desc); + if (desc instanceof ExprNodeConstantDesc) { + return new ExprNodeConstantEvaluator((ExprNodeConstantDesc) desc); } // Column-reference node, e.g. a column in the input row - if (desc instanceof exprNodeColumnDesc) { - return new ExprNodeColumnEvaluator((exprNodeColumnDesc) desc); + if (desc instanceof ExprNodeColumnDesc) { + return new ExprNodeColumnEvaluator((ExprNodeColumnDesc) desc); } // Generic Function node, e.g. CASE, an operator or a UDF node - if (desc instanceof exprNodeGenericFuncDesc) { - return new ExprNodeGenericFuncEvaluator((exprNodeGenericFuncDesc) desc); + if (desc instanceof ExprNodeGenericFuncDesc) { + return new ExprNodeGenericFuncEvaluator((ExprNodeGenericFuncDesc) desc); } // Field node, e.g. 
get a.myfield1 from a - if (desc instanceof exprNodeFieldDesc) { - return new ExprNodeFieldEvaluator((exprNodeFieldDesc) desc); + if (desc instanceof ExprNodeFieldDesc) { + return new ExprNodeFieldEvaluator((ExprNodeFieldDesc) desc); } // Null node, a constant node with value NULL and no type information - if (desc instanceof exprNodeNullDesc) { - return new ExprNodeNullEvaluator((exprNodeNullDesc) desc); + if (desc instanceof ExprNodeNullDesc) { + return new ExprNodeNullEvaluator((ExprNodeNullDesc) desc); } throw new RuntimeException( - "Cannot find ExprNodeEvaluator for the exprNodeDesc = " + desc); + "Cannot find ExprNodeEvaluator for the ExprNodeDesc = " + desc); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/CollectOperator.java (working copy) @@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.collectDesc; +import org.apache.hadoop.hive.ql.plan.CollectDesc; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -31,7 +31,7 @@ /** * Buffers rows emitted by other operators **/ -public class CollectOperator extends Operator implements +public class CollectOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/LateralViewJoinOperator.java (working copy) @@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.lateralViewJoinDesc; +import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructField; @@ -57,7 +57,7 @@ * previous LVJ operator. 
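[Editor's aside, not part of the patch] The ExprNodeEvaluatorFactory hunk above is a pure rename of the desc classes it dispatches on; the dispatch itself is unchanged. A minimal sketch of the call path after the rename, assuming a constant expression so no input row or row ObjectInspector is needed, and assuming the surrounding method declares throws HiveException:

// A constant int expression is routed to ExprNodeConstantEvaluator by the factory.
ExprNodeDesc five = new ExprNodeConstantDesc(TypeInfoFactory.intTypeInfo, 5);
ExprNodeEvaluator eval = ExprNodeEvaluatorFactory.get(five);
ObjectInspector oi = eval.initialize(null); // constants ignore the input row inspector
Object value = eval.evaluate(null);         // expected: a writable int holding 5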
*/ -public class LateralViewJoinOperator extends Operator { +public class LateralViewJoinOperator extends Operator { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FunctionRegistry.java (working copy) @@ -35,8 +35,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.udf.UDAFMax; import org.apache.hadoop.hive.ql.udf.UDAFMin; import org.apache.hadoop.hive.ql.udf.UDFAbs; @@ -866,14 +866,14 @@ } /** - * Get the UDF class from an exprNodeDesc. Returns null if the exprNodeDesc + * Get the UDF class from an ExprNodeDesc. Returns null if the ExprNodeDesc * does not contain a UDF class. */ - private static Class getUDFClassFromExprDesc(exprNodeDesc desc) { - if (!(desc instanceof exprNodeGenericFuncDesc)) { + private static Class getUDFClassFromExprDesc(ExprNodeDesc desc) { + if (!(desc instanceof ExprNodeGenericFuncDesc)) { return null; } - exprNodeGenericFuncDesc genericFuncDesc = (exprNodeGenericFuncDesc) desc; + ExprNodeGenericFuncDesc genericFuncDesc = (ExprNodeGenericFuncDesc) desc; if (!(genericFuncDesc.getGenericUDF() instanceof GenericUDFBridge)) { return null; } @@ -903,26 +903,26 @@ } /** - * Returns whether the exprNodeDesc is a node of "and", "or", "not". + * Returns whether the ExprNodeDesc is a node of "and", "or", "not". */ - public static boolean isOpAndOrNot(exprNodeDesc desc) { + public static boolean isOpAndOrNot(ExprNodeDesc desc) { Class udfClass = getUDFClassFromExprDesc(desc); return UDFOPAnd.class == udfClass || UDFOPOr.class == udfClass || UDFOPNot.class == udfClass; } /** - * Returns whether the exprNodeDesc is a node of "and". + * Returns whether the ExprNodeDesc is a node of "and". */ - public static boolean isOpAnd(exprNodeDesc desc) { + public static boolean isOpAnd(ExprNodeDesc desc) { Class udfClass = getUDFClassFromExprDesc(desc); return UDFOPAnd.class == udfClass; } /** - * Returns whether the exprNodeDesc is a node of "positive". + * Returns whether the ExprNodeDesc is a node of "positive". 
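[Editor's aside, not part of the patch] The isOpAnd/isOpAndOrNot helpers renamed above and the predicate-combining code in the OpProcFactory hunk earlier in this patch operate on the same renamed ExprNodeGenericFuncDesc. A minimal sketch of ANDing two pushdown predicates the way createFilter does — predA and predB are hypothetical ExprNodeDesc predicates already in scope:

// Combine two boolean predicates into one AND node, as OpProcFactory.createFilter does.
List<ExprNodeDesc> children = new ArrayList<ExprNodeDesc>(2);
children.add(predA);
children.add(predB);
ExprNodeDesc combined = new ExprNodeGenericFuncDesc(TypeInfoFactory.booleanTypeInfo,
    FunctionRegistry.getGenericUDFForAnd(), children);
// The renamed helper should classify this node as an AND:
boolean isAnd = FunctionRegistry.isOpAnd(combined);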
*/ - public static boolean isOpPositive(exprNodeDesc desc) { + public static boolean isOpPositive(ExprNodeDesc desc) { Class udfClass = getUDFClassFromExprDesc(desc); return UDFOPPositive.class == udfClass; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeGenericFuncEvaluator.java (working copy) @@ -21,7 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -30,7 +30,7 @@ private static final Log LOG = LogFactory .getLog(ExprNodeGenericFuncEvaluator.class.getName()); - protected exprNodeGenericFuncDesc expr; + protected ExprNodeGenericFuncDesc expr; transient GenericUDF genericUDF; transient Object rowObject; @@ -53,7 +53,7 @@ } }; - public ExprNodeGenericFuncEvaluator(exprNodeGenericFuncDesc expr) { + public ExprNodeGenericFuncEvaluator(ExprNodeGenericFuncDesc expr) { this.expr = expr; children = new ExprNodeEvaluator[expr.getChildExprs().size()]; for (int i = 0; i < children.length; i++) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java (working copy) @@ -28,7 +28,7 @@ import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.Serializer; @@ -41,7 +41,7 @@ /** * File Sink operator implementation **/ -public class FileSinkOperator extends TerminalOperator implements +public class FileSinkOperator extends TerminalOperator implements Serializable { public static interface RecordWriter { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/description.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/description.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/description.java (working copy) @@ -1,31 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.exec; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -@Retention(RetentionPolicy.RUNTIME) -public @interface description { - String value() default "_FUNC_ is undocumented"; - - String extended() default ""; - - String name() default ""; -} Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExplainTask.java (working copy) @@ -31,8 +31,8 @@ import java.util.Set; import java.util.Map.Entry; -import org.apache.hadoop.hive.ql.plan.explain; -import org.apache.hadoop.hive.ql.plan.explainWork; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.ExplainWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.util.StringUtils; @@ -40,7 +40,7 @@ * ExplainTask implementation * **/ -public class ExplainTask extends Task implements Serializable { +public class ExplainTask extends Task implements Serializable { private static final long serialVersionUID = 1L; public ExplainTask() { @@ -161,10 +161,10 @@ private void outputPlan(Serializable work, PrintStream out, boolean extended, int indent) throws Exception { // Check if work has an explain annotation - Annotation note = work.getClass().getAnnotation(explain.class); + Annotation note = work.getClass().getAnnotation(Explain.class); - if (note instanceof explain) { - explain xpl_note = (explain) note; + if (note instanceof Explain) { + Explain xpl_note = (Explain) note; if (extended || xpl_note.normalExplain()) { out.print(indentString(indent)); out.println(xpl_note.displayName()); @@ -193,10 +193,10 @@ for (Method m : methods) { int prop_indents = indent + 2; - note = m.getAnnotation(explain.class); + note = m.getAnnotation(Explain.class); - if (note instanceof explain) { - explain xpl_note = (explain) note; + if (note instanceof Explain) { + Explain xpl_note = (Explain) note; if (extended || xpl_note.normalExplain()) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExtractOperator.java (working copy) @@ -22,13 +22,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.extractDesc; +import org.apache.hadoop.hive.ql.plan.ExtractDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; /** * Extract operator implementation Extracts a subobject and passes that on. 
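[Editor's aside, not part of the patch] With the lowercase description annotation deleted above and the capitalized Description annotation introduced earlier in this patch (RUNTIME retention; name, value and extended elements), UDF documentation can still be read reflectively, in the same style the ExplainTask hunk uses for the Explain annotation. A minimal sketch — udfClass is a hypothetical Class<?> already resolved, and the _FUNC_ substitution mirrors the placeholder used in the annotation's default value:

// Pull the documentation off a UDF class through the renamed annotation.
Description doc = udfClass.getAnnotation(Description.class);
if (doc != null) {
  System.out.println(doc.value().replace("_FUNC_", doc.name()));
  if (doc.extended().length() > 0) {
    System.out.println(doc.extended());
  }
}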
**/ -public class ExtractOperator extends Operator implements +public class ExtractOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; transient protected ExprNodeEvaluator eval; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecMapper.java (working copy) @@ -29,9 +29,9 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.mapredLocalWork; -import org.apache.hadoop.hive.ql.plan.mapredWork; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.MapredLocalWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.io.Writable; @@ -75,7 +75,7 @@ try { jc = job; // create map and fetch operators - mapredWork mrwork = Utilities.getMapRedWork(job); + MapredWork mrwork = Utilities.getMapRedWork(job); mo = new MapOperator(); mo.setConf(mrwork); // initialize map operator @@ -84,13 +84,13 @@ mo.initialize(jc, null); // initialize map local work - mapredLocalWork localWork = mrwork.getMapLocalWork(); + MapredLocalWork localWork = mrwork.getMapLocalWork(); if (localWork == null) { return; } fetchOperators = new HashMap(); // create map local operators - for (Map.Entry entry : localWork.getAliasToFetchWork() + for (Map.Entry entry : localWork.getAliasToFetchWork() .entrySet()) { fetchOperators.put(entry.getKey(), new FetchOperator(entry.getValue(), job)); @@ -130,7 +130,7 @@ // process map local operators if (fetchOperators != null) { try { - mapredLocalWork localWork = mo.getConf().getMapLocalWork(); + MapredLocalWork localWork = mo.getConf().getMapLocalWork(); int fetchOpNum = 0; for (Map.Entry entry : fetchOperators .entrySet()) { @@ -227,7 +227,7 @@ try { mo.close(abort); if (fetchOperators != null) { - mapredLocalWork localWork = mo.getConf().getMapLocalWork(); + MapredLocalWork localWork = mo.getConf().getMapLocalWork(); for (Map.Entry entry : fetchOperators.entrySet()) { Operator forwardOp = localWork .getAliasToWork().get(entry.getKey()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TaskFactory.java (working copy) @@ -26,11 +26,11 @@ import org.apache.hadoop.hive.ql.plan.ConditionalWork; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.FunctionWork; -import org.apache.hadoop.hive.ql.plan.copyWork; -import org.apache.hadoop.hive.ql.plan.explainWork; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.moveWork; +import org.apache.hadoop.hive.ql.plan.CopyWork; +import org.apache.hadoop.hive.ql.plan.ExplainWork; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; /** * TaskFactory implementation @@ -50,19 +50,19 @@ public static ArrayList> taskvec; static { taskvec = new ArrayList>(); - 
taskvec.add(new taskTuple(moveWork.class, MoveTask.class)); - taskvec.add(new taskTuple(fetchWork.class, FetchTask.class)); - taskvec.add(new taskTuple(copyWork.class, CopyTask.class)); + taskvec.add(new taskTuple(MoveWork.class, MoveTask.class)); + taskvec.add(new taskTuple(FetchWork.class, FetchTask.class)); + taskvec.add(new taskTuple(CopyWork.class, CopyTask.class)); taskvec.add(new taskTuple(DDLWork.class, DDLTask.class)); taskvec.add(new taskTuple(FunctionWork.class, FunctionTask.class)); taskvec - .add(new taskTuple(explainWork.class, ExplainTask.class)); + .add(new taskTuple(ExplainWork.class, ExplainTask.class)); taskvec.add(new taskTuple(ConditionalWork.class, ConditionalTask.class)); // we are taking this out to allow us to instantiate either MapRedTask or // ExecDriver dynamically at run time based on configuration - // taskvec.add(new taskTuple(mapredWork.class, + // taskvec.add(new taskTuple(MapredWork.class, // ExecDriver.class)); } @@ -99,7 +99,7 @@ } } - if (workClass == mapredWork.class) { + if (workClass == MapredWork.class) { boolean viachild = conf.getBoolVar(HiveConf.ConfVars.SUBMITVIACHILD); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapRedTask.java (working copy) @@ -29,7 +29,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.exec.Utilities.StreamPrinter; -import org.apache.hadoop.hive.ql.plan.mapredWork; +import org.apache.hadoop.hive.ql.plan.MapredWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.shims.ShimLoader; @@ -39,7 +39,7 @@ * runs it from a separate jvm. 
The primary issue with this is the inability to * control logging from a separate jvm in a consistent manner **/ -public class MapRedTask extends Task implements Serializable { +public class MapRedTask extends Task implements Serializable { private static final long serialVersionUID = 1L; @@ -86,7 +86,7 @@ String hiveConfArgs = ExecDriver.generateCmdLine(conf); File scratchDir = new File(conf.getVar(HiveConf.ConfVars.SCRATCHDIR)); - mapredWork plan = getWork(); + MapredWork plan = getWork(); File planFile = File.createTempFile("plan", ".xml", scratchDir); LOG.info("Generating plan file " + planFile.toString()); @@ -192,7 +192,7 @@ @Override public boolean hasReduce() { - mapredWork w = getWork(); + MapredWork w = getWork(); return w.getReducer() != null; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/SelectOperator.java (working copy) @@ -23,15 +23,15 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; /** * Select operator implementation **/ -public class SelectOperator extends Operator implements +public class SelectOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; @@ -47,7 +47,7 @@ return; } - ArrayList colList = conf.getColList(); + ArrayList colList = conf.getColList(); eval = new ExprNodeEvaluator[colList.size()]; for (int i = 0; i < colList.size(); i++) { assert (colList.get(i) != null); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/GroupByOperator.java (working copy) @@ -34,9 +34,9 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.OpParseContext; -import org.apache.hadoop.hive.ql.plan.aggregationDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.groupByDesc; +import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator.AggregationBuffer; @@ -53,7 +53,7 @@ /** * GroupBy operator implementation. 
*/ -public class GroupByOperator extends Operator implements +public class GroupByOperator extends Operator implements Serializable { static final private Log LOG = LogFactory.getLog(GroupByOperator.class @@ -185,7 +185,7 @@ .getAggregators().size()][]; aggregationParameterObjects = new Object[conf.getAggregators().size()][]; for (int i = 0; i < aggregationParameterFields.length; i++) { - ArrayList parameters = conf.getAggregators().get(i) + ArrayList parameters = conf.getAggregators().get(i) .getParameters(); aggregationParameterFields[i] = new ExprNodeEvaluator[parameters.size()]; aggregationParameterObjectInspectors[i] = new ObjectInspector[parameters @@ -215,7 +215,7 @@ aggregationEvaluators = new GenericUDAFEvaluator[conf.getAggregators() .size()]; for (int i = 0; i < aggregationEvaluators.length; i++) { - aggregationDesc agg = conf.getAggregators().get(i); + AggregationDesc agg = conf.getAggregators().get(i); aggregationEvaluators[i] = agg.getGenericUDAFEvaluator(); } @@ -233,7 +233,7 @@ bucketGroup = conf.getBucketGroup(); aggregationsParametersLastInvoke = new Object[conf.getAggregators().size()][]; - if (conf.getMode() != groupByDesc.Mode.HASH || bucketGroup) { + if (conf.getMode() != GroupByDesc.Mode.HASH || bucketGroup) { aggregations = newAggregations(); hashAggr = false; } else { @@ -411,7 +411,7 @@ // 64 bytes is the overhead for a reference fixedRowSize = javaHashEntryOverHead; - ArrayList keys = conf.getKeys(); + ArrayList keys = conf.getKeys(); // Go over all the keys and get the size of the fields of fixed length. Keep // track of the variable length keys @@ -905,15 +905,15 @@ public List genColLists( HashMap, OpParseContext> opParseCtx) { List colLists = new ArrayList(); - ArrayList keys = conf.getKeys(); - for (exprNodeDesc key : keys) { + ArrayList keys = conf.getKeys(); + for (ExprNodeDesc key : keys) { colLists = Utilities.mergeUniqElems(colLists, key.getCols()); } - ArrayList aggrs = conf.getAggregators(); - for (aggregationDesc aggr : aggrs) { - ArrayList params = aggr.getParameters(); - for (exprNodeDesc param : params) { + ArrayList aggrs = conf.getAggregators(); + for (AggregationDesc aggr : aggrs) { + ArrayList params = aggr.getParameters(); + for (ExprNodeDesc param : params) { colLists = Utilities.mergeUniqElems(colLists, param.getCols()); } } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectValue.java (working copy) @@ -83,7 +83,7 @@ try { metadataTag = in.readInt(); - // get the tableDesc from the map stored in the mapjoin operator + // get the TableDesc from the map stored in the mapjoin operator MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get( Integer.valueOf(metadataTag)); int sz = in.readInt(); @@ -114,7 +114,7 @@ out.writeInt(metadataTag); - // get the tableDesc from the map stored in the mapjoin operator + // get the TableDesc from the map stored in the mapjoin operator MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get( Integer.valueOf(metadataTag)); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (revision 901960) +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/RowContainer.java (working copy) @@ -35,7 +35,7 @@ import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -99,7 +99,7 @@ private List keyObject; - private tableDesc tblDesc; + private TableDesc tblDesc; boolean firstCalled = false; // once called first, it will never be able to // write again. @@ -165,7 +165,7 @@ } currentWriteBlock[addCursor++] = t; } else if (t != null) { - // the tableDesc will be null in the case that all columns in that table + // the TableDesc will be null in the case that all columns in that table // is not used. we use a dummy row to denote all rows in that table, and // the dummy row is added by caller. this.dummyRow = t; @@ -498,7 +498,7 @@ this.keyObject = dummyKey; } - public void setTableDesc(tableDesc tblDesc) { + public void setTableDesc(TableDesc tblDesc) { this.tblDesc = tblDesc; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinObjectKey.java (working copy) @@ -80,7 +80,7 @@ try { metadataTag = in.readInt(); - // get the tableDesc from the map stored in the mapjoin operator + // get the TableDesc from the map stored in the mapjoin operator MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get( Integer.valueOf(metadataTag)); @@ -100,7 +100,7 @@ try { out.writeInt(metadataTag); - // get the tableDesc from the map stored in the mapjoin operator + // get the TableDesc from the map stored in the mapjoin operator MapJoinObjectCtx ctx = MapJoinOperator.getMapMetadata().get( Integer.valueOf(metadataTag)); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FetchOperator.java (working copy) @@ -34,9 +34,9 @@ import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.objectinspector.InspectableObject; @@ -61,7 +61,7 @@ transient protected Log LOG; transient protected LogHelper console; - public FetchOperator(fetchWork work, JobConf job) { + public FetchOperator(FetchWork work, JobConf job) { LOG = LogFactory.getLog(this.getClass().getName()); console = new LogHelper(LOG); @@ -78,7 +78,7 @@ rowWithPart = new Object[2]; } - private final fetchWork work; + private final FetchWork work; private int splitNum; 
private RecordReader currRecReader; private InputSplit[] inputSplits; @@ -88,10 +88,10 @@ private Writable value; private Deserializer serde; private Iterator iterPath; - private Iterator iterPartDesc; + private Iterator iterPartDesc; private Path currPath; - private partitionDesc currPart; - private tableDesc currTbl; + private PartitionDesc currPart; + private TableDesc currTbl; private boolean tblDataDone; private StructObjectInspector rowObjectInspector; private final Object[] rowWithPart; @@ -110,7 +110,7 @@ inputFormats.put(inputFormatClass, newInstance); } catch (Exception e) { throw new IOException("Cannot create an instance of InputFormat class " - + inputFormatClass.getName() + " as specified in mapredWork!"); + + inputFormatClass.getName() + " as specified in MapredWork!"); } } return inputFormats.get(inputFormatClass); @@ -174,7 +174,7 @@ } return; } else { - iterPath = fetchWork.convertStringToPathArray(work.getPartDir()) + iterPath = FetchWork.convertStringToPathArray(work.getPartDir()) .iterator(); iterPartDesc = work.getPartDesc().iterator(); } @@ -182,7 +182,7 @@ while (iterPath.hasNext()) { Path nxt = iterPath.next(); - partitionDesc prt = iterPartDesc.next(); + PartitionDesc prt = iterPartDesc.next(); FileSystem fs = nxt.getFileSystem(job); if (fs.exists(nxt)) { FileStatus[] fStats = fs.listStatus(nxt); @@ -213,7 +213,7 @@ job.set("mapred.input.dir", org.apache.hadoop.util.StringUtils .escapeString(currPath.toString())); - tableDesc tmp = currTbl; + TableDesc tmp = currTbl; if (tmp == null) { tmp = currPart.getTableDesc(); } @@ -302,12 +302,12 @@ public ObjectInspector getOutputObjectInspector() throws HiveException { try { if (work.getTblDir() != null) { - tableDesc tbl = work.getTblDesc(); + TableDesc tbl = work.getTblDesc(); Deserializer serde = tbl.getDeserializerClass().newInstance(); serde.initialize(job, tbl.getProperties()); return serde.getObjectInspector(); } else { - List listParts = work.getPartDesc(); + List listParts = work.getPartDesc(); currPart = listParts.get(0); serde = currPart.getTableDesc().getDeserializerClass().newInstance(); serde.initialize(job, currPart.getTableDesc().getProperties()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/TableScanOperator.java (working copy) @@ -21,7 +21,7 @@ import java.io.Serializable; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.tableScanDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; /** @@ -29,7 +29,7 @@ * forward it. 
This will be needed as part of local work when data is not being * read as part of map-reduce framework **/ -public class TableScanOperator extends Operator implements +public class TableScanOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; @@ -57,9 +57,9 @@ } // this 'neededColumnIDs' field is included in this operator class instead of - // its desc class.The reason is that 1)tableScanDesc can not be instantiated, + // its desc class.The reason is that 1)TableScanDesc can not be instantiated, // and 2) it will fail some join and union queries if this is added forcibly - // into tableScanDesc + // into TableScanDesc java.util.ArrayList neededColumnIDs; public void setNeededColumnIDs(java.util.ArrayList orign_columns) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MapJoinOperator.java (working copy) @@ -34,8 +34,8 @@ import org.apache.hadoop.hive.ql.exec.persistence.MapJoinObjectValue; import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.mapJoinDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; @@ -50,7 +50,7 @@ /** * Map side Join operator implementation. */ -public class MapJoinOperator extends CommonJoinOperator implements +public class MapJoinOperator extends CommonJoinOperator implements Serializable { private static final long serialVersionUID = 1L; static final private Log LOG = LogFactory.getLog(MapJoinOperator.class @@ -86,7 +86,7 @@ public static class MapJoinObjectCtx { ObjectInspector standardOI; SerDe serde; - tableDesc tblDesc; + TableDesc tblDesc; Configuration conf; /** @@ -94,7 +94,7 @@ * @param serde */ public MapJoinObjectCtx(ObjectInspector standardOI, SerDe serde, - tableDesc tblDesc, Configuration conf) { + TableDesc tblDesc, Configuration conf) { this.standardOI = standardOI; this.serde = serde; this.tblDesc = tblDesc; @@ -115,7 +115,7 @@ return serde; } - public tableDesc getTblDesc() { + public TableDesc getTblDesc() { return tblDesc; } @@ -239,7 +239,7 @@ if (firstRow) { metadataKeyTag = nextVal++; - tableDesc keyTableDesc = conf.getKeyTblDesc(); + TableDesc keyTableDesc = conf.getKeyTblDesc(); SerDe keySerializer = (SerDe) ReflectionUtils.newInstance( keyTableDesc.getDeserializerClass(), null); keySerializer.initialize(null, keyTableDesc.getProperties()); @@ -299,7 +299,7 @@ if (metadataValueTag[tag] == -1) { metadataValueTag[tag] = nextVal++; - tableDesc valueTableDesc = conf.getValueTblDescs().get(tag); + TableDesc valueTableDesc = conf.getValueTblDescs().get(tag); SerDe valueSerDe = (SerDe) ReflectionUtils.newInstance(valueTableDesc .getDeserializerClass(), null); valueSerDe.initialize(null, valueTableDesc.getProperties()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeFieldEvaluator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeFieldEvaluator.java (revision 901960) +++ 
ql/src/java/org/apache/hadoop/hive/ql/exec/ExprNodeFieldEvaluator.java (working copy) @@ -22,7 +22,7 @@ import java.util.List; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; @@ -36,7 +36,7 @@ */ public class ExprNodeFieldEvaluator extends ExprNodeEvaluator { - protected exprNodeFieldDesc desc; + protected ExprNodeFieldDesc desc; transient ExprNodeEvaluator leftEvaluator; transient ObjectInspector leftInspector; transient StructObjectInspector structObjectInspector; @@ -44,7 +44,7 @@ transient ObjectInspector structFieldObjectInspector; transient ObjectInspector resultObjectInspector; - public ExprNodeFieldEvaluator(exprNodeFieldDesc desc) { + public ExprNodeFieldEvaluator(ExprNodeFieldDesc desc) { this.desc = desc; leftEvaluator = ExprNodeEvaluatorFactory.get(desc.getDesc()); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/UDTFOperator.java (working copy) @@ -26,7 +26,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.udtfDesc; +import org.apache.hadoop.hive.ql.plan.UDTFDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.udf.generic.UDTFCollector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -34,7 +34,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.StructField; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -public class UDTFOperator extends Operator implements Serializable { +public class UDTFOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; protected final Log LOG = LogFactory.getLog(this.getClass().getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ForwardOperator.java (working copy) @@ -21,13 +21,13 @@ import java.io.Serializable; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.forwardDesc; +import org.apache.hadoop.hive.ql.plan.ForwardDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; /** * Forward Operator Just forwards. Doesn't do anything itself. 
**/ -public class ForwardOperator extends Operator implements +public class ForwardOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ScriptOperator.java (working copy) @@ -35,7 +35,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.scriptDesc; +import org.apache.hadoop.hive.ql.plan.ScriptDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDeException; @@ -47,7 +47,7 @@ import org.apache.hadoop.io.Writable; import org.apache.hadoop.util.StringUtils; -public class ScriptOperator extends Operator implements +public class ScriptOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/OperatorFactory.java (working copy) @@ -22,22 +22,22 @@ import java.util.ArrayList; import java.util.List; -import org.apache.hadoop.hive.ql.plan.collectDesc; -import org.apache.hadoop.hive.ql.plan.extractDesc; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc; -import org.apache.hadoop.hive.ql.plan.forwardDesc; -import org.apache.hadoop.hive.ql.plan.groupByDesc; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.lateralViewJoinDesc; -import org.apache.hadoop.hive.ql.plan.limitDesc; -import org.apache.hadoop.hive.ql.plan.mapJoinDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.scriptDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; -import org.apache.hadoop.hive.ql.plan.tableScanDesc; -import org.apache.hadoop.hive.ql.plan.udtfDesc; -import org.apache.hadoop.hive.ql.plan.unionDesc; +import org.apache.hadoop.hive.ql.plan.CollectDesc; +import org.apache.hadoop.hive.ql.plan.ExtractDesc; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.ForwardDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc; +import org.apache.hadoop.hive.ql.plan.LimitDesc; +import org.apache.hadoop.hive.ql.plan.MapJoinDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.ScriptDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.UDTFDesc; +import org.apache.hadoop.hive.ql.plan.UnionDesc; public class OperatorFactory { @@ -54,30 +54,30 @@ public static ArrayList opvec; static { opvec = new ArrayList(); - opvec.add(new opTuple(filterDesc.class, FilterOperator.class)); - opvec.add(new opTuple(selectDesc.class, SelectOperator.class)); + opvec.add(new opTuple(FilterDesc.class, FilterOperator.class)); + 
opvec.add(new opTuple(SelectDesc.class, SelectOperator.class)); opvec - .add(new opTuple(forwardDesc.class, ForwardOperator.class)); - opvec.add(new opTuple(fileSinkDesc.class, + .add(new opTuple(ForwardDesc.class, ForwardOperator.class)); + opvec.add(new opTuple(FileSinkDesc.class, FileSinkOperator.class)); opvec - .add(new opTuple(collectDesc.class, CollectOperator.class)); - opvec.add(new opTuple(scriptDesc.class, ScriptOperator.class)); - opvec.add(new opTuple(reduceSinkDesc.class, + .add(new opTuple(CollectDesc.class, CollectOperator.class)); + opvec.add(new opTuple(ScriptDesc.class, ScriptOperator.class)); + opvec.add(new opTuple(ReduceSinkDesc.class, ReduceSinkOperator.class)); opvec - .add(new opTuple(extractDesc.class, ExtractOperator.class)); + .add(new opTuple(ExtractDesc.class, ExtractOperator.class)); opvec - .add(new opTuple(groupByDesc.class, GroupByOperator.class)); - opvec.add(new opTuple(joinDesc.class, JoinOperator.class)); + .add(new opTuple(GroupByDesc.class, GroupByOperator.class)); + opvec.add(new opTuple(JoinDesc.class, JoinOperator.class)); opvec - .add(new opTuple(mapJoinDesc.class, MapJoinOperator.class)); - opvec.add(new opTuple(limitDesc.class, LimitOperator.class)); - opvec.add(new opTuple(tableScanDesc.class, + .add(new opTuple(MapJoinDesc.class, MapJoinOperator.class)); + opvec.add(new opTuple(LimitDesc.class, LimitOperator.class)); + opvec.add(new opTuple(TableScanDesc.class, TableScanOperator.class)); - opvec.add(new opTuple(unionDesc.class, UnionOperator.class)); - opvec.add(new opTuple(udtfDesc.class, UDTFOperator.class)); - opvec.add(new opTuple(lateralViewJoinDesc.class, + opvec.add(new opTuple(UnionDesc.class, UnionOperator.class)); + opvec.add(new opTuple(UDTFDesc.class, UDTFOperator.class)); + opvec.add(new opTuple(LateralViewJoinDesc.class, LateralViewJoinOperator.class)); } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Operator.java (working copy) @@ -32,8 +32,8 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.explain; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; +import org.apache.hadoop.hive.ql.plan.Explain; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; @@ -159,7 +159,7 @@ this.conf = conf; } - @explain + @Explain public T getConf() { return conf; } @@ -201,7 +201,7 @@ * optimizer and built during semantic analysis contains only key elements for * reduce sink and group by op */ - protected transient Map colExprMap; + protected transient Map colExprMap; public void setId(String id) { this.id = id; @@ -707,11 +707,11 @@ * * @return null if the operator doesn't change columns */ - public Map getColumnExprMap() { + public Map getColumnExprMap() { return colExprMap; } - public void setColumnExprMap(Map colExprMap) { + public void setColumnExprMap(Map colExprMap) { this.colExprMap = colExprMap; } Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java =================================================================== --- 
ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecDriver.java (working copy) @@ -56,8 +56,8 @@ import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.hive.ql.io.HiveOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -75,7 +75,7 @@ import org.apache.log4j.BasicConfigurator; import org.apache.log4j.varia.NullAppender; -public class ExecDriver extends Task implements Serializable { +public class ExecDriver extends Task implements Serializable { private static final long serialVersionUID = 1L; @@ -154,7 +154,7 @@ /** * Constructor/Initialization for invocation as independent utility */ - public ExecDriver(mapredWork plan, JobConf job, boolean isSilent) + public ExecDriver(MapredWork plan, JobConf job, boolean isSilent) throws HiveException { setWork(plan); this.job = job; @@ -375,7 +375,7 @@ * @return the number of reducers. */ public int estimateNumberOfReducers(HiveConf hive, JobConf job, - mapredWork work) throws IOException { + MapredWork work) throws IOException { if (hive == null) { hive = new HiveConf(); } @@ -442,7 +442,7 @@ * @return the total size in bytes. * @throws IOException */ - public long getTotalInputFileSize(JobConf job, mapredWork work) + public long getTotalInputFileSize(JobConf job, MapredWork work) throws IOException { long r = 0; // For each input path, calculate the total size. 
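
The ExecDriver hunks above and below are mechanical renames (mapredWork -> MapredWork, partitionDesc -> PartitionDesc); constructor and helper signatures keep their shape. As a rough sketch of how the standalone-utility path reads after the rename -- deserialize an XML-encoded plan and hand it to ExecDriver, mirroring the main() hunk that follows -- note that the plan-file argument and JobConf setup below are illustrative assumptions, not code from this patch.

import java.io.FileInputStream;
import java.io.InputStream;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.ql.exec.ExecDriver;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.MapredWork;
import org.apache.hadoop.mapred.JobConf;

public class RunSerializedPlan {
  public static void main(String[] args) throws Exception {
    // Assumed invocation: args[0] points at an XML plan file previously written
    // by Utilities.serializeMapRedWork; the real driver reads the plan location
    // from its own command-line handling.
    JobConf conf = new JobConf(new HiveConf());
    InputStream pathData = new FileInputStream(args[0]);

    // Deserialize into the renamed MapredWork plan type.
    MapredWork plan = Utilities.deserializeMapRedWork(pathData, conf);

    // Run the plan as an independent utility, mirroring ExecDriver.main().
    ExecDriver ed = new ExecDriver(plan, conf, /* isSilent */ false);
    int ret = ed.execute();
    System.exit(ret);
  }
}
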
@@ -930,7 +930,7 @@ } } - mapredWork plan = Utilities.deserializeMapRedWork(pathData, conf); + MapredWork plan = Utilities.deserializeMapRedWork(pathData, conf); ExecDriver ed = new ExecDriver(plan, conf, isSilent); int ret = ed.execute(); @@ -1002,7 +1002,7 @@ @Override public boolean hasReduce() { - mapredWork w = getWork(); + MapredWork w = getWork(); return w.getReducer() != null; } @@ -1022,7 +1022,7 @@ /** * Handle a empty/null path for a given alias */ - private int addInputPath(String path, JobConf job, mapredWork work, + private int addInputPath(String path, JobConf job, MapredWork work, String hiveScratchDir, int numEmptyPaths, boolean isEmptyPath, String alias) throws Exception { // either the directory does not exist or it is empty @@ -1065,14 +1065,14 @@ work.setPathToAliases(pathToAliases); - LinkedHashMap pathToPartitionInfo = work + LinkedHashMap pathToPartitionInfo = work .getPathToPartitionInfo(); if (isEmptyPath) { pathToPartitionInfo.put(newPath.toUri().toString(), pathToPartitionInfo .get(path)); pathToPartitionInfo.remove(path); } else { - partitionDesc pDesc = work.getAliasToPartnInfo().get(alias).clone(); + PartitionDesc pDesc = work.getAliasToPartnInfo().get(alias).clone(); pathToPartitionInfo.put(newPath.toUri().toString(), pDesc); } work.setPathToPartitionInfo(pathToPartitionInfo); @@ -1085,7 +1085,7 @@ return numEmptyPaths; } - private void addInputPaths(JobConf job, mapredWork work, String hiveScratchDir) + private void addInputPaths(JobConf job, MapredWork work, String hiveScratchDir) throws Exception { int numEmptyPaths = 0; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/FilterOperator.java (working copy) @@ -23,7 +23,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.filterDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.PrimitiveObjectInspector; @@ -32,7 +32,7 @@ /** * Filter operator implementation **/ -public class FilterOperator extends Operator implements +public class FilterOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/UnionOperator.java (working copy) @@ -24,7 +24,7 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.unionDesc; +import org.apache.hadoop.hive.ql.plan.UnionDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUtils.ReturnObjectInspectorResolver; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -35,7 +35,7 @@ /** * Union Operator Just forwards. Doesn't do anything itself. 
**/ -public class UnionOperator extends Operator implements Serializable { +public class UnionOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; StructObjectInspector[] parentObjInspectors; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java (working copy) @@ -25,9 +25,9 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.io.HiveKey; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.Serializer; @@ -42,7 +42,7 @@ /** * Reduce Sink Operator sends output to the reduce stage **/ -public class ReduceSinkOperator extends TerminalOperator +public class ReduceSinkOperator extends TerminalOperator implements Serializable { private static final long serialVersionUID = 1L; @@ -78,19 +78,19 @@ try { keyEval = new ExprNodeEvaluator[conf.getKeyCols().size()]; int i = 0; - for (exprNodeDesc e : conf.getKeyCols()) { + for (ExprNodeDesc e : conf.getKeyCols()) { keyEval[i++] = ExprNodeEvaluatorFactory.get(e); } valueEval = new ExprNodeEvaluator[conf.getValueCols().size()]; i = 0; - for (exprNodeDesc e : conf.getValueCols()) { + for (ExprNodeDesc e : conf.getValueCols()) { valueEval[i++] = ExprNodeEvaluatorFactory.get(e); } partitionEval = new ExprNodeEvaluator[conf.getPartitionCols().size()]; i = 0; - for (exprNodeDesc e : conf.getPartitionCols()) { + for (ExprNodeDesc e : conf.getPartitionCols()) { partitionEval[i++] = ExprNodeEvaluatorFactory.get(e); } @@ -98,13 +98,13 @@ tagByte[0] = (byte) tag; LOG.info("Using tag = " + tag); - tableDesc keyTableDesc = conf.getKeySerializeInfo(); + TableDesc keyTableDesc = conf.getKeySerializeInfo(); keySerializer = (Serializer) keyTableDesc.getDeserializerClass() .newInstance(); keySerializer.initialize(null, keyTableDesc.getProperties()); keyIsText = keySerializer.getSerializedClass().equals(Text.class); - tableDesc valueTableDesc = conf.getValueSerializeInfo(); + TableDesc valueTableDesc = conf.getValueSerializeInfo(); valueSerializer = (Serializer) valueTableDesc.getDeserializerClass() .newInstance(); valueSerializer.initialize(null, valueTableDesc.getProperties()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java (working copy) @@ -73,10 +73,10 @@ import org.apache.hadoop.hive.ql.parse.ErrorMsg; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.groupByDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; 
+import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.plan.PlanUtils.ExpressionTypes; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; @@ -103,8 +103,8 @@ KEY, VALUE, ALIAS }; - private static Map gWorkMap = Collections - .synchronizedMap(new HashMap()); + private static Map gWorkMap = Collections + .synchronizedMap(new HashMap()); static final private Log LOG = LogFactory.getLog(Utilities.class.getName()); public static void clearMapRedWork(Configuration job) { @@ -128,8 +128,8 @@ } } - public static mapredWork getMapRedWork(Configuration job) { - mapredWork gWork = null; + public static MapredWork getMapRedWork(Configuration job) { + MapredWork gWork = null; try { synchronized (gWorkMap) { gWork = gWorkMap.get(getJobName(job)); @@ -141,7 +141,7 @@ } InputStream in = new FileInputStream("HIVE_PLAN" + sanitizedJobId(job)); - mapredWork ret = deserializeMapRedWork(in, job); + MapredWork ret = deserializeMapRedWork(in, job); gWork = ret; gWork.initialize(); gWorkMap.put(getJobName(job), gWork); @@ -185,7 +185,7 @@ } } - public static void setMapRedWork(Configuration job, mapredWork w) { + public static void setMapRedWork(Configuration job, MapredWork w) { try { // use the default file system of the job FileSystem fs = FileSystem.get(job); @@ -236,7 +236,7 @@ XMLEncoder e = new XMLEncoder(out); // workaround for java 1.5 e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate()); - e.setPersistenceDelegate(groupByDesc.Mode.class, new EnumDelegate()); + e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate()); e .setPersistenceDelegate(Operator.ProgressCounter.class, new EnumDelegate()); @@ -247,22 +247,22 @@ /** * Serialize the plan object to an output stream. DO NOT use this to write to - * standard output since it closes the output stream DO USE mapredWork.toXML() + * standard output since it closes the output stream DO USE MapredWork.toXML() * instead */ - public static void serializeMapRedWork(mapredWork w, OutputStream out) { + public static void serializeMapRedWork(MapredWork w, OutputStream out) { XMLEncoder e = new XMLEncoder(out); // workaround for java 1.5 e.setPersistenceDelegate(ExpressionTypes.class, new EnumDelegate()); - e.setPersistenceDelegate(groupByDesc.Mode.class, new EnumDelegate()); + e.setPersistenceDelegate(GroupByDesc.Mode.class, new EnumDelegate()); e.writeObject(w); e.close(); } - public static mapredWork deserializeMapRedWork(InputStream in, + public static MapredWork deserializeMapRedWork(InputStream in, Configuration conf) { XMLDecoder d = new XMLDecoder(in, null, null, conf.getClassLoader()); - mapredWork ret = (mapredWork) d.readObject(); + MapredWork ret = (MapredWork) d.readObject(); d.close(); return (ret); } @@ -285,10 +285,10 @@ } } - public static tableDesc defaultTd; + public static TableDesc defaultTd; static { // by default we expect ^A separated strings - // This tableDesc does not provide column names. We should always use + // This TableDesc does not provide column names. We should always use // PlanUtils.getDefaultTableDesc(String separatorCode, String columns) // or getBinarySortableTableDesc(List fieldSchemas) when // we know the column names. 
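
The Utilities hunks rename the plan-serialization helpers' types without changing behaviour; serializeMapRedWork still closes the stream it is handed, which is why the comment steers callers toward MapredWork.toXML() for standard output. A small round-trip sketch with the renamed types, using an in-memory buffer; the no-arg MapredWork constructor and the empty plan are assumptions made only for illustration.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.plan.MapredWork;

public class PlanRoundTrip {
  public static void main(String[] args) {
    // An empty plan is assumed here purely for illustration; a real plan is
    // produced by the compiler and populated with map/reduce work.
    MapredWork work = new MapredWork();

    // serializeMapRedWork closes the stream it writes to, so use a private
    // buffer rather than System.out (MapredWork.toXML() is the stdout path).
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    Utilities.serializeMapRedWork(work, out);

    // Read the XML-encoded plan back with the renamed deserializer.
    Configuration conf = new Configuration();
    MapredWork restored = Utilities.deserializeMapRedWork(
        new ByteArrayInputStream(out.toByteArray()), conf);
    System.out.println("plan round-tripped: " + (restored != null));
  }
}
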
@@ -378,14 +378,14 @@ } } - public static tableDesc getTableDesc(Table tbl) { - return (new tableDesc(tbl.getDeserializer().getClass(), tbl + public static TableDesc getTableDesc(Table tbl) { + return (new TableDesc(tbl.getDeserializer().getClass(), tbl .getInputFormatClass(), tbl.getOutputFormatClass(), tbl.getSchema())); } // column names and column types are all delimited by comma - public static tableDesc getTableDesc(String cols, String colTypes) { - return (new tableDesc(LazySimpleSerDe.class, SequenceFileInputFormat.class, + public static TableDesc getTableDesc(String cols, String colTypes) { + return (new TableDesc(LazySimpleSerDe.class, SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class, Utilities.makeProperties( org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode, @@ -393,15 +393,15 @@ org.apache.hadoop.hive.serde.Constants.LIST_COLUMN_TYPES, colTypes))); } - public static partitionDesc getPartitionDesc(Partition part) + public static PartitionDesc getPartitionDesc(Partition part) throws HiveException { - return (new partitionDesc(part)); + return (new PartitionDesc(part)); } - public static void addMapWork(mapredWork mr, Table tbl, String alias, + public static void addMapWork(MapredWork mr, Table tbl, String alias, Operator work) { mr.addMapWork(tbl.getDataLocation().getPath(), alias, work, - new partitionDesc(getTableDesc(tbl), null)); + new PartitionDesc(getTableDesc(tbl), null)); } private static String getOpTreeSkel_helper(Operator op, String indent) { Index: ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/CommonJoinOperator.java (working copy) @@ -34,10 +34,10 @@ import org.apache.hadoop.hive.ql.exec.persistence.RowContainer; import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.joinCond; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.JoinCondDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe; @@ -52,7 +52,7 @@ /** * Join operator implementation. 
*/ -public abstract class CommonJoinOperator extends +public abstract class CommonJoinOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; static final protected Log LOG = LogFactory.getLog(CommonJoinOperator.class @@ -104,7 +104,7 @@ transient static protected Byte[] order; // order in which the results should // be output - transient protected joinCond[] condn; + transient protected JoinCondDesc[] condn; transient protected boolean noOuterJoin; transient private Object[] dummyObj; // for outer joins, contains the // potential nulls for the concerned @@ -122,7 +122,7 @@ transient private Map> posToAliasMap; transient LazyBinarySerDe[] spillTableSerDe; - transient protected Map spillTableDesc; // spill tables are + transient protected Map spillTableDesc; // spill tables are // used if the join // input is too large // to fit in memory @@ -137,17 +137,17 @@ transient boolean handleSkewJoin = false; protected int populateJoinKeyValue(Map> outMap, - Map> inputMap) { + Map> inputMap) { int total = 0; - Iterator>> entryIter = inputMap + Iterator>> entryIter = inputMap .entrySet().iterator(); while (entryIter.hasNext()) { - Map.Entry> e = entryIter.next(); + Map.Entry> e = entryIter.next(); Byte key = order[e.getKey()]; - List expr = e.getValue(); + List expr = e.getValue(); int sz = expr.size(); total += sz; @@ -199,7 +199,7 @@ } - protected static ObjectInspector getJoinOutputObjectInspector( + protected static ObjectInspector getJoinOutputObjectInspector( Byte[] order, Map> aliasToObjectInspectors, T conf) { ArrayList structFieldObjectInspectors = new ArrayList(); @@ -288,7 +288,7 @@ RowContainer getRowContainer(Configuration hconf, byte pos, Byte alias, int containerSize) throws HiveException { - tableDesc tblDesc = getSpillTableDesc(alias); + TableDesc tblDesc = getSpillTableDesc(alias); SerDe serde = getSpillSerDe(alias); if (serde == null) { @@ -311,7 +311,7 @@ } private SerDe getSpillSerDe(byte alias) { - tableDesc desc = getSpillTableDesc(alias); + TableDesc desc = getSpillTableDesc(alias); if (desc == null) { return null; } @@ -328,14 +328,14 @@ transient boolean newGroupStarted = false; - public tableDesc getSpillTableDesc(Byte alias) { + public TableDesc getSpillTableDesc(Byte alias) { if (spillTableDesc == null || spillTableDesc.size() == 0) { initSpillTables(); } return spillTableDesc.get(alias); } - public Map getSpillTableDesc() { + public Map getSpillTableDesc() { if (spillTableDesc == null) { initSpillTables(); } @@ -343,10 +343,10 @@ } private void initSpillTables() { - Map> exprs = conf.getExprs(); - spillTableDesc = new HashMap(exprs.size()); + Map> exprs = conf.getExprs(); + spillTableDesc = new HashMap(exprs.size()); for (int tag = 0; tag < exprs.size(); tag++) { - List valueCols = exprs.get((byte) tag); + List valueCols = exprs.get((byte) tag); int columnSize = valueCols.size(); StringBuffer colNames = new StringBuffer(); StringBuffer colTypes = new StringBuffer(); @@ -364,7 +364,7 @@ // remove the last ',' colNames.setLength(colNames.length() - 1); colTypes.setLength(colTypes.length() - 1); - tableDesc tblDesc = new tableDesc(LazyBinarySerDe.class, + TableDesc tblDesc = new TableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class, Utilities.makeProperties( org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, "" @@ -656,7 +656,7 @@ int type = condn[joinPos - 1].getType(); // process all nulls for RIGHT and FULL OUTER JOINS - if (((type == joinDesc.RIGHT_OUTER_JOIN) || (type 
== joinDesc.FULL_OUTER_JOIN)) + if (((type == JoinDesc.RIGHT_OUTER_JOIN) || (type == JoinDesc.FULL_OUTER_JOIN)) && !newObjNull && (inputNulls == null) && firstRow) { boolean[] newNulls = new boolean[intObj.getCurSize()]; for (int i = 0; i < newNulls.length - 1; i++) { @@ -671,21 +671,21 @@ return null; } - if (type == joinDesc.INNER_JOIN) { + if (type == JoinDesc.INNER_JOIN) { return joinObjectsInnerJoin(resNulls, inputNulls, newObj, intObj, left, newObjNull); - } else if (type == joinDesc.LEFT_OUTER_JOIN) { + } else if (type == JoinDesc.LEFT_OUTER_JOIN) { return joinObjectsLeftOuterJoin(resNulls, inputNulls, newObj, intObj, left, newObjNull); - } else if (type == joinDesc.RIGHT_OUTER_JOIN) { + } else if (type == JoinDesc.RIGHT_OUTER_JOIN) { return joinObjectsRightOuterJoin(resNulls, inputNulls, newObj, intObj, left, newObjNull, firstRow); - } else if (type == joinDesc.LEFT_SEMI_JOIN) { + } else if (type == JoinDesc.LEFT_SEMI_JOIN) { return joinObjectsLeftSemiJoin(resNulls, inputNulls, newObj, intObj, left, newObjNull); } - assert (type == joinDesc.FULL_OUTER_JOIN); + assert (type == JoinDesc.FULL_OUTER_JOIN); return joinObjectsFullOuterJoin(resNulls, inputNulls, newObj, intObj, left, newObjNull, firstRow); } @@ -712,7 +712,7 @@ // check for skipping in case of left semi join if (aliasNum > 0 - && condn[aliasNum - 1].getType() == joinDesc.LEFT_SEMI_JOIN + && condn[aliasNum - 1].getType() == JoinDesc.LEFT_SEMI_JOIN && newObj != dummyObj[aliasNum]) { // successful match skipping = true; } @@ -784,7 +784,7 @@ } protected void checkAndGenObject() throws HiveException { - if (condn[0].getType() == joinDesc.UNIQUE_JOIN) { + if (condn[0].getType() == JoinDesc.UNIQUE_JOIN) { new IntermediateObject(new ArrayList[numAliases], 0); // Check if results need to be emitted. 
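
The CommonJoinOperator changes above are again pure renames (tableDesc -> TableDesc, joinDesc -> JoinDesc, joinCond -> JoinCondDesc). Below is a sketch of the spill-table descriptor that initSpillTables builds per join alias, using the renamed TableDesc; the column names/types and the Constants.LIST_COLUMNS key are assumptions used only to keep the snippet self-contained.

import java.util.Properties;

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat;
import org.apache.hadoop.hive.ql.plan.TableDesc;
import org.apache.hadoop.hive.serde.Constants;
import org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe;
import org.apache.hadoop.mapred.SequenceFileInputFormat;

public class SpillTableDescSketch {
  public static void main(String[] args) {
    // Hypothetical column lists for one join alias; the operator derives these
    // from the value expressions registered for that alias.
    String colNames = "joinkey0,joinvalue0";
    String colTypes = "string,string";

    // Constants.LIST_COLUMNS is assumed to be the column-name key; the
    // SERIALIZATION_FORMAT and LIST_COLUMN_TYPES keys appear in the patch itself.
    Properties props = Utilities.makeProperties(
        Constants.SERIALIZATION_FORMAT, "" + Utilities.ctrlaCode,
        Constants.LIST_COLUMNS, colNames,
        Constants.LIST_COLUMN_TYPES, colTypes);

    // Same constructor shape as the renamed TableDesc built in initSpillTables:
    // LazyBinary rows spilled to SequenceFiles when a join input overflows memory.
    TableDesc spillDesc = new TableDesc(LazyBinarySerDe.class,
        SequenceFileInputFormat.class, HiveSequenceFileOutputFormat.class, props);
    System.out.println(spillDesc);
  }
}
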
Index: ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java (working copy) @@ -66,17 +66,17 @@ import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; -import org.apache.hadoop.hive.ql.plan.alterTableDesc; -import org.apache.hadoop.hive.ql.plan.createTableDesc; -import org.apache.hadoop.hive.ql.plan.createTableLikeDesc; -import org.apache.hadoop.hive.ql.plan.createViewDesc; -import org.apache.hadoop.hive.ql.plan.descFunctionDesc; -import org.apache.hadoop.hive.ql.plan.descTableDesc; -import org.apache.hadoop.hive.ql.plan.dropTableDesc; -import org.apache.hadoop.hive.ql.plan.showFunctionsDesc; -import org.apache.hadoop.hive.ql.plan.showPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.showTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.showTablesDesc; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.plan.CreateViewDesc; +import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; +import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.Deserializer; @@ -117,27 +117,27 @@ try { db = Hive.get(conf); - createTableDesc crtTbl = work.getCreateTblDesc(); + CreateTableDesc crtTbl = work.getCreateTblDesc(); if (crtTbl != null) { return createTable(db, crtTbl); } - createTableLikeDesc crtTblLike = work.getCreateTblLikeDesc(); + CreateTableLikeDesc crtTblLike = work.getCreateTblLikeDesc(); if (crtTblLike != null) { return createTableLike(db, crtTblLike); } - dropTableDesc dropTbl = work.getDropTblDesc(); + DropTableDesc dropTbl = work.getDropTblDesc(); if (dropTbl != null) { return dropTable(db, dropTbl); } - alterTableDesc alterTbl = work.getAlterTblDesc(); + AlterTableDesc alterTbl = work.getAlterTblDesc(); if (alterTbl != null) { return alterTable(db, alterTbl); } - createViewDesc crtView = work.getCreateViewDesc(); + CreateViewDesc crtView = work.getCreateViewDesc(); if (crtView != null) { return createView(db, crtView); } @@ -152,32 +152,32 @@ return msck(db, msckDesc); } - descTableDesc descTbl = work.getDescTblDesc(); + DescTableDesc descTbl = work.getDescTblDesc(); if (descTbl != null) { return describeTable(db, descTbl); } - descFunctionDesc descFunc = work.getDescFunctionDesc(); + DescFunctionDesc descFunc = work.getDescFunctionDesc(); if (descFunc != null) { return describeFunction(descFunc); } - showTablesDesc showTbls = work.getShowTblsDesc(); + ShowTablesDesc showTbls = work.getShowTblsDesc(); if (showTbls != null) { return showTables(db, showTbls); } - showTableStatusDesc showTblStatus = work.getShowTblStatusDesc(); + ShowTableStatusDesc showTblStatus = work.getShowTblStatusDesc(); if (showTblStatus != null) { return showTableStatus(db, showTblStatus); } - showFunctionsDesc showFuncs = work.getShowFuncsDesc(); + 
ShowFunctionsDesc showFuncs = work.getShowFuncsDesc(); if (showFuncs != null) { return showFunctions(showFuncs); } - showPartitionsDesc showParts = work.getShowPartsDesc(); + ShowPartitionsDesc showParts = work.getShowPartsDesc(); if (showParts != null) { return showPartitions(db, showParts); } @@ -360,7 +360,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int showPartitions(Hive db, showPartitionsDesc showParts) + private int showPartitions(Hive db, ShowPartitionsDesc showParts) throws HiveException { // get the partitions for the table and populate the output String tabName = showParts.getTabName(); @@ -413,7 +413,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int showTables(Hive db, showTablesDesc showTbls) throws HiveException { + private int showTables(Hive db, ShowTablesDesc showTbls) throws HiveException { // get the tables for the desired pattenn - populate the output stream List tbls = null; if (showTbls.getPattern() != null) { @@ -458,7 +458,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int showFunctions(showFunctionsDesc showFuncs) throws HiveException { + private int showFunctions(ShowFunctionsDesc showFuncs) throws HiveException { // get the tables for the desired pattenn - populate the output stream Set funcs = null; if (showFuncs.getPattern() != null) { @@ -501,7 +501,7 @@ * is the function we are describing * @throws HiveException */ - private int describeFunction(descFunctionDesc descFunc) throws HiveException { + private int describeFunction(DescFunctionDesc descFunc) throws HiveException { String funcName = descFunc.getName(); // write the results in the file @@ -510,14 +510,14 @@ DataOutput outStream = (DataOutput) fs.create(descFunc.getResFile()); // get the function documentation - description desc = null; + Description desc = null; Class funcClass = null; FunctionInfo functionInfo = FunctionRegistry.getFunctionInfo(funcName); if (functionInfo != null) { funcClass = functionInfo.getFunctionClass(); } if (funcClass != null) { - desc = funcClass.getAnnotation(description.class); + desc = funcClass.getAnnotation(Description.class); } if (desc != null) { outStream.writeBytes(desc.value().replace("_FUNC_", funcName)); @@ -564,7 +564,7 @@ * tables we are interested in * @return Return 0 when execution succeeds and above 0 if it fails. */ - private int showTableStatus(Hive db, showTableStatusDesc showTblStatus) + private int showTableStatus(Hive db, ShowTableStatusDesc showTblStatus) throws HiveException { // get the tables for the desired pattenn - populate the output stream List tbls = new ArrayList
(); @@ -684,7 +684,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int describeTable(Hive db, descTableDesc descTbl) + private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException { String colPath = descTbl.getTableName(); String tableName = colPath.substring(0, @@ -939,7 +939,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int alterTable(Hive db, alterTableDesc alterTbl) throws HiveException { + private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { // alter the table Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, alterTbl .getOldName()); @@ -949,9 +949,9 @@ } Table oldTbl = tbl.copy(); - if (alterTbl.getOp() == alterTableDesc.alterTableTypes.RENAME) { + if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.RENAME) { tbl.getTTable().setTableName(alterTbl.getNewName()); - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDCOLS) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.ADDCOLS) { List newCols = alterTbl.getNewCols(); List oldCols = tbl.getCols(); if (tbl.getSerializationLib().equals( @@ -978,7 +978,7 @@ } tbl.getTTable().getSd().setCols(oldCols); } - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.RENAMECOLUMN) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.RENAMECOLUMN) { List oldCols = tbl.getCols(); List newCols = new ArrayList(); Iterator iterOldCols = oldCols.iterator(); @@ -1044,7 +1044,7 @@ } tbl.getTTable().getSd().setCols(newCols); - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.REPLACECOLS) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.REPLACECOLS) { // change SerDe to LazySimpleSerDe if it is columnsetSerDe if (tbl.getSerializationLib().equals( "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { @@ -1061,12 +1061,12 @@ return 1; } tbl.getTTable().getSd().setCols(alterTbl.getNewCols()); - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDPROPS) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.ADDPROPS) { tbl.getTTable().getParameters().putAll(alterTbl.getProps()); - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDSERDEPROPS) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.ADDSERDEPROPS) { tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll( alterTbl.getProps()); - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDSERDE) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.ADDSERDE) { tbl.setSerializationLib(alterTbl.getSerdeName()); if ((alterTbl.getProps() != null) && (alterTbl.getProps().size() > 0)) { tbl.getTTable().getSd().getSerdeInfo().getParameters().putAll( @@ -1077,13 +1077,13 @@ tbl.reinitSerDe(); tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getName(), tbl .getDeserializer())); - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDFILEFORMAT) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.ADDFILEFORMAT) { tbl.getTTable().getSd().setInputFormat(alterTbl.getInputFormat()); tbl.getTTable().getSd().setOutputFormat(alterTbl.getOutputFormat()); if (alterTbl.getSerdeName() != null) { tbl.setSerializationLib(alterTbl.getSerdeName()); } - } else if (alterTbl.getOp() == alterTableDesc.alterTableTypes.ADDCLUSTERSORTCOLUMN) { + } else if (alterTbl.getOp() == AlterTableDesc.alterTableTypes.ADDCLUSTERSORTCOLUMN) { // validate sort columns and bucket columns List columns = 
Utilities.getColumnNamesFromFieldSchema(tbl .getCols()); @@ -1150,7 +1150,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int dropTable(Hive db, dropTableDesc dropTbl) throws HiveException { + private int dropTable(Hive db, DropTableDesc dropTbl) throws HiveException { // We need to fetch the table before it is dropped so that it can be passed // to // post-execution hook @@ -1255,7 +1255,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int createTable(Hive db, createTableDesc crtTbl) throws HiveException { + private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { // create the table Table tbl = new Table(crtTbl.getTableName()); StorageDescriptor tblStorDesc = tbl.getTTable().getSd(); @@ -1394,7 +1394,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int createTableLike(Hive db, createTableLikeDesc crtTbl) + private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveException { // Get the existing table Table tbl = db.getTable(MetaStoreUtils.DEFAULT_DATABASE_NAME, crtTbl @@ -1433,7 +1433,7 @@ * @throws HiveException * Throws this exception if an unexpected error occurs. */ - private int createView(Hive db, createViewDesc crtView) throws HiveException { + private int createView(Hive db, CreateViewDesc crtView) throws HiveException { Table tbl = new Table(crtView.getViewName()); tbl.setViewOriginalText(crtView.getViewOriginalText()); tbl.setViewExpandedText(crtView.getViewExpandedText()); Index: ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/MoveTask.java (working copy) @@ -34,16 +34,16 @@ import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.plan.loadFileDesc; -import org.apache.hadoop.hive.ql.plan.loadTableDesc; -import org.apache.hadoop.hive.ql.plan.moveWork; +import org.apache.hadoop.hive.ql.plan.LoadFileDesc; +import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.MoveWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.util.StringUtils; /** * MoveTask implementation **/ -public class MoveTask extends Task implements Serializable { +public class MoveTask extends Task implements Serializable { private static final long serialVersionUID = 1L; @@ -57,7 +57,7 @@ try { // Do any hive related operations like moving tables and files // to appropriate locations - loadFileDesc lfd = work.getLoadFileWork(); + LoadFileDesc lfd = work.getLoadFileWork(); if (lfd != null) { Path targetPath = new Path(lfd.getTargetDir()); Path sourcePath = new Path(lfd.getSourceDir()); @@ -108,7 +108,7 @@ } // Next we do this for tables and partitions - loadTableDesc tbd = work.getLoadTableWork(); + LoadTableDesc tbd = work.getLoadTableWork(); if (tbd != null) { String mesg = "Loading data to table " + tbd.getTable().getTableName() @@ -181,12 +181,12 @@ * Does the move task involve moving to a local file system */ public boolean isLocal() { - loadTableDesc tbd = work.getLoadTableWork(); + LoadTableDesc tbd = work.getLoadTableWork(); if (tbd != null) { return false; } - loadFileDesc lfd = work.getLoadFileWork(); + LoadFileDesc lfd = 
work.getLoadFileWork(); if (lfd != null) { if (lfd.getIsDfsDir()) { return false; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/LimitOperator.java (working copy) @@ -22,13 +22,13 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.limitDesc; +import org.apache.hadoop.hive.ql.plan.LimitDesc; import org.apache.hadoop.hive.ql.plan.api.OperatorType; /** * Limit operator implementation Limits the number of rows to be passed on. **/ -public class LimitOperator extends Operator implements Serializable { +public class LimitOperator extends Operator implements Serializable { private static final long serialVersionUID = 1L; transient protected int limit; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/CopyTask.java (working copy) @@ -25,14 +25,14 @@ import org.apache.hadoop.fs.FileUtil; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.ql.parse.LoadSemanticAnalyzer; -import org.apache.hadoop.hive.ql.plan.copyWork; +import org.apache.hadoop.hive.ql.plan.CopyWork; import org.apache.hadoop.hive.ql.plan.api.StageType; import org.apache.hadoop.util.StringUtils; /** * CopyTask implementation **/ -public class CopyTask extends Task implements Serializable { +public class CopyTask extends Task implements Serializable { private static final long serialVersionUID = 1L; Index: ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/exec/ExecReducer.java (working copy) @@ -30,8 +30,8 @@ import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.ExecMapper.reportStats; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.serde2.Deserializer; import org.apache.hadoop.hive.serde2.SerDe; import org.apache.hadoop.hive.serde2.SerDeException; @@ -77,8 +77,8 @@ fieldNames = fieldNameArray.toArray(new String[0]); } - tableDesc keyTableDesc; - tableDesc[] valueTableDesc; + TableDesc keyTableDesc; + TableDesc[] valueTableDesc; @Override public void configure(JobConf job) { @@ -100,7 +100,7 @@ l4j.info("cannot get classpath: " + e.getMessage()); } jc = job; - mapredWork gWork = Utilities.getMapRedWork(job); + MapredWork gWork = Utilities.getMapRedWork(job); reducer = gWork.getReducer(); reducer.setParentOperators(null); // clear out any parents as reducer is the // root @@ -111,7 +111,7 @@ .getDeserializerClass(), null); inputKeyDeserializer.initialize(null, keyTableDesc.getProperties()); keyObjectInspector = inputKeyDeserializer.getObjectInspector(); - valueTableDesc = new tableDesc[gWork.getTagToValueDesc().size()]; + valueTableDesc = new TableDesc[gWork.getTagToValueDesc().size()]; for (int tag = 0; tag < gWork.getTagToValueDesc().size(); 
tag++) { // We should initialize the SerDe with the TypeInfo when available. valueTableDesc[tag] = gWork.getTagToValueDesc().get(tag); Index: ql/src/java/org/apache/hadoop/hive/ql/plan/udtfDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/udtfDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/udtfDesc.java (working copy) @@ -1,56 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; - -/** - * All member variables should have a setters and getters of the form get and set or else they won't be recreated properly at run - * time. - * - */ -@explain(displayName = "UDTF Operator") -public class udtfDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private GenericUDTF genericUDTF; - - public udtfDesc() { - } - - public udtfDesc(final GenericUDTF genericUDTF) { - this.genericUDTF = genericUDTF; - } - - public GenericUDTF getGenericUDTF() { - return genericUDTF; - } - - public void setGenericUDTF(final GenericUDTF genericUDTF) { - this.genericUDTF = genericUDTF; - } - - @explain(displayName = "function name") - public String getUDTFName() { - return genericUDTF.toString(); - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeColumnDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeColumnDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeColumnDesc.java (working copy) @@ -1,126 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; - -public class exprNodeColumnDesc extends exprNodeDesc implements Serializable { - private static final long serialVersionUID = 1L; - - /** - * The column name. - */ - private String column; - - /** - * The alias of the table. - */ - private String tabAlias; - - /** - * Is the column a partitioned column. - */ - private boolean isPartitionCol; - - public exprNodeColumnDesc() { - } - - public exprNodeColumnDesc(TypeInfo typeInfo, String column, String tabAlias, - boolean isPartitionCol) { - super(typeInfo); - this.column = column; - this.tabAlias = tabAlias; - this.isPartitionCol = isPartitionCol; - } - - public exprNodeColumnDesc(Class c, String column, String tabAlias, - boolean isPartitionCol) { - super(TypeInfoFactory.getPrimitiveTypeInfoFromJavaPrimitive(c)); - this.column = column; - this.tabAlias = tabAlias; - this.isPartitionCol = isPartitionCol; - } - - public String getColumn() { - return column; - } - - public void setColumn(String column) { - this.column = column; - } - - public String getTabAlias() { - return tabAlias; - } - - public void setTabAlias(String tabAlias) { - this.tabAlias = tabAlias; - } - - public boolean getIsParititonCol() { - return isPartitionCol; - } - - public void setIsPartitionCol(boolean isPartitionCol) { - this.isPartitionCol = isPartitionCol; - } - - @Override - public String toString() { - return "Column[" + column + "]"; - } - - @explain(displayName = "expr") - @Override - public String getExprString() { - return getColumn(); - } - - @Override - public List getCols() { - List lst = new ArrayList(); - lst.add(column); - return lst; - } - - @Override - public exprNodeDesc clone() { - return new exprNodeColumnDesc(typeInfo, column, tabAlias, isPartitionCol); - } - - @Override - public boolean isSame(Object o) { - if (!(o instanceof exprNodeColumnDesc)) { - return false; - } - exprNodeColumnDesc dest = (exprNodeColumnDesc) o; - if (!column.equals(dest.getColumn())) { - return false; - } - if (!typeInfo.equals(dest.getTypeInfo())) { - return false; - } - return true; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/UDTFDesc.java (revision 0) @@ -0,0 +1,56 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.udf.generic.GenericUDTF; + +/** + * All member variables should have a setters and getters of the form get and set or else they won't be recreated properly at run + * time. + * + */ +@Explain(displayName = "UDTF Operator") +public class UDTFDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private GenericUDTF genericUDTF; + + public UDTFDesc() { + } + + public UDTFDesc(GenericUDTF genericUDTF) { + this.genericUDTF = genericUDTF; + } + + public GenericUDTF getGenericUDTF() { + return genericUDTF; + } + + public void setGenericUDTF(GenericUDTF genericUDTF) { + this.genericUDTF = genericUDTF; + } + + @Explain(displayName = "function name") + public String getUDTFName() { + return genericUDTF.toString(); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/joinCond.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/joinCond.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/joinCond.java (working copy) @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -/** - * Join conditions Descriptor implementation. 
- * - */ -public class joinCond implements Serializable { - private static final long serialVersionUID = 1L; - private int left; - private int right; - private int type; - private boolean preserved; - - public joinCond() { - } - - public joinCond(int left, int right, int type) { - this.left = left; - this.right = right; - this.type = type; - } - - public joinCond(org.apache.hadoop.hive.ql.parse.joinCond condn) { - left = condn.getLeft(); - right = condn.getRight(); - preserved = condn.getPreserved(); - switch (condn.getJoinType()) { - case INNER: - type = joinDesc.INNER_JOIN; - break; - case LEFTOUTER: - type = joinDesc.LEFT_OUTER_JOIN; - break; - case RIGHTOUTER: - type = joinDesc.RIGHT_OUTER_JOIN; - break; - case FULLOUTER: - type = joinDesc.FULL_OUTER_JOIN; - break; - case UNIQUE: - type = joinDesc.UNIQUE_JOIN; - break; - case LEFTSEMI: - type = joinDesc.LEFT_SEMI_JOIN; - break; - default: - assert false; - } - } - - /** - * @return true if table is preserved, false otherwise - */ - public boolean getPreserved() { - return preserved; - } - - /** - * @param preserved - * if table is preserved, false otherwise - */ - public void setPreserved(final boolean preserved) { - this.preserved = preserved; - } - - public int getLeft() { - return left; - } - - public void setLeft(final int left) { - this.left = left; - } - - public int getRight() { - return right; - } - - public void setRight(final int right) { - this.right = right; - } - - public int getType() { - return type; - } - - public void setType(final int type) { - this.type = type; - } - - @explain - public String getJoinCondString() { - StringBuilder sb = new StringBuilder(); - - switch (type) { - case joinDesc.INNER_JOIN: - sb.append("Inner Join "); - break; - case joinDesc.FULL_OUTER_JOIN: - sb.append("Outer Join "); - break; - case joinDesc.LEFT_OUTER_JOIN: - sb.append("Left Outer Join"); - break; - case joinDesc.RIGHT_OUTER_JOIN: - sb.append("Right Outer Join"); - break; - case joinDesc.UNIQUE_JOIN: - sb.append("Unique Join"); - break; - case joinDesc.LEFT_SEMI_JOIN: - sb.append("Left Semi Join "); - break; - default: - sb.append("Unknow Join "); - break; - } - - sb.append(left); - sb.append(" to "); - sb.append(right); - - return sb.toString(); - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/fileSinkDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/fileSinkDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/fileSinkDesc.java (working copy) @@ -1,105 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "File Output Operator") -public class fileSinkDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String dirName; - private tableDesc tableInfo; - private boolean compressed; - private int destTableId; - private String compressCodec; - private String compressType; - - public fileSinkDesc() { - } - - public fileSinkDesc(final String dirName, final tableDesc tableInfo, - final boolean compressed, int destTableId) { - - this.dirName = dirName; - this.tableInfo = tableInfo; - this.compressed = compressed; - this.destTableId = destTableId; - } - - public fileSinkDesc(final String dirName, final tableDesc tableInfo, - final boolean compressed) { - - this.dirName = dirName; - this.tableInfo = tableInfo; - this.compressed = compressed; - destTableId = 0; - } - - @explain(displayName = "directory", normalExplain = false) - public String getDirName() { - return dirName; - } - - public void setDirName(final String dirName) { - this.dirName = dirName; - } - - @explain(displayName = "table") - public tableDesc getTableInfo() { - return tableInfo; - } - - public void setTableInfo(final tableDesc tableInfo) { - this.tableInfo = tableInfo; - } - - @explain(displayName = "compressed") - public boolean getCompressed() { - return compressed; - } - - public void setCompressed(boolean compressed) { - this.compressed = compressed; - } - - @explain(displayName = "GlobalTableId") - public int getDestTableId() { - return destTableId; - } - - public void setDestTableId(int destTableId) { - this.destTableId = destTableId; - } - - public String getCompressCodec() { - return compressCodec; - } - - public void setCompressCodec(String intermediateCompressorCodec) { - compressCodec = intermediateCompressorCodec; - } - - public String getCompressType() { - return compressType; - } - - public void setCompressType(String intermediateCompressType) { - compressType = intermediateCompressType; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ForwardDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ForwardDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ForwardDesc.java (revision 0) @@ -0,0 +1,32 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Forward") +public class ForwardDesc implements Serializable { + private static final long serialVersionUID = 1L; + + @SuppressWarnings("nls") + public ForwardDesc() { + // throw new + // RuntimeException("This class does not need to be instantiated"); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ScriptDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ScriptDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ScriptDesc.java (revision 0) @@ -0,0 +1,111 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.ql.exec.RecordReader; +import org.apache.hadoop.hive.ql.exec.RecordWriter; + +@Explain(displayName = "Transform Operator") +public class ScriptDesc implements Serializable { + private static final long serialVersionUID = 1L; + private String scriptCmd; + // Describe how to deserialize data back from user script + private TableDesc scriptOutputInfo; + private Class inRecordWriterClass; + + // Describe how to serialize data out to user script + private TableDesc scriptInputInfo; + private Class outRecordReaderClass; + + public ScriptDesc() { + } + + public ScriptDesc(String scriptCmd, TableDesc scriptInputInfo, + Class inRecordWriterClass, + TableDesc scriptOutputInfo, + Class outRecordReaderClass) { + + this.scriptCmd = scriptCmd; + this.scriptInputInfo = scriptInputInfo; + this.inRecordWriterClass = inRecordWriterClass; + this.scriptOutputInfo = scriptOutputInfo; + this.outRecordReaderClass = outRecordReaderClass; + } + + @Explain(displayName = "command") + public String getScriptCmd() { + return scriptCmd; + } + + public void setScriptCmd(String scriptCmd) { + this.scriptCmd = scriptCmd; + } + + @Explain(displayName = "output info") + public TableDesc getScriptOutputInfo() { + return scriptOutputInfo; + } + + public void setScriptOutputInfo(TableDesc scriptOutputInfo) { + this.scriptOutputInfo = scriptOutputInfo; + } + + public TableDesc getScriptInputInfo() { + return scriptInputInfo; + } + + public void setScriptInputInfo(TableDesc scriptInputInfo) { + this.scriptInputInfo = scriptInputInfo; + } + + /** + * @return the outRecordReaderClass + */ + public Class getOutRecordReaderClass() { + return outRecordReaderClass; + } + + /** + * @param outRecordReaderClass + * the outRecordReaderClass to set + */ + public void setOutRecordReaderClass( + Class outRecordReaderClass) { + this.outRecordReaderClass = outRecordReaderClass; + } + + /** + * @return the inRecordWriterClass + */ + public Class getInRecordWriterClass() { + return 
inRecordWriterClass; + } + + /** + * @param inRecordWriterClass + * the inRecordWriterClass to set + */ + public void setInRecordWriterClass( + Class inRecordWriterClass) { + this.inRecordWriterClass = inRecordWriterClass; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLWork.java (working copy) @@ -26,19 +26,19 @@ public class DDLWork implements Serializable { private static final long serialVersionUID = 1L; - private createTableDesc createTblDesc; - private createTableLikeDesc createTblLikeDesc; - private createViewDesc createVwDesc; - private dropTableDesc dropTblDesc; - private alterTableDesc alterTblDesc; - private showTablesDesc showTblsDesc; - private showFunctionsDesc showFuncsDesc; - private descFunctionDesc descFunctionDesc; - private showPartitionsDesc showPartsDesc; - private descTableDesc descTblDesc; + private CreateTableDesc createTblDesc; + private CreateTableLikeDesc createTblLikeDesc; + private CreateViewDesc createVwDesc; + private DropTableDesc dropTblDesc; + private AlterTableDesc alterTblDesc; + private ShowTablesDesc showTblsDesc; + private ShowFunctionsDesc showFuncsDesc; + private DescFunctionDesc DescFunctionDesc; + private ShowPartitionsDesc showPartsDesc; + private DescTableDesc descTblDesc; private AddPartitionDesc addPartitionDesc; private MsckDesc msckDesc; - private showTableStatusDesc showTblStatusDesc; + private ShowTableStatusDesc showTblStatusDesc; /** * ReadEntitites that are passed to the hooks. @@ -62,7 +62,7 @@ * alter table descriptor */ public DDLWork(Set inputs, Set outputs, - alterTableDesc alterTblDesc) { + AlterTableDesc alterTblDesc) { this(inputs, outputs); this.alterTblDesc = alterTblDesc; } @@ -72,7 +72,7 @@ * create table descriptor */ public DDLWork(Set inputs, Set outputs, - createTableDesc createTblDesc) { + CreateTableDesc createTblDesc) { this(inputs, outputs); this.createTblDesc = createTblDesc; @@ -83,7 +83,7 @@ * create table like descriptor */ public DDLWork(Set inputs, Set outputs, - createTableLikeDesc createTblLikeDesc) { + CreateTableLikeDesc createTblLikeDesc) { this(inputs, outputs); this.createTblLikeDesc = createTblLikeDesc; @@ -94,7 +94,7 @@ * create view descriptor */ public DDLWork(Set inputs, Set outputs, - createViewDesc createVwDesc) { + CreateViewDesc createVwDesc) { this(inputs, outputs); this.createVwDesc = createVwDesc; @@ -105,7 +105,7 @@ * drop table descriptor */ public DDLWork(Set inputs, Set outputs, - dropTableDesc dropTblDesc) { + DropTableDesc dropTblDesc) { this(inputs, outputs); this.dropTblDesc = dropTblDesc; @@ -115,7 +115,7 @@ * @param descTblDesc */ public DDLWork(Set inputs, Set outputs, - descTableDesc descTblDesc) { + DescTableDesc descTblDesc) { this(inputs, outputs); this.descTblDesc = descTblDesc; @@ -125,7 +125,7 @@ * @param showTblsDesc */ public DDLWork(Set inputs, Set outputs, - showTablesDesc showTblsDesc) { + ShowTablesDesc showTblsDesc) { this(inputs, outputs); this.showTblsDesc = showTblsDesc; @@ -135,7 +135,7 @@ * @param showFuncsDesc */ public DDLWork(Set inputs, Set outputs, - showFunctionsDesc showFuncsDesc) { + ShowFunctionsDesc showFuncsDesc) { this(inputs, outputs); this.showFuncsDesc = showFuncsDesc; @@ -145,17 +145,17 @@ * @param descFuncDesc */ public DDLWork(Set inputs, Set outputs, - descFunctionDesc descFuncDesc) { + DescFunctionDesc descFuncDesc) { 
this(inputs, outputs); - descFunctionDesc = descFuncDesc; + DescFunctionDesc = descFuncDesc; } /** * @param showPartsDesc */ public DDLWork(Set inputs, Set outputs, - showPartitionsDesc showPartsDesc) { + ShowPartitionsDesc showPartsDesc) { this(inputs, outputs); this.showPartsDesc = showPartsDesc; @@ -184,7 +184,7 @@ * show table status descriptor */ public DDLWork(Set inputs, Set outputs, - showTableStatusDesc showTblStatusDesc) { + ShowTableStatusDesc showTblStatusDesc) { this(inputs, outputs); this.showTblStatusDesc = showTblStatusDesc; @@ -193,8 +193,8 @@ /** * @return the createTblDesc */ - @explain(displayName = "Create Table Operator") - public createTableDesc getCreateTblDesc() { + @Explain(displayName = "Create Table Operator") + public CreateTableDesc getCreateTblDesc() { return createTblDesc; } @@ -202,15 +202,15 @@ * @param createTblDesc * the createTblDesc to set */ - public void setCreateTblDesc(createTableDesc createTblDesc) { + public void setCreateTblDesc(CreateTableDesc createTblDesc) { this.createTblDesc = createTblDesc; } /** * @return the createTblDesc */ - @explain(displayName = "Create Table Operator") - public createTableLikeDesc getCreateTblLikeDesc() { + @Explain(displayName = "Create Table Operator") + public CreateTableLikeDesc getCreateTblLikeDesc() { return createTblLikeDesc; } @@ -218,31 +218,31 @@ * @param createTblLikeDesc * the createTblDesc to set */ - public void setCreateTblLikeDesc(createTableLikeDesc createTblLikeDesc) { + public void setCreateTblLikeDesc(CreateTableLikeDesc createTblLikeDesc) { this.createTblLikeDesc = createTblLikeDesc; } /** * @return the createTblDesc */ - @explain(displayName = "Create View Operator") - public createViewDesc getCreateViewDesc() { + @Explain(displayName = "Create View Operator") + public CreateViewDesc getCreateViewDesc() { return createVwDesc; } /** * @param createVwDesc - * the createViewDesc to set + * the CreateViewDesc to set */ - public void setCreateViewDesc(createViewDesc createVwDesc) { + public void setCreateViewDesc(CreateViewDesc createVwDesc) { this.createVwDesc = createVwDesc; } /** * @return the dropTblDesc */ - @explain(displayName = "Drop Table Operator") - public dropTableDesc getDropTblDesc() { + @Explain(displayName = "Drop Table Operator") + public DropTableDesc getDropTblDesc() { return dropTblDesc; } @@ -250,15 +250,15 @@ * @param dropTblDesc * the dropTblDesc to set */ - public void setDropTblDesc(dropTableDesc dropTblDesc) { + public void setDropTblDesc(DropTableDesc dropTblDesc) { this.dropTblDesc = dropTblDesc; } /** * @return the alterTblDesc */ - @explain(displayName = "Alter Table Operator") - public alterTableDesc getAlterTblDesc() { + @Explain(displayName = "Alter Table Operator") + public AlterTableDesc getAlterTblDesc() { return alterTblDesc; } @@ -266,15 +266,15 @@ * @param alterTblDesc * the alterTblDesc to set */ - public void setAlterTblDesc(alterTableDesc alterTblDesc) { + public void setAlterTblDesc(AlterTableDesc alterTblDesc) { this.alterTblDesc = alterTblDesc; } /** * @return the showTblsDesc */ - @explain(displayName = "Show Table Operator") - public showTablesDesc getShowTblsDesc() { + @Explain(displayName = "Show Table Operator") + public ShowTablesDesc getShowTblsDesc() { return showTblsDesc; } @@ -282,31 +282,31 @@ * @param showTblsDesc * the showTblsDesc to set */ - public void setShowTblsDesc(showTablesDesc showTblsDesc) { + public void setShowTblsDesc(ShowTablesDesc showTblsDesc) { this.showTblsDesc = showTblsDesc; } /** * @return the showFuncsDesc */ - 
@explain(displayName = "Show Function Operator") - public showFunctionsDesc getShowFuncsDesc() { + @Explain(displayName = "Show Function Operator") + public ShowFunctionsDesc getShowFuncsDesc() { return showFuncsDesc; } /** * @return the descFuncDesc */ - @explain(displayName = "Show Function Operator") - public descFunctionDesc getDescFunctionDesc() { - return descFunctionDesc; + @Explain(displayName = "Show Function Operator") + public DescFunctionDesc getDescFunctionDesc() { + return DescFunctionDesc; } /** * @param showFuncsDesc * the showFuncsDesc to set */ - public void setShowFuncsDesc(showFunctionsDesc showFuncsDesc) { + public void setShowFuncsDesc(ShowFunctionsDesc showFuncsDesc) { this.showFuncsDesc = showFuncsDesc; } @@ -314,15 +314,15 @@ * @param descFuncDesc * the showFuncsDesc to set */ - public void setDescFuncDesc(descFunctionDesc descFuncDesc) { - descFunctionDesc = descFuncDesc; + public void setDescFuncDesc(DescFunctionDesc descFuncDesc) { + DescFunctionDesc = descFuncDesc; } /** * @return the showPartsDesc */ - @explain(displayName = "Show Partitions Operator") - public showPartitionsDesc getShowPartsDesc() { + @Explain(displayName = "Show Partitions Operator") + public ShowPartitionsDesc getShowPartsDesc() { return showPartsDesc; } @@ -330,15 +330,15 @@ * @param showPartsDesc * the showPartsDesc to set */ - public void setShowPartsDesc(showPartitionsDesc showPartsDesc) { + public void setShowPartsDesc(ShowPartitionsDesc showPartsDesc) { this.showPartsDesc = showPartsDesc; } /** * @return the descTblDesc */ - @explain(displayName = "Describe Table Operator") - public descTableDesc getDescTblDesc() { + @Explain(displayName = "Describe Table Operator") + public DescTableDesc getDescTblDesc() { return descTblDesc; } @@ -346,7 +346,7 @@ * @param descTblDesc * the descTblDesc to set */ - public void setDescTblDesc(descTableDesc descTblDesc) { + public void setDescTblDesc(DescTableDesc descTblDesc) { this.descTblDesc = descTblDesc; } @@ -383,7 +383,7 @@ /** * @return show table descriptor */ - public showTableStatusDesc getShowTblStatusDesc() { + public ShowTableStatusDesc getShowTblStatusDesc() { return showTblStatusDesc; } @@ -391,7 +391,7 @@ * @param showTblStatusDesc * show table descriptor */ - public void setShowTblStatusDesc(showTableStatusDesc showTblStatusDesc) { + public void setShowTblStatusDesc(ShowTableStatusDesc showTblStatusDesc) { this.showTblStatusDesc = showTblStatusDesc; } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/showFunctionsDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/showFunctionsDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/showFunctionsDesc.java (working copy) @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Show Functions") -public class showFunctionsDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String pattern; - Path resFile; - /** - * table name for the result of show tables - */ - private final String table = "show"; - /** - * thrift ddl for the result of show tables - */ - private final String schema = "tab_name#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * @param resFile - */ - public showFunctionsDesc(Path resFile) { - this.resFile = resFile; - pattern = null; - } - - /** - * @param pattern - * names of tables to show - */ - public showFunctionsDesc(Path resFile, String pattern) { - this.resFile = resFile; - this.pattern = pattern; - } - - /** - * @return the pattern - */ - @explain(displayName = "pattern") - public String getPattern() { - return pattern; - } - - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the resFile - */ - public Path getResFile() { - return resFile; - } - - @explain(displayName = "result file", normalExplain = false) - public String getResFileString() { - return getResFile().getName(); - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(Path resFile) { - this.resFile = resFile; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PartitionDesc.java (revision 0) @@ -0,0 +1,250 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.File; +import java.io.Serializable; +import java.net.URI; +import java.util.Enumeration; +import java.util.Properties; + +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; +import org.apache.hadoop.hive.ql.io.HiveOutputFormat; +import org.apache.hadoop.hive.ql.metadata.HiveException; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.mapred.InputFormat; + +@Explain(displayName = "Partition") +public class PartitionDesc implements Serializable, Cloneable { + private static final long serialVersionUID = 2L; + private TableDesc table; + private java.util.LinkedHashMap partSpec; + private java.lang.Class deserializerClass; + private Class inputFileFormatClass; + private Class outputFileFormatClass; + private java.util.Properties properties; + private String serdeClassName; + private transient String baseFileName; + + public PartitionDesc() { + } + + public PartitionDesc(final TableDesc table, + final java.util.LinkedHashMap partSpec) { + this(table, partSpec, null, null, null, null, null); + } + + public PartitionDesc(final TableDesc table, + final java.util.LinkedHashMap partSpec, + final Class serdeClass, + final Class inputFileFormatClass, + final Class outputFormat, final java.util.Properties properties, + final String serdeClassName) { + this.table = table; + this.partSpec = partSpec; + deserializerClass = serdeClass; + this.inputFileFormatClass = inputFileFormatClass; + if (outputFormat != null) { + outputFileFormatClass = HiveFileFormatUtils + .getOutputFormatSubstitute(outputFormat); + } + this.properties = properties; + if (properties != null) { + this.serdeClassName = properties + .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); + } + } + + public PartitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part) + throws HiveException { + table = Utilities.getTableDesc(part.getTable()); + partSpec = part.getSpec(); + deserializerClass = part.getDeserializer().getClass(); + inputFileFormatClass = part.getInputFormatClass(); + outputFileFormatClass = part.getOutputFormatClass(); + properties = part.getSchema(); + serdeClassName = properties + .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); + ; + } + + @Explain(displayName = "") + public TableDesc getTableDesc() { + return table; + } + + public void setTableDesc(final TableDesc table) { + this.table = table; + } + + @Explain(displayName = "partition values") + public java.util.LinkedHashMap getPartSpec() { + return partSpec; + } + + public void setPartSpec(final java.util.LinkedHashMap partSpec) { + this.partSpec = partSpec; + } + + public java.lang.Class getDeserializerClass() { + if (deserializerClass == null && table != null) { + setDeserializerClass(table.getDeserializerClass()); + } + return deserializerClass; + } + + public void setDeserializerClass( + final java.lang.Class serdeClass) { + deserializerClass = serdeClass; + } + + public Class getInputFileFormatClass() { + if (inputFileFormatClass == null && table != null) { + setInputFileFormatClass(table.getInputFileFormatClass()); + } + return inputFileFormatClass; + } + + /** + * Return a deserializer object corresponding to the TableDesc + */ + public Deserializer getDeserializer() throws Exception { + Deserializer de = deserializerClass.newInstance(); + de.initialize(null, properties); + return de; + } + + public void setInputFileFormatClass( + final Class inputFileFormatClass) 
{ + this.inputFileFormatClass = inputFileFormatClass; + } + + public Class getOutputFileFormatClass() { + if (outputFileFormatClass == null && table != null) { + setOutputFileFormatClass(table.getOutputFileFormatClass()); + } + return outputFileFormatClass; + } + + public void setOutputFileFormatClass(final Class outputFileFormatClass) { + this.outputFileFormatClass = HiveFileFormatUtils + .getOutputFormatSubstitute(outputFileFormatClass); + } + + @Explain(displayName = "properties", normalExplain = false) + public java.util.Properties getProperties() { + if (table != null) { + return table.getProperties(); + } + return properties; + } + + public void setProperties(final java.util.Properties properties) { + this.properties = properties; + } + + /** + * @return the serdeClassName + */ + @Explain(displayName = "serde") + public String getSerdeClassName() { + if (serdeClassName == null && table != null) { + setSerdeClassName(table.getSerdeClassName()); + } + return serdeClassName; + } + + /** + * @param serdeClassName + * the serde Class Name to set + */ + public void setSerdeClassName(String serdeClassName) { + this.serdeClassName = serdeClassName; + } + + @Explain(displayName = "name") + public String getTableName() { + return getProperties().getProperty( + org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME); + } + + @Explain(displayName = "input format") + public String getInputFileFormatClassName() { + return getInputFileFormatClass().getName(); + } + + @Explain(displayName = "output format") + public String getOutputFileFormatClassName() { + return getOutputFileFormatClass().getName(); + } + + @Explain(displayName = "base file name", normalExplain = false) + public String getBaseFileName() { + return baseFileName; + } + + @Override + public PartitionDesc clone() { + PartitionDesc ret = new PartitionDesc(); + + ret.setSerdeClassName(serdeClassName); + ret.setDeserializerClass(deserializerClass); + ret.inputFileFormatClass = inputFileFormatClass; + ret.outputFileFormatClass = outputFileFormatClass; + if (properties != null) { + Properties newProp = new Properties(); + Enumeration keysProp = properties.keys(); + while (keysProp.hasMoreElements()) { + Object key = keysProp.nextElement(); + newProp.put(key, properties.get(key)); + } + ret.setProperties(newProp); + } + ret.table = (TableDesc) table.clone(); + // The partition spec is not present + if (partSpec != null) { + ret.partSpec = new java.util.LinkedHashMap(); + ret.partSpec.putAll(partSpec); + } + return ret; + } + + /** + * Attempt to derive a virtual base file name property from the + * path. If path format is unrecognized, just use the full path. + * + * @param path + * URI to the partition file + */ + void deriveBaseFileName(String path) { + if (path == null) { + return; + } + try { + URI uri = new URI(path); + File file = new File(uri); + baseFileName = file.getName(); + } catch (Exception ex) { + // This could be due to either URI syntax error or File constructor + // illegal arg; we don't really care which one it is. + baseFileName = path; + } + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/extractDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/extractDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/extractDesc.java (working copy) @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Extract") -public class extractDesc implements Serializable { - private static final long serialVersionUID = 1L; - private exprNodeDesc col; - - public extractDesc() { - } - - public extractDesc(final exprNodeDesc col) { - this.col = col; - } - - public exprNodeDesc getCol() { - return col; - } - - public void setCol(final exprNodeDesc col) { - this.col = col; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DropFunctionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropFunctionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropFunctionDesc.java (revision 0) @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Drop Function") +public class DropFunctionDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private String functionName; + + public DropFunctionDesc(String functionName) { + this.functionName = functionName; + } + + @Explain(displayName = "name") + public String getFunctionName() { + return functionName; + } + + public void setFunctionName(String functionName) { + this.functionName = functionName; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MoveWork.java (revision 0) @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.Set; + +import org.apache.hadoop.hive.ql.hooks.ReadEntity; +import org.apache.hadoop.hive.ql.hooks.WriteEntity; + +@Explain(displayName = "Move Operator") +public class MoveWork implements Serializable { + private static final long serialVersionUID = 1L; + private LoadTableDesc loadTableWork; + private LoadFileDesc loadFileWork; + + private boolean checkFileFormat; + + /** + * ReadEntitites that are passed to the hooks. + */ + protected Set inputs; + /** + * List of WriteEntities that are passed to the hooks. + */ + protected Set outputs; + + public MoveWork() { + } + + public MoveWork(Set inputs, Set outputs) { + this.inputs = inputs; + this.outputs = outputs; + } + + public MoveWork(Set inputs, Set outputs, + final LoadTableDesc loadTableWork, final LoadFileDesc loadFileWork, + boolean checkFileFormat) { + this(inputs, outputs); + this.loadTableWork = loadTableWork; + this.loadFileWork = loadFileWork; + this.checkFileFormat = checkFileFormat; + } + + @Explain(displayName = "tables") + public LoadTableDesc getLoadTableWork() { + return loadTableWork; + } + + public void setLoadTableWork(final LoadTableDesc loadTableWork) { + this.loadTableWork = loadTableWork; + } + + @Explain(displayName = "files") + public LoadFileDesc getLoadFileWork() { + return loadFileWork; + } + + public void setLoadFileWork(final LoadFileDesc loadFileWork) { + this.loadFileWork = loadFileWork; + } + + public boolean getCheckFileFormat() { + return checkFileFormat; + } + + public void setCheckFileFormat(boolean checkFileFormat) { + this.checkFileFormat = checkFileFormat; + } + + public Set getInputs() { + return inputs; + } + + public Set getOutputs() { + return outputs; + } + + public void setInputs(Set inputs) { + this.inputs = inputs; + } + + public void setOutputs(Set outputs) { + this.outputs = outputs; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/mapredWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/mapredWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/mapredWork.java (working copy) @@ -1,280 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.ByteArrayOutputStream; -import java.io.Serializable; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.ql.exec.Operator; -import org.apache.hadoop.hive.ql.exec.Utilities; - -@explain(displayName = "Map Reduce") -public class mapredWork implements Serializable { - private static final long serialVersionUID = 1L; - private String command; - // map side work - // use LinkedHashMap to make sure the iteration order is - // deterministic, to ease testing - private LinkedHashMap> pathToAliases; - - private LinkedHashMap pathToPartitionInfo; - - private LinkedHashMap> aliasToWork; - - private LinkedHashMap aliasToPartnInfo; - - // map<->reduce interface - // schema of the map-reduce 'key' object - this is homogeneous - private tableDesc keyDesc; - - // schema of the map-reduce 'val' object - this is heterogeneous - private List tagToValueDesc; - - private Operator reducer; - - private Integer numReduceTasks; - - private boolean needsTagging; - private mapredLocalWork mapLocalWork; - - public mapredWork() { - aliasToPartnInfo = new LinkedHashMap(); - } - - public mapredWork( - final String command, - final LinkedHashMap> pathToAliases, - final LinkedHashMap pathToPartitionInfo, - final LinkedHashMap> aliasToWork, - final tableDesc keyDesc, List tagToValueDesc, - final Operator reducer, final Integer numReduceTasks, - final mapredLocalWork mapLocalWork) { - this.command = command; - this.pathToAliases = pathToAliases; - this.pathToPartitionInfo = pathToPartitionInfo; - this.aliasToWork = aliasToWork; - this.keyDesc = keyDesc; - this.tagToValueDesc = tagToValueDesc; - this.reducer = reducer; - this.numReduceTasks = numReduceTasks; - this.mapLocalWork = mapLocalWork; - aliasToPartnInfo = new LinkedHashMap(); - } - - public String getCommand() { - return command; - } - - public void setCommand(final String command) { - this.command = command; - } - - @explain(displayName = "Path -> Alias", normalExplain = false) - public LinkedHashMap> getPathToAliases() { - return pathToAliases; - } - - public void setPathToAliases( - final LinkedHashMap> pathToAliases) { - this.pathToAliases = pathToAliases; - } - - @explain(displayName = "Path -> Partition", normalExplain = false) - public LinkedHashMap getPathToPartitionInfo() { - return pathToPartitionInfo; - } - - public void setPathToPartitionInfo( - final LinkedHashMap pathToPartitionInfo) { - this.pathToPartitionInfo = pathToPartitionInfo; - } - - /** - * @return the aliasToPartnInfo - */ - public LinkedHashMap getAliasToPartnInfo() { - return aliasToPartnInfo; - } - - /** - * @param aliasToPartnInfo - * the aliasToPartnInfo to set - */ - public void setAliasToPartnInfo( - LinkedHashMap aliasToPartnInfo) { - this.aliasToPartnInfo = aliasToPartnInfo; - } - - @explain(displayName = "Alias -> Map Operator Tree") - public LinkedHashMap> getAliasToWork() { - return aliasToWork; - } - - public void setAliasToWork( - final LinkedHashMap> aliasToWork) { - this.aliasToWork = aliasToWork; - } - - /** - * @return the mapredLocalWork - */ - @explain(displayName = "Local Work") - public mapredLocalWork getMapLocalWork() { - return mapLocalWork; - } - - /** - * @param mapLocalWork - * the mapredLocalWork to set - */ - public void setMapLocalWork(final mapredLocalWork mapLocalWork) { - this.mapLocalWork = mapLocalWork; - } - - public tableDesc getKeyDesc() { - return keyDesc; - } - - public void setKeyDesc(final 
tableDesc keyDesc) { - this.keyDesc = keyDesc; - } - - public List getTagToValueDesc() { - return tagToValueDesc; - } - - public void setTagToValueDesc(final List tagToValueDesc) { - this.tagToValueDesc = tagToValueDesc; - } - - @explain(displayName = "Reduce Operator Tree") - public Operator getReducer() { - return reducer; - } - - public void setReducer(final Operator reducer) { - this.reducer = reducer; - } - - /** - * If the number of reducers is -1, the runtime will automatically figure it - * out by input data size. - * - * The number of reducers will be a positive number only in case the target - * table is bucketed into N buckets (through CREATE TABLE). This feature is - * not supported yet, so the number of reducers will always be -1 for now. - */ - public Integer getNumReduceTasks() { - return numReduceTasks; - } - - public void setNumReduceTasks(final Integer numReduceTasks) { - this.numReduceTasks = numReduceTasks; - } - - @SuppressWarnings("nls") - public void addMapWork(String path, String alias, Operator work, - partitionDesc pd) { - ArrayList curAliases = pathToAliases.get(path); - if (curAliases == null) { - assert (pathToPartitionInfo.get(path) == null); - curAliases = new ArrayList(); - pathToAliases.put(path, curAliases); - pathToPartitionInfo.put(path, pd); - } else { - assert (pathToPartitionInfo.get(path) != null); - } - - for (String oneAlias : curAliases) { - if (oneAlias.equals(alias)) { - throw new RuntimeException("Multiple aliases named: " + alias - + " for path: " + path); - } - } - curAliases.add(alias); - - if (aliasToWork.get(alias) != null) { - throw new RuntimeException("Existing work for alias: " + alias); - } - aliasToWork.put(alias, work); - } - - @SuppressWarnings("nls") - public String isInvalid() { - if ((getNumReduceTasks() >= 1) && (getReducer() == null)) { - return "Reducers > 0 but no reduce operator"; - } - - if ((getNumReduceTasks() == 0) && (getReducer() != null)) { - return "Reducers == 0 but reduce operator specified"; - } - - return null; - } - - public String toXML() { - ByteArrayOutputStream baos = new ByteArrayOutputStream(); - Utilities.serializeMapRedWork(this, baos); - return (baos.toString()); - } - - // non bean - - /** - * For each map side operator - stores the alias the operator is working on - * behalf of in the operator runtime state. This is used by reducesink - * operator - but could be useful for debugging as well. - */ - private void setAliases() { - for (String oneAlias : aliasToWork.keySet()) { - aliasToWork.get(oneAlias).setAlias(oneAlias); - } - } - - /** - * Derive additional attributes to be rendered by EXPLAIN. 
- */ - public void deriveExplainAttributes() { - if (pathToPartitionInfo == null) { - return; - } - for (Map.Entry entry : pathToPartitionInfo - .entrySet()) { - entry.getValue().deriveBaseFileName(entry.getKey()); - } - } - - public void initialize() { - setAliases(); - } - - @explain(displayName = "Needs Tagging", normalExplain = false) - public boolean getNeedsTagging() { - return needsTagging; - } - - public void setNeedsTagging(boolean needsTagging) { - this.needsTagging = needsTagging; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DropTableDesc.java (revision 0) @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; +import java.util.Map; + +@Explain(displayName = "Drop Table") +public class DropTableDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + String tableName; + List> partSpecs; + boolean expectView; + + /** + * @param tableName + */ + public DropTableDesc(String tableName, boolean expectView) { + this.tableName = tableName; + partSpecs = null; + this.expectView = expectView; + } + + public DropTableDesc(String tableName, List> partSpecs) { + this.tableName = tableName; + this.partSpecs = partSpecs; + expectView = false; + } + + /** + * @return the tableName + */ + @Explain(displayName = "table") + public String getTableName() { + return tableName; + } + + /** + * @param tableName + * the tableName to set + */ + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return the partSpecs + */ + public List> getPartSpecs() { + return partSpecs; + } + + /** + * @param partSpecs + * the partSpecs to set + */ + public void setPartSpecs(List> partSpecs) { + this.partSpecs = partSpecs; + } + + /** + * @return whether to expect a view being dropped + */ + public boolean getExpectView() { + return expectView; + } + + /** + * @param expectView + * set whether to expect a view being dropped + */ + public void setExpectView(boolean expectView) { + this.expectView = expectView; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CopyWork.java (revision 0) @@ -0,0 +1,54 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Copy") +public class CopyWork implements Serializable { + private static final long serialVersionUID = 1L; + private String fromPath; + private String toPath; + + public CopyWork() { + } + + public CopyWork(final String fromPath, final String toPath) { + this.fromPath = fromPath; + this.toPath = toPath; + } + + @Explain(displayName = "source") + public String getFromPath() { + return fromPath; + } + + public void setFromPath(final String fromPath) { + this.fromPath = fromPath; + } + + @Explain(displayName = "destination") + public String getToPath() { + return toPath; + } + + public void setToPath(final String toPath) { + this.toPath = toPath; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/alterTableDesc.java (working copy) @@ -1,420 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.ql.exec.Utilities; - -@explain(displayName = "Alter Table") -public class alterTableDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - - public static enum alterTableTypes { - RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN - }; - - alterTableTypes op; - String oldName; - String newName; - List newCols; - String serdeName; - Map props; - String inputFormat; - String outputFormat; - int numberBuckets; - List bucketColumns; - List sortColumns; - - String oldColName; - String newColName; - String newColType; - String newColComment; - boolean first; - String afterCol; - - /** - * @param tblName - * table name - * @param oldColName - * old column name - * @param newColName - * new column name - * @param newComment - * @param newType - */ - public alterTableDesc(String tblName, String oldColName, String newColName, - String newType, String newComment, boolean first, String afterCol) { - super(); - oldName = tblName; - this.oldColName = oldColName; - this.newColName = newColName; - newColType = newType; - newColComment = newComment; - this.first = first; - this.afterCol = afterCol; - op = alterTableTypes.RENAMECOLUMN; - } - - /** - * @param oldName - * old name of the table - * @param newName - * new name of the table - */ - public alterTableDesc(String oldName, String newName) { - op = alterTableTypes.RENAME; - this.oldName = oldName; - this.newName = newName; - } - - /** - * @param name - * name of the table - * @param newCols - * new columns to be added - */ - public alterTableDesc(String name, List newCols, - alterTableTypes alterType) { - op = alterType; - oldName = name; - this.newCols = newCols; - } - - /** - * @param alterType - * type of alter op - */ - public alterTableDesc(alterTableTypes alterType) { - op = alterType; - } - - /** - * - * @param name - * name of the table - * @param inputFormat - * new table input format - * @param outputFormat - * new table output format - */ - public alterTableDesc(String name, String inputFormat, String outputFormat, - String serdeName) { - super(); - op = alterTableTypes.ADDFILEFORMAT; - oldName = name; - this.inputFormat = inputFormat; - this.outputFormat = outputFormat; - this.serdeName = serdeName; - } - - public alterTableDesc(String tableName, int numBuckets, - List bucketCols, List sortCols) { - oldName = tableName; - op = alterTableTypes.ADDCLUSTERSORTCOLUMN; - numberBuckets = numBuckets; - bucketColumns = bucketCols; - sortColumns = sortCols; - } - - /** - * @return the old name of the table - */ - @explain(displayName = "old name") - public String getOldName() { - return oldName; - } - - /** - * @param oldName - * the oldName to set - */ - public void setOldName(String oldName) { - this.oldName = oldName; - } - - /** - * @return the newName - */ - @explain(displayName = "new name") - public String getNewName() { - return newName; - } - - /** - * @param newName - * the newName to set - */ - public void setNewName(String newName) { - this.newName = newName; - } - - /** - * @return the op - */ - public alterTableTypes getOp() { - return op; - } - - @explain(displayName = "type") - public String getAlterTableTypeString() { - switch (op) { - case RENAME: - return 
"rename"; - case ADDCOLS: - return "add columns"; - case REPLACECOLS: - return "replace columns"; - } - - return "unknown"; - } - - /** - * @param op - * the op to set - */ - public void setOp(alterTableTypes op) { - this.op = op; - } - - /** - * @return the newCols - */ - public List getNewCols() { - return newCols; - } - - @explain(displayName = "new columns") - public List getNewColsString() { - return Utilities.getFieldSchemaString(getNewCols()); - } - - /** - * @param newCols - * the newCols to set - */ - public void setNewCols(List newCols) { - this.newCols = newCols; - } - - /** - * @return the serdeName - */ - @explain(displayName = "deserializer library") - public String getSerdeName() { - return serdeName; - } - - /** - * @param serdeName - * the serdeName to set - */ - public void setSerdeName(String serdeName) { - this.serdeName = serdeName; - } - - /** - * @return the props - */ - @explain(displayName = "properties") - public Map getProps() { - return props; - } - - /** - * @param props - * the props to set - */ - public void setProps(Map props) { - this.props = props; - } - - /** - * @return the input format - */ - @explain(displayName = "input format") - public String getInputFormat() { - return inputFormat; - } - - /** - * @param inputFormat - * the input format to set - */ - public void setInputFormat(String inputFormat) { - this.inputFormat = inputFormat; - } - - /** - * @return the output format - */ - @explain(displayName = "output format") - public String getOutputFormat() { - return outputFormat; - } - - /** - * @param outputFormat - * the output format to set - */ - public void setOutputFormat(String outputFormat) { - this.outputFormat = outputFormat; - } - - /** - * @return the number of buckets - */ - public int getNumberBuckets() { - return numberBuckets; - } - - /** - * @param numberBuckets - * the number of buckets to set - */ - public void setNumberBuckets(int numberBuckets) { - this.numberBuckets = numberBuckets; - } - - /** - * @return the bucket columns - */ - public List getBucketColumns() { - return bucketColumns; - } - - /** - * @param bucketColumns - * the bucket columns to set - */ - public void setBucketColumns(List bucketColumns) { - this.bucketColumns = bucketColumns; - } - - /** - * @return the sort columns - */ - public List getSortColumns() { - return sortColumns; - } - - /** - * @param sortColumns - * the sort columns to set - */ - public void setSortColumns(List sortColumns) { - this.sortColumns = sortColumns; - } - - /** - * @return old column name - */ - public String getOldColName() { - return oldColName; - } - - /** - * @param oldColName - * the old column name - */ - public void setOldColName(String oldColName) { - this.oldColName = oldColName; - } - - /** - * @return new column name - */ - public String getNewColName() { - return newColName; - } - - /** - * @param newColName - * the new column name - */ - public void setNewColName(String newColName) { - this.newColName = newColName; - } - - /** - * @return new column type - */ - public String getNewColType() { - return newColType; - } - - /** - * @param newType - * new column's type - */ - public void setNewColType(String newType) { - newColType = newType; - } - - /** - * @return new column's comment - */ - public String getNewColComment() { - return newColComment; - } - - /** - * @param newComment - * new column's comment - */ - public void setNewColComment(String newComment) { - newColComment = newComment; - } - - /** - * @return if the column should be changed to position 0 - */ - 
public boolean getFirst() { - return first; - } - - /** - * @param first - * set the column to position 0 - */ - public void setFirst(boolean first) { - this.first = first; - } - - /** - * @return the column's after position - */ - public String getAfterCol() { - return afterCol; - } - - /** - * @param afterCol - * set the column's after position - */ - public void setAfterCol(String afterCol) { - this.afterCol = afterCol; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/loadDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/loadDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/loadDesc.java (working copy) @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -public class loadDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String sourceDir; - - public loadDesc() { - } - - public loadDesc(final String sourceDir) { - - this.sourceDir = sourceDir; - } - - @explain(displayName = "source", normalExplain = false) - public String getSourceDir() { - return sourceDir; - } - - public void setSourceDir(final String source) { - sourceDir = source; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/loadFileDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/loadFileDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/loadFileDesc.java (working copy) @@ -1,91 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -public class loadFileDesc extends loadDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String targetDir; - private boolean isDfsDir; - // list of columns, comma separated - private String columns; - private String columnTypes; - - public loadFileDesc() { - } - - public loadFileDesc(final String sourceDir, final String targetDir, - final boolean isDfsDir, final String columns, final String columnTypes) { - - super(sourceDir); - this.targetDir = targetDir; - this.isDfsDir = isDfsDir; - this.columns = columns; - this.columnTypes = columnTypes; - } - - @explain(displayName = "destination") - public String getTargetDir() { - return targetDir; - } - - public void setTargetDir(final String targetDir) { - this.targetDir = targetDir; - } - - @explain(displayName = "hdfs directory") - public boolean getIsDfsDir() { - return isDfsDir; - } - - public void setIsDfsDir(final boolean isDfsDir) { - this.isDfsDir = isDfsDir; - } - - /** - * @return the columns - */ - public String getColumns() { - return columns; - } - - /** - * @param columns - * the columns to set - */ - public void setColumns(String columns) { - this.columns = columns; - } - - /** - * @return the columnTypes - */ - public String getColumnTypes() { - return columnTypes; - } - - /** - * @param columnTypes - * the columnTypes to set - */ - public void setColumnTypes(String columnTypes) { - this.columnTypes = columnTypes; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/selectDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/selectDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/selectDesc.java (working copy) @@ -1,119 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Select Operator") -public class selectDesc implements Serializable { - private static final long serialVersionUID = 1L; - private java.util.ArrayList colList; - private java.util.ArrayList outputColumnNames; - private boolean selectStar; - private boolean selStarNoCompute; - - public selectDesc() { - } - - public selectDesc(final boolean selStarNoCompute) { - this.selStarNoCompute = selStarNoCompute; - } - - public selectDesc( - final java.util.ArrayList colList, - final java.util.ArrayList outputColumnNames) { - this(colList, outputColumnNames, false); - } - - public selectDesc( - final java.util.ArrayList colList, - java.util.ArrayList outputColumnNames, - final boolean selectStar) { - this.colList = colList; - this.selectStar = selectStar; - this.outputColumnNames = outputColumnNames; - } - - public selectDesc( - final java.util.ArrayList colList, - final boolean selectStar, final boolean selStarNoCompute) { - this.colList = colList; - this.selectStar = selectStar; - this.selStarNoCompute = selStarNoCompute; - } - - @explain(displayName = "expressions") - public java.util.ArrayList getColList() { - return colList; - } - - public void setColList( - final java.util.ArrayList colList) { - this.colList = colList; - } - - @explain(displayName = "outputColumnNames") - public java.util.ArrayList getOutputColumnNames() { - return outputColumnNames; - } - - public void setOutputColumnNames( - java.util.ArrayList outputColumnNames) { - this.outputColumnNames = outputColumnNames; - } - - @explain(displayName = "SELECT * ") - public String explainNoCompute() { - if (isSelStarNoCompute()) { - return "(no compute)"; - } else { - return null; - } - } - - /** - * @return the selectStar - */ - public boolean isSelectStar() { - return selectStar; - } - - /** - * @param selectStar - * the selectStar to set - */ - public void setSelectStar(boolean selectStar) { - this.selectStar = selectStar; - } - - /** - * @return the selStarNoCompute - */ - public boolean isSelStarNoCompute() { - return selStarNoCompute; - } - - /** - * @param selStarNoCompute - * the selStarNoCompute to set - */ - public void setSelStarNoCompute(boolean selStarNoCompute) { - this.selStarNoCompute = selStarNoCompute; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/createTableLikeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/createTableLikeDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/createTableLikeDesc.java (working copy) @@ -1,86 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Create Table") -public class createTableLikeDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String tableName; - boolean isExternal; - String location; - boolean ifNotExists; - String likeTableName; - - public createTableLikeDesc(String tableName, boolean isExternal, - String location, boolean ifNotExists, String likeTableName) { - this.tableName = tableName; - this.isExternal = isExternal; - this.location = location; - this.ifNotExists = ifNotExists; - this.likeTableName = likeTableName; - } - - @explain(displayName = "if not exists") - public boolean getIfNotExists() { - return ifNotExists; - } - - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - - @explain(displayName = "name") - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - @explain(displayName = "location") - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @explain(displayName = "isExternal") - public boolean isExternal() { - return isExternal; - } - - public void setExternal(boolean isExternal) { - this.isExternal = isExternal; - } - - @explain(displayName = "like") - public String getLikeTableName() { - return likeTableName; - } - - public void setLikeTableName(String likeTableName) { - this.likeTableName = likeTableName; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AddPartitionDesc.java (working copy) @@ -108,4 +108,4 @@ this.partSpec = partSpec; } -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/descFunctionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/descFunctionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/descFunctionDesc.java (working copy) @@ -1,110 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Describe Function") -public class descFunctionDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String name; - Path resFile; - boolean isExtended; - - public boolean isExtended() { - return isExtended; - } - - public void setExtended(boolean isExtended) { - this.isExtended = isExtended; - } - - /** - * table name for the result of show tables - */ - private final String table = "show"; - /** - * thrift ddl for the result of show tables - */ - private final String schema = "tab_name#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * @param resFile - */ - public descFunctionDesc(Path resFile) { - this.resFile = resFile; - name = null; - } - - /** - * @param name - * of the function to describe - */ - public descFunctionDesc(Path resFile, String name, boolean isExtended) { - this.isExtended = isExtended; - this.resFile = resFile; - this.name = name; - } - - /** - * @return the name - */ - @explain(displayName = "name") - public String getName() { - return name; - } - - /** - * @param name - * is the function name - */ - public void setName(String name) { - this.name = name; - } - - /** - * @return the resFile - */ - public Path getResFile() { - return resFile; - } - - @explain(displayName = "result file", normalExplain = false) - public String getResFileString() { - return getResFile().getName(); - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(Path resFile) { - this.resFile = resFile; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/FilterDesc.java (revision 0) @@ -0,0 +1,115 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +@Explain(displayName = "Filter Operator") +public class FilterDesc implements Serializable { + + /** + * sampleDesc is used to keep track of the sampling descriptor + */ + public static class sampleDesc { + // The numerator of the TABLESAMPLE clause + private int numerator; + + // The denominator of the TABLESAMPLE clause + private int denominator; + + // Input files can be pruned + private boolean inputPruning; + + public sampleDesc() { + } + + public sampleDesc(int numerator, int denominator, + List tabBucketCols, boolean inputPruning) { + this.numerator = numerator; + this.denominator = denominator; + this.inputPruning = inputPruning; + } + + public int getNumerator() { + return numerator; + } + + public int getDenominator() { + return denominator; + } + + public boolean getInputPruning() { + return inputPruning; + } + } + + private static final long serialVersionUID = 1L; + private org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate; + private boolean isSamplingPred; + private transient sampleDesc sampleDescr; + + public FilterDesc() { + } + + public FilterDesc( + final org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate, + boolean isSamplingPred) { + this.predicate = predicate; + this.isSamplingPred = isSamplingPred; + sampleDescr = null; + } + + public FilterDesc( + final org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate, + boolean isSamplingPred, final sampleDesc sampleDescr) { + this.predicate = predicate; + this.isSamplingPred = isSamplingPred; + this.sampleDescr = sampleDescr; + } + + @Explain(displayName = "predicate") + public org.apache.hadoop.hive.ql.plan.ExprNodeDesc getPredicate() { + return predicate; + } + + public void setPredicate( + final org.apache.hadoop.hive.ql.plan.ExprNodeDesc predicate) { + this.predicate = predicate; + } + + @Explain(displayName = "isSamplingPred", normalExplain = false) + public boolean getIsSamplingPred() { + return isSamplingPred; + } + + public void setIsSamplingPred(final boolean isSamplingPred) { + this.isSamplingPred = isSamplingPred; + } + + @Explain(displayName = "sampleDesc", normalExplain = false) + public sampleDesc getSampleDescr() { + return sampleDescr; + } + + public void setSampleDescr(final sampleDesc sampleDescr) { + this.sampleDescr = sampleDescr; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AggregationDesc.java (revision 0) @@ -0,0 +1,114 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; + +public class AggregationDesc implements java.io.Serializable { + private static final long serialVersionUID = 1L; + private String genericUDAFName; + + /** + * In case genericUDAFEvaluator is Serializable, we will serialize the object. + * + * In case genericUDAFEvaluator does not implement Serializable, Java will + * remember the class of genericUDAFEvaluator and creates a new instance when + * deserialized. This is exactly what we want. + */ + private GenericUDAFEvaluator genericUDAFEvaluator; + private java.util.ArrayList parameters; + private boolean distinct; + private GenericUDAFEvaluator.Mode mode; + + public AggregationDesc() { + } + + public AggregationDesc(final String genericUDAFName, + final GenericUDAFEvaluator genericUDAFEvaluator, + final java.util.ArrayList parameters, + final boolean distinct, final GenericUDAFEvaluator.Mode mode) { + this.genericUDAFName = genericUDAFName; + this.genericUDAFEvaluator = genericUDAFEvaluator; + this.parameters = parameters; + this.distinct = distinct; + this.mode = mode; + } + + public void setGenericUDAFName(final String genericUDAFName) { + this.genericUDAFName = genericUDAFName; + } + + public String getGenericUDAFName() { + return genericUDAFName; + } + + public void setGenericUDAFEvaluator( + final GenericUDAFEvaluator genericUDAFEvaluator) { + this.genericUDAFEvaluator = genericUDAFEvaluator; + } + + public GenericUDAFEvaluator getGenericUDAFEvaluator() { + return genericUDAFEvaluator; + } + + public java.util.ArrayList getParameters() { + return parameters; + } + + public void setParameters(final java.util.ArrayList parameters) { + this.parameters = parameters; + } + + public boolean getDistinct() { + return distinct; + } + + public void setDistinct(final boolean distinct) { + this.distinct = distinct; + } + + public void setMode(final GenericUDAFEvaluator.Mode mode) { + this.mode = mode; + } + + public GenericUDAFEvaluator.Mode getMode() { + return mode; + } + + @Explain(displayName = "expr") + public String getExprString() { + StringBuilder sb = new StringBuilder(); + sb.append(genericUDAFName); + sb.append("("); + if (distinct) { + sb.append("DISTINCT "); + } + boolean first = true; + for (ExprNodeDesc exp : parameters) { + if (first) { + first = false; + } else { + sb.append(", "); + } + sb.append(exp.getExprString()); + } + sb.append(")"); + return sb.toString(); + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/groupByDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/groupByDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/groupByDesc.java (working copy) @@ -1,149 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -@explain(displayName = "Group By Operator") -public class groupByDesc implements java.io.Serializable { - /** - * Group-by Mode: COMPLETE: complete 1-phase aggregation: iterate, terminate - * PARTIAL1: partial aggregation - first phase: iterate, terminatePartial - * PARTIAL2: partial aggregation - second phase: merge, terminatePartial - * PARTIALS: For non-distinct the same as PARTIAL2, for distinct the same as - * PARTIAL1 FINAL: partial aggregation - final phase: merge, terminate HASH: - * For non-distinct the same as PARTIAL1 but use hash-table-based aggregation - * MERGEPARTIAL: FINAL for non-distinct aggregations, COMPLETE for distinct - * aggregations - */ - private static final long serialVersionUID = 1L; - - public static enum Mode { - COMPLETE, PARTIAL1, PARTIAL2, PARTIALS, FINAL, HASH, MERGEPARTIAL - }; - - private Mode mode; - private boolean groupKeyNotReductionKey; - private boolean bucketGroup; - - private java.util.ArrayList keys; - private java.util.ArrayList aggregators; - private java.util.ArrayList outputColumnNames; - - public groupByDesc() { - } - - public groupByDesc( - final Mode mode, - final java.util.ArrayList outputColumnNames, - final java.util.ArrayList keys, - final java.util.ArrayList aggregators, - final boolean groupKeyNotReductionKey) { - this(mode, outputColumnNames, keys, aggregators, groupKeyNotReductionKey, - false); - } - - public groupByDesc( - final Mode mode, - final java.util.ArrayList outputColumnNames, - final java.util.ArrayList keys, - final java.util.ArrayList aggregators, - final boolean groupKeyNotReductionKey, final boolean bucketGroup) { - this.mode = mode; - this.outputColumnNames = outputColumnNames; - this.keys = keys; - this.aggregators = aggregators; - this.groupKeyNotReductionKey = groupKeyNotReductionKey; - this.bucketGroup = bucketGroup; - } - - public Mode getMode() { - return mode; - } - - @explain(displayName = "mode") - public String getModeString() { - switch (mode) { - case COMPLETE: - return "complete"; - case PARTIAL1: - return "partial1"; - case PARTIAL2: - return "partial2"; - case PARTIALS: - return "partials"; - case HASH: - return "hash"; - case FINAL: - return "final"; - case MERGEPARTIAL: - return "mergepartial"; - } - - return "unknown"; - } - - public void setMode(final Mode mode) { - this.mode = mode; - } - - @explain(displayName = "keys") - public java.util.ArrayList getKeys() { - return keys; - } - - public void setKeys(final java.util.ArrayList keys) { - this.keys = keys; - } - - @explain(displayName = "outputColumnNames") - public java.util.ArrayList getOutputColumnNames() { - return outputColumnNames; - } - - public void setOutputColumnNames( - java.util.ArrayList outputColumnNames) { - this.outputColumnNames = outputColumnNames; - } - - @explain(displayName = "aggregations") - public java.util.ArrayList getAggregators() { - return aggregators; - } - - public void setAggregators( - final java.util.ArrayList aggregators) { - this.aggregators = aggregators; - } - - public boolean getGroupKeyNotReductionKey() { - return groupKeyNotReductionKey; - } - - public void setGroupKeyNotReductionKey(final boolean groupKeyNotReductionKey) { - this.groupKeyNotReductionKey = groupKeyNotReductionKey; - } - - @explain(displayName = "bucketGroup") - public boolean getBucketGroup() { - return bucketGroup; - } - - public void setBucketGroup(boolean dataSorted) { - 
bucketGroup = dataSorted;
-  }
-}
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/UnionDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/UnionDesc.java (revision 0)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/UnionDesc.java (revision 0)
@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+
+/**
+ * UnionDesc is an empty class currently. However, union has more than one input
+ * (as compared with forward), and therefore, we need a separate class.
+ **/
+@Explain(displayName = "Union")
+public class UnionDesc implements Serializable {
+  private static final long serialVersionUID = 1L;
+
+  transient private int numInputs;
+
+  @SuppressWarnings("nls")
+  public UnionDesc() {
+    numInputs = 2;
+  }
+
+  /**
+   * @return the numInputs
+   */
+  public int getNumInputs() {
+    return numInputs;
+  }
+
+  /**
+   * @param numInputs
+   *          the numInputs to set
+   */
+  public void setNumInputs(int numInputs) {
+    this.numInputs = numInputs;
+  }
+}
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/FunctionWork.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/FunctionWork.java (revision 901960)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/FunctionWork.java (working copy)
@@ -22,31 +22,31 @@
 public class FunctionWork implements Serializable {
   private static final long serialVersionUID = 1L;
-  private createFunctionDesc createFunctionDesc;
-  private dropFunctionDesc dropFunctionDesc;
+  private CreateFunctionDesc CreateFunctionDesc;
+  private DropFunctionDesc DropFunctionDesc;
 
-  public FunctionWork(createFunctionDesc createFunctionDesc) {
-    this.createFunctionDesc = createFunctionDesc;
+  public FunctionWork(CreateFunctionDesc CreateFunctionDesc) {
+    this.CreateFunctionDesc = CreateFunctionDesc;
   }
 
-  public FunctionWork(dropFunctionDesc dropFunctionDesc) {
-    this.dropFunctionDesc = dropFunctionDesc;
+  public FunctionWork(DropFunctionDesc DropFunctionDesc) {
+    this.DropFunctionDesc = DropFunctionDesc;
   }
 
-  public createFunctionDesc getCreateFunctionDesc() {
-    return createFunctionDesc;
+  public CreateFunctionDesc getCreateFunctionDesc() {
+    return CreateFunctionDesc;
   }
 
-  public void setCreateFunctionDesc(createFunctionDesc createFunctionDesc) {
-    this.createFunctionDesc = createFunctionDesc;
+  public void setCreateFunctionDesc(CreateFunctionDesc CreateFunctionDesc) {
+    this.CreateFunctionDesc = CreateFunctionDesc;
   }
 
-  public dropFunctionDesc getDropFunctionDesc() {
-    return dropFunctionDesc;
+  public DropFunctionDesc getDropFunctionDesc() {
+    return DropFunctionDesc;
   }
 
-  public void setDropFunctionDesc(dropFunctionDesc dropFunctionDesc) {
-    this.dropFunctionDesc = dropFunctionDesc;
+  public void setDropFunctionDesc(DropFunctionDesc DropFunctionDesc) {
+    this.DropFunctionDesc = DropFunctionDesc;
   }
 }
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java (revision 0)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/ReduceSinkDesc.java (revision 0)
@@ -0,0 +1,176 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.ArrayList;
+
+import static org.apache.hadoop.hive.serde.Constants.SERIALIZATION_SORT_ORDER;
+
+@Explain(displayName = "Reduce Output Operator")
+public class ReduceSinkDesc implements Serializable {
+  private static final long serialVersionUID = 1L;
+  /**
+   * Key columns are passed to reducer in the "key".
+   */
+  private ArrayList keyCols;
+  private ArrayList outputKeyColumnNames;
+  /**
+   * Value columns are passed to reducer in the "value".
+   */
+  private ArrayList valueCols;
+  private ArrayList outputValueColumnNames;
+  /**
+   * Describe how to serialize the key.
+   */
+  private TableDesc keySerializeInfo;
+  /**
+   * Describe how to serialize the value.
+   */
+  private TableDesc valueSerializeInfo;
+
+  /**
+   * The tag for this reducesink descriptor.
+   */
+  private int tag;
+
+  /**
+   * The partition columns (CLUSTER BY or DISTRIBUTE BY in Hive language).
+   * Partition columns decide the reducer that the current row goes to.
+   * Partition columns are not passed to reducer.
+   */
+  private ArrayList partitionCols;
+
+  private int numReducers;
+
+  public ReduceSinkDesc() {
+  }
+
+  public ReduceSinkDesc(ArrayList keyCols,
+      ArrayList valueCols,
+      ArrayList outputKeyColumnNames,
+      ArrayList outputValueColumnNames, int tag,
+      ArrayList partitionCols, int numReducers,
+      TableDesc keySerializeInfo, TableDesc valueSerializeInfo) {
+    this.keyCols = keyCols;
+    this.valueCols = valueCols;
+    this.outputKeyColumnNames = outputKeyColumnNames;
+    this.outputValueColumnNames = outputValueColumnNames;
+    this.tag = tag;
+    this.numReducers = numReducers;
+    this.partitionCols = partitionCols;
+    this.keySerializeInfo = keySerializeInfo;
+    this.valueSerializeInfo = valueSerializeInfo;
+  }
+
+  public ArrayList getOutputKeyColumnNames() {
+    return outputKeyColumnNames;
+  }
+
+  public void setOutputKeyColumnNames(ArrayList outputKeyColumnNames) {
+    this.outputKeyColumnNames = outputKeyColumnNames;
+  }
+
+  public ArrayList getOutputValueColumnNames() {
+    return outputValueColumnNames;
+  }
+
+  public void setOutputValueColumnNames(ArrayList outputValueColumnNames) {
+    this.outputValueColumnNames = outputValueColumnNames;
+  }
+
+  @Explain(displayName = "key expressions")
+  public ArrayList getKeyCols() {
+    return keyCols;
+  }
+
+  public void setKeyCols(ArrayList keyCols) {
+    this.keyCols = keyCols;
+  }
+
+  @Explain(displayName = "value expressions")
+  public ArrayList getValueCols() {
+    return valueCols;
+  }
+
+  public void setValueCols(ArrayList valueCols) {
+    this.valueCols = valueCols;
+  }
+
+  @Explain(displayName = "Map-reduce partition columns")
+  public ArrayList getPartitionCols() {
+    return partitionCols;
+  }
+
+  public void setPartitionCols(ArrayList partitionCols) {
+    this.partitionCols = partitionCols;
+  }
+
+  @Explain(displayName = "tag")
+  public int getTag() {
+    return tag;
+  }
+
+  public void setTag(int tag) {
+    this.tag = tag;
+  }
+
+  /**
+   * Returns the number of reducers for the map-reduce job. -1 means to decide
+   * the number of reducers at runtime. This enables Hive to estimate the number
+   * of reducers based on the map-reduce input data size, which is only
+   * available right before we start the map-reduce job.
+   */
+  public int getNumReducers() {
+    return numReducers;
+  }
+
+  public void setNumReducers(int numReducers) {
+    this.numReducers = numReducers;
+  }
+
+  public TableDesc getKeySerializeInfo() {
+    return keySerializeInfo;
+  }
+
+  public void setKeySerializeInfo(TableDesc keySerializeInfo) {
+    this.keySerializeInfo = keySerializeInfo;
+  }
+
+  public TableDesc getValueSerializeInfo() {
+    return valueSerializeInfo;
+  }
+
+  public void setValueSerializeInfo(TableDesc valueSerializeInfo) {
+    this.valueSerializeInfo = valueSerializeInfo;
+  }
+
+  /**
+   * Returns the sort order of the key columns.
+   *
+   * @return null, which means ascending order for all key columns, or a String
+   *         of the same length as key columns, that consists of only "+"
+   *         (ascending order) and "-" (descending order).
+ */ + @Explain(displayName = "sort order") + public String getOrder() { + return keySerializeInfo.getProperties().getProperty(SERIALIZATION_SORT_ORDER); + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/tableScanDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/tableScanDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/tableScanDesc.java (working copy) @@ -1,50 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -/** - * Table Scan Descriptor Currently, data is only read from a base source as part - * of map-reduce framework. So, nothing is stored in the descriptor. But, more - * things will be added here as table scan is invoked as part of local work. - **/ -@explain(displayName = "TableScan") -public class tableScanDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private String alias; - - @SuppressWarnings("nls") - public tableScanDesc() { - } - - public tableScanDesc(final String alias) { - this.alias = alias; - } - - @explain(displayName = "alias") - public String getAlias() { - return alias; - } - - public void setAlias(String alias) { - this.alias = alias; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/mapJoinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/mapJoinDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/mapJoinDesc.java (working copy) @@ -1,144 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.Iterator; -import java.util.List; -import java.util.Map; -import java.util.Set; -import java.util.Map.Entry; - -/** - * Map Join operator Descriptor implementation. 
- * - */ -@explain(displayName = "Common Join Operator") -public class mapJoinDesc extends joinDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private Map> keys; - private tableDesc keyTblDesc; - private List valueTblDescs; - - private int posBigTable; - - private Map> retainList; - - public mapJoinDesc() { - } - - public mapJoinDesc(final Map> keys, - final tableDesc keyTblDesc, final Map> values, - final List valueTblDescs, ArrayList outputColumnNames, - final int posBigTable, final joinCond[] conds) { - super(values, outputColumnNames, conds); - this.keys = keys; - this.keyTblDesc = keyTblDesc; - this.valueTblDescs = valueTblDescs; - this.posBigTable = posBigTable; - initRetainExprList(); - } - - private void initRetainExprList() { - retainList = new HashMap>(); - Set>> set = super.getExprs().entrySet(); - Iterator>> setIter = set.iterator(); - while (setIter.hasNext()) { - Entry> current = setIter.next(); - List list = new ArrayList(); - for (int i = 0; i < current.getValue().size(); i++) { - list.add(i); - } - retainList.put(current.getKey(), list); - } - } - - public Map> getRetainList() { - return retainList; - } - - public void setRetainList(Map> retainList) { - this.retainList = retainList; - } - - /** - * @return the keys - */ - @explain(displayName = "keys") - public Map> getKeys() { - return keys; - } - - /** - * @param keys - * the keys to set - */ - public void setKeys(Map> keys) { - this.keys = keys; - } - - /** - * @return the position of the big table not in memory - */ - @explain(displayName = "Position of Big Table") - public int getPosBigTable() { - return posBigTable; - } - - /** - * @param posBigTable - * the position of the big table not in memory - */ - public void setPosBigTable(int posBigTable) { - this.posBigTable = posBigTable; - } - - /** - * @return the keyTblDesc - */ - public tableDesc getKeyTblDesc() { - return keyTblDesc; - } - - /** - * @param keyTblDesc - * the keyTblDesc to set - */ - public void setKeyTblDesc(tableDesc keyTblDesc) { - this.keyTblDesc = keyTblDesc; - } - - /** - * @return the valueTblDescs - */ - public List getValueTblDescs() { - return valueTblDescs; - } - - /** - * @param valueTblDescs - * the valueTblDescs to set - */ - public void setValueTblDescs(List valueTblDescs) { - this.valueTblDescs = valueTblDescs; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java (revision 0) @@ -0,0 +1,205 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Fetch Operator") +public class FetchWork implements Serializable { + private static final long serialVersionUID = 1L; + + private String tblDir; + private TableDesc tblDesc; + + private List partDir; + private List partDesc; + + private int limit; + + /** + * Serialization Null Format for the serde used to fetch data + */ + private String serializationNullFormat = "NULL"; + + public FetchWork() { + } + + public FetchWork(String tblDir, TableDesc tblDesc) { + this(tblDir, tblDesc, -1); + } + + public FetchWork(String tblDir, TableDesc tblDesc, int limit) { + this.tblDir = tblDir; + this.tblDesc = tblDesc; + this.limit = limit; + } + + public FetchWork(List partDir, List partDesc) { + this(partDir, partDesc, -1); + } + + public FetchWork(List partDir, List partDesc, int limit) { + this.partDir = partDir; + this.partDesc = partDesc; + this.limit = limit; + } + + public String getSerializationNullFormat() { + return serializationNullFormat; + } + + public void setSerializationNullFormat(String format) { + serializationNullFormat = format; + } + + /** + * @return the tblDir + */ + public String getTblDir() { + return tblDir; + } + + /** + * @return the tblDir + */ + public Path getTblDirPath() { + return new Path(tblDir); + } + + /** + * @param tblDir + * the tblDir to set + */ + public void setTblDir(String tblDir) { + this.tblDir = tblDir; + } + + /** + * @return the tblDesc + */ + public TableDesc getTblDesc() { + return tblDesc; + } + + /** + * @param tblDesc + * the tblDesc to set + */ + public void setTblDesc(TableDesc tblDesc) { + this.tblDesc = tblDesc; + } + + /** + * @return the partDir + */ + public List getPartDir() { + return partDir; + } + + public List getPartDirPath() { + return FetchWork.convertStringToPathArray(partDir); + } + + public static List convertPathToStringArray(List paths) { + if (paths == null) { + return null; + } + + List pathsStr = new ArrayList(); + for (Path path : paths) { + pathsStr.add(path.toString()); + } + + return pathsStr; + } + + public static List convertStringToPathArray(List paths) { + if (paths == null) { + return null; + } + + List pathsStr = new ArrayList(); + for (String path : paths) { + pathsStr.add(new Path(path)); + } + + return pathsStr; + } + + /** + * @param partDir + * the partDir to set + */ + public void setPartDir(List partDir) { + this.partDir = partDir; + } + + /** + * @return the partDesc + */ + public List getPartDesc() { + return partDesc; + } + + /** + * @param partDesc + * the partDesc to set + */ + public void setPartDesc(List partDesc) { + this.partDesc = partDesc; + } + + /** + * @return the limit + */ + @Explain(displayName = "limit") + public int getLimit() { + return limit; + } + + /** + * @param limit + * the limit to set + */ + public void setLimit(int limit) { + this.limit = limit; + } + + @Override + public String toString() { + if (tblDir != null) { + return new String("table = " + tblDir); + } + + if (partDir == null) { + return "null fetchwork"; + } + + String ret = new String("partition = "); + for (String part : partDir) { + ret = ret.concat(part); + } + + return ret; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java (revision 0) +++ 
ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeGenericFuncDesc.java (revision 0) @@ -0,0 +1,192 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.ql.exec.FunctionRegistry; +import org.apache.hadoop.hive.ql.exec.UDFArgumentException; +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; +import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; +import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; + +/** + * Describes a GenericFunc node. + */ +public class ExprNodeGenericFuncDesc extends ExprNodeDesc implements + Serializable { + + private static final long serialVersionUID = 1L; + + /** + * In case genericUDF is Serializable, we will serialize the object. + * + * In case genericUDF does not implement Serializable, Java will remember the + * class of genericUDF and creates a new instance when deserialized. This is + * exactly what we want. 
+ */ + private GenericUDF genericUDF; + private List childExprs; + + public ExprNodeGenericFuncDesc() { + } + + public ExprNodeGenericFuncDesc(TypeInfo typeInfo, GenericUDF genericUDF, + List children) { + super(typeInfo); + assert (genericUDF != null); + this.genericUDF = genericUDF; + childExprs = children; + } + + public GenericUDF getGenericUDF() { + return genericUDF; + } + + public void setGenericUDF(GenericUDF genericUDF) { + this.genericUDF = genericUDF; + } + + public List getChildExprs() { + return childExprs; + } + + public void setChildExprs(List children) { + childExprs = children; + } + + @Override + public List getChildren() { + return childExprs; + } + + @Override + public String toString() { + StringBuilder sb = new StringBuilder(); + sb.append(genericUDF.getClass().toString()); + sb.append("("); + for (int i = 0; i < childExprs.size(); i++) { + if (i > 0) { + sb.append(", "); + } + sb.append(childExprs.get(i).toString()); + } + sb.append("("); + sb.append(")"); + return sb.toString(); + } + + @Explain(displayName = "expr") + @Override + public String getExprString() { + // Get the children expr strings + String[] childrenExprStrings = new String[childExprs.size()]; + for (int i = 0; i < childrenExprStrings.length; i++) { + childrenExprStrings[i] = childExprs.get(i).getExprString(); + } + + return genericUDF.getDisplayString(childrenExprStrings); + } + + @Override + public List getCols() { + List colList = new ArrayList(); + if (childExprs != null) { + int pos = 0; + while (pos < childExprs.size()) { + List colCh = childExprs.get(pos).getCols(); + colList = Utilities.mergeUniqElems(colList, colCh); + pos++; + } + } + + return colList; + } + + @Override + public ExprNodeDesc clone() { + List cloneCh = new ArrayList(childExprs.size()); + for (ExprNodeDesc ch : childExprs) { + cloneCh.add(ch.clone()); + } + ExprNodeGenericFuncDesc clone = new ExprNodeGenericFuncDesc(typeInfo, + FunctionRegistry.cloneGenericUDF(genericUDF), cloneCh); + return clone; + } + + /** + * Create a ExprNodeGenericFuncDesc based on the genericUDFClass and the + * children parameters. 
+ * + * @throws UDFArgumentException + */ + public static ExprNodeGenericFuncDesc newInstance(GenericUDF genericUDF, + List children) throws UDFArgumentException { + ObjectInspector[] childrenOIs = new ObjectInspector[children.size()]; + for (int i = 0; i < childrenOIs.length; i++) { + childrenOIs[i] = TypeInfoUtils + .getStandardWritableObjectInspectorFromTypeInfo(children.get(i) + .getTypeInfo()); + } + + ObjectInspector oi = genericUDF.initialize(childrenOIs); + return new ExprNodeGenericFuncDesc(TypeInfoUtils + .getTypeInfoFromObjectInspector(oi), genericUDF, children); + } + + @Override + public boolean isSame(Object o) { + if (!(o instanceof ExprNodeGenericFuncDesc)) { + return false; + } + ExprNodeGenericFuncDesc dest = (ExprNodeGenericFuncDesc) o; + if (!typeInfo.equals(dest.getTypeInfo()) + || !genericUDF.getClass().equals(dest.getGenericUDF().getClass())) { + return false; + } + + if (genericUDF instanceof GenericUDFBridge) { + GenericUDFBridge bridge = (GenericUDFBridge) genericUDF; + GenericUDFBridge bridge2 = (GenericUDFBridge) dest.getGenericUDF(); + if (!bridge.getUdfClass().equals(bridge2.getUdfClass()) + || !bridge.getUdfName().equals(bridge2.getUdfName()) + || bridge.isOperator() != bridge2.isOperator()) { + return false; + } + } + + if (childExprs.size() != dest.getChildExprs().size()) { + return false; + } + + for (int pos = 0; pos < childExprs.size(); pos++) { + if (!childExprs.get(pos).isSame(dest.getChildExprs().get(pos))) { + return false; + } + } + + return true; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowPartitionsDesc.java (revision 0) @@ -0,0 +1,93 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Show Partitions") +public class ShowPartitionsDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String tabName; + Path resFile; + /** + * table name for the result of show tables + */ + private final String table = "showpartitions"; + /** + * thrift ddl for the result of show tables + */ + private final String schema = "partition#string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + /** + * @param tabName + * Name of the table whose partitions need to be listed + * @param resFile + * File to store the results in + */ + public ShowPartitionsDesc(String tabName, Path resFile) { + this.tabName = tabName; + this.resFile = resFile; + } + + /** + * @return the name of the table + */ + @Explain(displayName = "table") + public String getTabName() { + return tabName; + } + + /** + * @param tabName + * the table whose partitions have to be listed + */ + public void setTabName(String tabName) { + this.tabName = tabName; + } + + /** + * @return the results file + */ + public Path getResFile() { + return resFile; + } + + @Explain(displayName = "result file", normalExplain = false) + public String getResFileString() { + return getResFile().getName(); + } + + /** + * @param resFile + * the results file to be used to return the results + */ + public void setResFile(Path resFile) { + this.resFile = resFile; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/createViewDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/createViewDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/createViewDesc.java (working copy) @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.ql.exec.Utilities; - -@explain(displayName = "Create View") -public class createViewDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private String viewName; - private String originalText; - private String expandedText; - private List schema; - private String comment; - private boolean ifNotExists; - - public createViewDesc(String viewName, List schema, - String comment, boolean ifNotExists) { - this.viewName = viewName; - this.schema = schema; - this.comment = comment; - this.ifNotExists = ifNotExists; - } - - @explain(displayName = "name") - public String getViewName() { - return viewName; - } - - public void setViewName(String viewName) { - this.viewName = viewName; - } - - @explain(displayName = "original text") - public String getViewOriginalText() { - return originalText; - } - - public void setViewOriginalText(String originalText) { - this.originalText = originalText; - } - - @explain(displayName = "expanded text") - public String getViewExpandedText() { - return expandedText; - } - - public void setViewExpandedText(String expandedText) { - this.expandedText = expandedText; - } - - @explain(displayName = "columns") - public List getSchemaString() { - return Utilities.getFieldSchemaString(schema); - } - - public List getSchema() { - return schema; - } - - public void setSchema(List schema) { - this.schema = schema; - } - - @explain(displayName = "comment") - public String getComment() { - return comment; - } - - public void setComment(String comment) { - this.comment = comment; - } - - @explain(displayName = "if not exists") - public boolean getIfNotExists() { - return ifNotExists; - } - - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/Explain.java (revision 0) @@ -0,0 +1,29 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; + +@Retention(RetentionPolicy.RUNTIME) +public @interface Explain { + String displayName() default ""; + + boolean normalExplain() default true; +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/scriptDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/scriptDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/scriptDesc.java (working copy) @@ -1,111 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.ql.exec.RecordReader; -import org.apache.hadoop.hive.ql.exec.RecordWriter; - -@explain(displayName = "Transform Operator") -public class scriptDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String scriptCmd; - // Describe how to deserialize data back from user script - private tableDesc scriptOutputInfo; - private Class inRecordWriterClass; - - // Describe how to serialize data out to user script - private tableDesc scriptInputInfo; - private Class outRecordReaderClass; - - public scriptDesc() { - } - - public scriptDesc(final String scriptCmd, final tableDesc scriptInputInfo, - final Class inRecordWriterClass, - final tableDesc scriptOutputInfo, - final Class outRecordReaderClass) { - - this.scriptCmd = scriptCmd; - this.scriptInputInfo = scriptInputInfo; - this.inRecordWriterClass = inRecordWriterClass; - this.scriptOutputInfo = scriptOutputInfo; - this.outRecordReaderClass = outRecordReaderClass; - } - - @explain(displayName = "command") - public String getScriptCmd() { - return scriptCmd; - } - - public void setScriptCmd(final String scriptCmd) { - this.scriptCmd = scriptCmd; - } - - @explain(displayName = "output info") - public tableDesc getScriptOutputInfo() { - return scriptOutputInfo; - } - - public void setScriptOutputInfo(final tableDesc scriptOutputInfo) { - this.scriptOutputInfo = scriptOutputInfo; - } - - public tableDesc getScriptInputInfo() { - return scriptInputInfo; - } - - public void setScriptInputInfo(tableDesc scriptInputInfo) { - this.scriptInputInfo = scriptInputInfo; - } - - /** - * @return the outRecordReaderClass - */ - public Class getOutRecordReaderClass() { - return outRecordReaderClass; - } - - /** - * @param outRecordReaderClass - * the outRecordReaderClass to set - */ - public void setOutRecordReaderClass( - Class outRecordReaderClass) { - this.outRecordReaderClass = outRecordReaderClass; - } - - /** - * @return the inRecordWriterClass - */ - public Class getInRecordWriterClass() { - return inRecordWriterClass; - } - - /** 
- * @param inRecordWriterClass - * the inRecordWriterClass to set - */ - public void setInRecordWriterClass( - Class inRecordWriterClass) { - this.inRecordWriterClass = inRecordWriterClass; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/forwardDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/forwardDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/forwardDesc.java (working copy) @@ -1,32 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Forward") -public class forwardDesc implements Serializable { - private static final long serialVersionUID = 1L; - - @SuppressWarnings("nls") - public forwardDesc() { - // throw new - // RuntimeException("This class does not need to be instantiated"); - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/partitionDesc.java (working copy) @@ -1,250 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.File; -import java.io.Serializable; -import java.net.URI; -import java.util.Enumeration; -import java.util.Properties; - -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; -import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hadoop.mapred.InputFormat; - -@explain(displayName = "Partition") -public class partitionDesc implements Serializable, Cloneable { - private static final long serialVersionUID = 2L; - private tableDesc table; - private java.util.LinkedHashMap partSpec; - private java.lang.Class deserializerClass; - private Class inputFileFormatClass; - private Class outputFileFormatClass; - private java.util.Properties properties; - private String serdeClassName; - private transient String baseFileName; - - public partitionDesc() { - } - - public partitionDesc(final tableDesc table, - final java.util.LinkedHashMap partSpec) { - this(table, partSpec, null, null, null, null, null); - } - - public partitionDesc(final tableDesc table, - final java.util.LinkedHashMap partSpec, - final Class serdeClass, - final Class inputFileFormatClass, - final Class outputFormat, final java.util.Properties properties, - final String serdeClassName) { - this.table = table; - this.partSpec = partSpec; - deserializerClass = serdeClass; - this.inputFileFormatClass = inputFileFormatClass; - if (outputFormat != null) { - outputFileFormatClass = HiveFileFormatUtils - .getOutputFormatSubstitute(outputFormat); - } - this.properties = properties; - if (properties != null) { - this.serdeClassName = properties - .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); - } - } - - public partitionDesc(final org.apache.hadoop.hive.ql.metadata.Partition part) - throws HiveException { - table = Utilities.getTableDesc(part.getTable()); - partSpec = part.getSpec(); - deserializerClass = part.getDeserializer().getClass(); - inputFileFormatClass = part.getInputFormatClass(); - outputFileFormatClass = part.getOutputFormatClass(); - properties = part.getSchema(); - serdeClassName = properties - .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); - ; - } - - @explain(displayName = "") - public tableDesc getTableDesc() { - return table; - } - - public void setTableDesc(final tableDesc table) { - this.table = table; - } - - @explain(displayName = "partition values") - public java.util.LinkedHashMap getPartSpec() { - return partSpec; - } - - public void setPartSpec(final java.util.LinkedHashMap partSpec) { - this.partSpec = partSpec; - } - - public java.lang.Class getDeserializerClass() { - if (deserializerClass == null && table != null) { - setDeserializerClass(table.getDeserializerClass()); - } - return deserializerClass; - } - - public void setDeserializerClass( - final java.lang.Class serdeClass) { - deserializerClass = serdeClass; - } - - public Class getInputFileFormatClass() { - if (inputFileFormatClass == null && table != null) { - setInputFileFormatClass(table.getInputFileFormatClass()); - } - return inputFileFormatClass; - } - - /** - * Return a deserializer object corresponding to the tableDesc - */ - public Deserializer getDeserializer() throws Exception { - Deserializer de = deserializerClass.newInstance(); - de.initialize(null, properties); - return de; - } - - public void setInputFileFormatClass( - final Class inputFileFormatClass) 
{ - this.inputFileFormatClass = inputFileFormatClass; - } - - public Class getOutputFileFormatClass() { - if (outputFileFormatClass == null && table != null) { - setOutputFileFormatClass(table.getOutputFileFormatClass()); - } - return outputFileFormatClass; - } - - public void setOutputFileFormatClass(final Class outputFileFormatClass) { - this.outputFileFormatClass = HiveFileFormatUtils - .getOutputFormatSubstitute(outputFileFormatClass); - } - - @explain(displayName = "properties", normalExplain = false) - public java.util.Properties getProperties() { - if (table != null) { - return table.getProperties(); - } - return properties; - } - - public void setProperties(final java.util.Properties properties) { - this.properties = properties; - } - - /** - * @return the serdeClassName - */ - @explain(displayName = "serde") - public String getSerdeClassName() { - if (serdeClassName == null && table != null) { - setSerdeClassName(table.getSerdeClassName()); - } - return serdeClassName; - } - - /** - * @param serdeClassName - * the serde Class Name to set - */ - public void setSerdeClassName(String serdeClassName) { - this.serdeClassName = serdeClassName; - } - - @explain(displayName = "name") - public String getTableName() { - return getProperties().getProperty( - org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME); - } - - @explain(displayName = "input format") - public String getInputFileFormatClassName() { - return getInputFileFormatClass().getName(); - } - - @explain(displayName = "output format") - public String getOutputFileFormatClassName() { - return getOutputFileFormatClass().getName(); - } - - @explain(displayName = "base file name", normalExplain = false) - public String getBaseFileName() { - return baseFileName; - } - - @Override - public partitionDesc clone() { - partitionDesc ret = new partitionDesc(); - - ret.setSerdeClassName(serdeClassName); - ret.setDeserializerClass(deserializerClass); - ret.inputFileFormatClass = inputFileFormatClass; - ret.outputFileFormatClass = outputFileFormatClass; - if (properties != null) { - Properties newProp = new Properties(); - Enumeration keysProp = properties.keys(); - while (keysProp.hasMoreElements()) { - Object key = keysProp.nextElement(); - newProp.put(key, properties.get(key)); - } - ret.setProperties(newProp); - } - ret.table = (tableDesc) table.clone(); - // The partition spec is not present - if (partSpec != null) { - ret.partSpec = new java.util.LinkedHashMap(); - ret.partSpec.putAll(partSpec); - } - return ret; - } - - /** - * Attempt to derive a virtual base file name property from the - * path. If path format is unrecognized, just use the full path. - * - * @param path - * URI to the partition file - */ - void deriveBaseFileName(String path) { - if (path == null) { - return; - } - try { - URI uri = new URI(path); - File file = new File(uri); - baseFileName = file.getName(); - } catch (Exception ex) { - // This could be due to either URI syntax error or File constructor - // illegal arg; we don't really care which one it is. - baseFileName = path; - } - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/moveWork.java (working copy) @@ -1,103 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.Set; - -import org.apache.hadoop.hive.ql.hooks.ReadEntity; -import org.apache.hadoop.hive.ql.hooks.WriteEntity; - -@explain(displayName = "Move Operator") -public class moveWork implements Serializable { - private static final long serialVersionUID = 1L; - private loadTableDesc loadTableWork; - private loadFileDesc loadFileWork; - - private boolean checkFileFormat; - - /** - * ReadEntitites that are passed to the hooks. - */ - protected Set inputs; - /** - * List of WriteEntities that are passed to the hooks. - */ - protected Set outputs; - - public moveWork() { - } - - public moveWork(Set inputs, Set outputs) { - this.inputs = inputs; - this.outputs = outputs; - } - - public moveWork(Set inputs, Set outputs, - final loadTableDesc loadTableWork, final loadFileDesc loadFileWork, - boolean checkFileFormat) { - this(inputs, outputs); - this.loadTableWork = loadTableWork; - this.loadFileWork = loadFileWork; - this.checkFileFormat = checkFileFormat; - } - - @explain(displayName = "tables") - public loadTableDesc getLoadTableWork() { - return loadTableWork; - } - - public void setLoadTableWork(final loadTableDesc loadTableWork) { - this.loadTableWork = loadTableWork; - } - - @explain(displayName = "files") - public loadFileDesc getLoadFileWork() { - return loadFileWork; - } - - public void setLoadFileWork(final loadFileDesc loadFileWork) { - this.loadFileWork = loadFileWork; - } - - public boolean getCheckFileFormat() { - return checkFileFormat; - } - - public void setCheckFileFormat(boolean checkFileFormat) { - this.checkFileFormat = checkFileFormat; - } - - public Set getInputs() { - return inputs; - } - - public Set getOutputs() { - return outputs; - } - - public void setInputs(Set inputs) { - this.inputs = inputs; - } - - public void setOutputs(Set outputs) { - this.outputs = outputs; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/dropFunctionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/dropFunctionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/dropFunctionDesc.java (working copy) @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Drop Function") -public class dropFunctionDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private String functionName; - - public dropFunctionDesc(String functionName) { - this.functionName = functionName; - } - - @explain(displayName = "name") - public String getFunctionName() { - return functionName; - } - - public void setFunctionName(String functionName) { - this.functionName = functionName; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableDesc.java (revision 0) @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.ql.exec.Utilities; + +@Explain(displayName = "Create Table") +public class CreateTableDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String tableName; + boolean isExternal; + List cols; + List partCols; + List bucketCols; + List sortCols; + int numBuckets; + String fieldDelim; + String fieldEscape; + String collItemDelim; + String mapKeyDelim; + String lineDelim; + String comment; + String inputFormat; + String outputFormat; + String location; + String serName; + Map mapProp; + boolean ifNotExists; + + public CreateTableDesc(String tableName, boolean isExternal, + List cols, List partCols, + List bucketCols, List sortCols, int numBuckets, + String fieldDelim, String fieldEscape, String collItemDelim, + String mapKeyDelim, String lineDelim, String comment, String inputFormat, + String outputFormat, String location, String serName, + Map mapProp, boolean ifNotExists) { + this.tableName = tableName; + this.isExternal = isExternal; + this.bucketCols = bucketCols; + this.sortCols = sortCols; + this.collItemDelim = collItemDelim; + this.cols = cols; + this.comment = comment; + this.fieldDelim = fieldDelim; + this.fieldEscape = fieldEscape; + this.inputFormat = inputFormat; + this.outputFormat = outputFormat; + this.lineDelim = lineDelim; + this.location = location; + this.mapKeyDelim = mapKeyDelim; + this.numBuckets = numBuckets; + this.partCols = partCols; + this.serName = serName; + this.mapProp = mapProp; + this.ifNotExists = ifNotExists; + } + + @Explain(displayName = "if not exists") + public boolean getIfNotExists() { + return ifNotExists; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + @Explain(displayName = "name") + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + public List getCols() { + return cols; + } + + @Explain(displayName = "columns") + public List getColsString() { + return Utilities.getFieldSchemaString(getCols()); + } + + public void setCols(List cols) { + this.cols = cols; + } + + public List getPartCols() { + return partCols; + } + + @Explain(displayName = "partition columns") + public List getPartColsString() { + return Utilities.getFieldSchemaString(getPartCols()); + } + + public void setPartCols(List partCols) { + this.partCols = partCols; + } + + @Explain(displayName = "bucket columns") + public List getBucketCols() { + return bucketCols; + } + + public void setBucketCols(List bucketCols) { + this.bucketCols = bucketCols; + } + + @Explain(displayName = "# buckets") + public int getNumBuckets() { + return numBuckets; + } + + public void setNumBuckets(int numBuckets) { + this.numBuckets = numBuckets; + } + + @Explain(displayName = "field delimiter") + public String getFieldDelim() { + return fieldDelim; + } + + public void setFieldDelim(String fieldDelim) { + this.fieldDelim = fieldDelim; + } + + @Explain(displayName = "field escape") + public String getFieldEscape() { + return fieldEscape; + } + + public void setFieldEscape(String fieldEscape) { + this.fieldEscape = fieldEscape; + } + + @Explain(displayName = "collection delimiter") + public String getCollItemDelim() { + return collItemDelim; + } + + public 
void setCollItemDelim(String collItemDelim) { + this.collItemDelim = collItemDelim; + } + + @Explain(displayName = "map key delimiter") + public String getMapKeyDelim() { + return mapKeyDelim; + } + + public void setMapKeyDelim(String mapKeyDelim) { + this.mapKeyDelim = mapKeyDelim; + } + + @Explain(displayName = "line delimiter") + public String getLineDelim() { + return lineDelim; + } + + public void setLineDelim(String lineDelim) { + this.lineDelim = lineDelim; + } + + @Explain(displayName = "comment") + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Explain(displayName = "input format") + public String getInputFormat() { + return inputFormat; + } + + public void setInputFormat(String inputFormat) { + this.inputFormat = inputFormat; + } + + @Explain(displayName = "output format") + public String getOutputFormat() { + return outputFormat; + } + + public void setOutputFormat(String outputFormat) { + this.outputFormat = outputFormat; + } + + @Explain(displayName = "location") + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + + @Explain(displayName = "isExternal") + public boolean isExternal() { + return isExternal; + } + + public void setExternal(boolean isExternal) { + this.isExternal = isExternal; + } + + /** + * @return the sortCols + */ + @Explain(displayName = "sort columns") + public List getSortCols() { + return sortCols; + } + + /** + * @param sortCols + * the sortCols to set + */ + public void setSortCols(List sortCols) { + this.sortCols = sortCols; + } + + /** + * @return the serDeName + */ + @Explain(displayName = "serde name") + public String getSerName() { + return serName; + } + + /** + * @param serName + * the serName to set + */ + public void setSerName(String serName) { + this.serName = serName; + } + + /** + * @return the serDe properties + */ + @Explain(displayName = "serde properties") + public Map getMapProp() { + return mapProp; + } + + /** + * @param mapProp + * the map properties to set + */ + public void setMapProp(Map mapProp) { + this.mapProp = mapProp; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTablesDesc.java (revision 0) @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Show Tables") +public class ShowTablesDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String pattern; + Path resFile; + /** + * table name for the result of show tables + */ + private final String table = "show"; + /** + * thrift ddl for the result of show tables + */ + private final String schema = "tab_name#string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + /** + * @param resFile + */ + public ShowTablesDesc(Path resFile) { + this.resFile = resFile; + pattern = null; + } + + /** + * @param pattern + * names of tables to show + */ + public ShowTablesDesc(Path resFile, String pattern) { + this.resFile = resFile; + this.pattern = pattern; + } + + /** + * @return the pattern + */ + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + /** + * @param pattern + * the pattern to set + */ + public void setPattern(String pattern) { + this.pattern = pattern; + } + + /** + * @return the resFile + */ + public Path getResFile() { + return resFile; + } + + @Explain(displayName = "result file", normalExplain = false) + public String getResFileString() { + return getResFile().getName(); + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(Path resFile) { + this.resFile = resFile; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/dropTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/dropTableDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/dropTableDesc.java (working copy) @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; -import java.util.Map; - -@explain(displayName = "Drop Table") -public class dropTableDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - - String tableName; - List> partSpecs; - boolean expectView; - - /** - * @param tableName - */ - public dropTableDesc(String tableName, boolean expectView) { - this.tableName = tableName; - partSpecs = null; - this.expectView = expectView; - } - - public dropTableDesc(String tableName, List> partSpecs) { - this.tableName = tableName; - this.partSpecs = partSpecs; - expectView = false; - } - - /** - * @return the tableName - */ - @explain(displayName = "table") - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the partSpecs - */ - public List> getPartSpecs() { - return partSpecs; - } - - /** - * @param partSpecs - * the partSpecs to set - */ - public void setPartSpecs(List> partSpecs) { - this.partSpecs = partSpecs; - } - - /** - * @return whether to expect a view being dropped - */ - public boolean getExpectView() { - return expectView; - } - - /** - * @param expectView - * set whether to expect a view being dropped - */ - public void setExpectView(boolean expectView) { - this.expectView = expectView; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/copyWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/copyWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/copyWork.java (working copy) @@ -1,54 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Copy") -public class copyWork implements Serializable { - private static final long serialVersionUID = 1L; - private String fromPath; - private String toPath; - - public copyWork() { - } - - public copyWork(final String fromPath, final String toPath) { - this.fromPath = fromPath; - this.toPath = toPath; - } - - @explain(displayName = "source") - public String getFromPath() { - return fromPath; - } - - public void setFromPath(final String fromPath) { - this.fromPath = fromPath; - } - - @explain(displayName = "destination") - public String getToPath() { - return toPath; - } - - public void setToPath(final String toPath) { - this.toPath = toPath; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LimitDesc.java (revision 0) @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Limit") +public class LimitDesc implements Serializable { + private static final long serialVersionUID = 1L; + private int limit; + + public LimitDesc() { + } + + public LimitDesc(final int limit) { + this.limit = limit; + } + + public int getLimit() { + return limit; + } + + public void setLimit(final int limit) { + this.limit = limit; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeNullDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeNullDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeNullDesc.java (revision 0) @@ -0,0 +1,61 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; +import org.apache.hadoop.io.NullWritable; + +public class ExprNodeNullDesc extends ExprNodeDesc implements Serializable { + + private static final long serialVersionUID = 1L; + + public ExprNodeNullDesc() { + super(TypeInfoFactory + .getPrimitiveTypeInfoFromPrimitiveWritable(NullWritable.class)); + } + + public Object getValue() { + return null; + } + + @Explain(displayName = "expr") + @Override + public String getExprString() { + return "null"; + } + + @Override + public ExprNodeDesc clone() { + return new ExprNodeNullDesc(); + } + + @Override + public boolean isSame(Object o) { + if (!(o instanceof ExprNodeNullDesc)) { + return false; + } + if (!typeInfo.equals(((ExprNodeNullDesc) o).getTypeInfo())) { + return false; + } + + return true; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExplosionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExplosionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExplosionDesc.java (revision 0) @@ -0,0 +1,52 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Explosion") +public class ExplosionDesc implements Serializable { + private static final long serialVersionUID = 1L; + private String fieldName; + private int position; + + public ExplosionDesc() { + } + + public ExplosionDesc(final String fieldName, final int position) { + this.fieldName = fieldName; + this.position = position; + } + + public String getFieldName() { + return fieldName; + } + + public void setFieldName(final String fieldName) { + this.fieldName = fieldName; + } + + public int getPosition() { + return position; + } + + public void setPosition(final int position) { + this.position = position; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeDesc.java (revision 0) @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.plan;
+
+import java.io.Serializable;
+import java.util.List;
+
+import org.apache.hadoop.hive.ql.lib.Node;
+import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo;
+
+public abstract class ExprNodeDesc implements Serializable, Node {
+  private static final long serialVersionUID = 1L;
+  TypeInfo typeInfo;
+
+  public ExprNodeDesc() {
+  }
+
+  public ExprNodeDesc(TypeInfo typeInfo) {
+    this.typeInfo = typeInfo;
+    if (typeInfo == null) {
+      throw new RuntimeException("typeInfo cannot be null!");
+    }
+  }
+
+  @Override
+  public abstract ExprNodeDesc clone();
+
+  // Can't use equals() here because the graph walker depends on object
+  // identity: the default walker processes a node only after its children
+  // have been processed, and that comparison needs object equality.
+  // isSame() means that the objects are semantically equal.
+  public abstract boolean isSame(Object o);
+
+  public TypeInfo getTypeInfo() {
+    return typeInfo;
+  }
+
+  public void setTypeInfo(TypeInfo typeInfo) {
+    this.typeInfo = typeInfo;
+  }
+
+  public String getExprString() {
+    assert (false);
+    return null;
+  }
+
+  @Explain(displayName = "type")
+  public String getTypeString() {
+    return typeInfo.getTypeName();
+  }
+
+  public List<String> getCols() {
+    return null;
+  }
+
+  @Override
+  public List<? extends Node> getChildren() {
+    return null;
+  }
+
+  @Override
+  public String getName() {
+    return this.getClass().getName();
+  }
+
+}
Index: ql/src/java/org/apache/hadoop/hive/ql/plan/filterDesc.java
===================================================================
--- ql/src/java/org/apache/hadoop/hive/ql/plan/filterDesc.java (revision 901960)
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/filterDesc.java (working copy)
@@ -1,115 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements. See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership. The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License. You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; - -@explain(displayName = "Filter Operator") -public class filterDesc implements Serializable { - - /** - * sampleDesc is used to keep track of the sampling descriptor - */ - public static class sampleDesc { - // The numerator of the TABLESAMPLE clause - private int numerator; - - // The denominator of the TABLESAMPLE clause - private int denominator; - - // Input files can be pruned - private boolean inputPruning; - - public sampleDesc() { - } - - public sampleDesc(int numerator, int denominator, - List tabBucketCols, boolean inputPruning) { - this.numerator = numerator; - this.denominator = denominator; - this.inputPruning = inputPruning; - } - - public int getNumerator() { - return numerator; - } - - public int getDenominator() { - return denominator; - } - - public boolean getInputPruning() { - return inputPruning; - } - } - - private static final long serialVersionUID = 1L; - private org.apache.hadoop.hive.ql.plan.exprNodeDesc predicate; - private boolean isSamplingPred; - private transient sampleDesc sampleDescr; - - public filterDesc() { - } - - public filterDesc( - final org.apache.hadoop.hive.ql.plan.exprNodeDesc predicate, - boolean isSamplingPred) { - this.predicate = predicate; - this.isSamplingPred = isSamplingPred; - sampleDescr = null; - } - - public filterDesc( - final org.apache.hadoop.hive.ql.plan.exprNodeDesc predicate, - boolean isSamplingPred, final sampleDesc sampleDescr) { - this.predicate = predicate; - this.isSamplingPred = isSamplingPred; - this.sampleDescr = sampleDescr; - } - - @explain(displayName = "predicate") - public org.apache.hadoop.hive.ql.plan.exprNodeDesc getPredicate() { - return predicate; - } - - public void setPredicate( - final org.apache.hadoop.hive.ql.plan.exprNodeDesc predicate) { - this.predicate = predicate; - } - - @explain(displayName = "isSamplingPred", normalExplain = false) - public boolean getIsSamplingPred() { - return isSamplingPred; - } - - public void setIsSamplingPred(final boolean isSamplingPred) { - this.isSamplingPred = isSamplingPred; - } - - @explain(displayName = "sampleDesc", normalExplain = false) - public sampleDesc getSampleDescr() { - return sampleDescr; - } - - public void setSampleDescr(final sampleDesc sampleDescr) { - this.sampleDescr = sampleDescr; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DescTableDesc.java (revision 0) @@ -0,0 +1,131 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.HashMap; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Describe Table") +public class DescTableDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + String tableName; + HashMap partSpec; + Path resFile; + boolean isExt; + /** + * table name for the result of describe table + */ + private final String table = "describe"; + /** + * thrift ddl for the result of describe table + */ + private final String schema = "col_name,data_type,comment#string:string:string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + /** + * @param isExt + * @param partSpec + * @param resFile + * @param tableName + */ + public DescTableDesc(Path resFile, String tableName, + HashMap partSpec, boolean isExt) { + this.isExt = isExt; + this.partSpec = partSpec; + this.resFile = resFile; + this.tableName = tableName; + } + + /** + * @return the isExt + */ + public boolean isExt() { + return isExt; + } + + /** + * @param isExt + * the isExt to set + */ + public void setExt(boolean isExt) { + this.isExt = isExt; + } + + /** + * @return the tableName + */ + @Explain(displayName = "table") + public String getTableName() { + return tableName; + } + + /** + * @param tableName + * the tableName to set + */ + public void setTableName(String tableName) { + this.tableName = tableName; + } + + /** + * @return the partSpec + */ + @Explain(displayName = "partition") + public HashMap getPartSpec() { + return partSpec; + } + + /** + * @param partSpec + * the partSpec to set + */ + public void setPartSpecs(HashMap partSpec) { + this.partSpec = partSpec; + } + + /** + * @return the resFile + */ + public Path getResFile() { + return resFile; + } + + @Explain(displayName = "result file", normalExplain = false) + public String getResFileString() { + return getResFile().getName(); + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(Path resFile) { + this.resFile = resFile; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadTableDesc.java (revision 0) @@ -0,0 +1,89 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.HashMap; + +public class LoadTableDesc extends org.apache.hadoop.hive.ql.plan.LoadDesc + implements Serializable { + private static final long serialVersionUID = 1L; + private boolean replace; + private String tmpDir; + + // TODO: the below seems like they should just be combined into PartitionDesc + private org.apache.hadoop.hive.ql.plan.TableDesc table; + private HashMap partitionSpec; + + public LoadTableDesc() { + } + + public LoadTableDesc(final String sourceDir, final String tmpDir, + final org.apache.hadoop.hive.ql.plan.TableDesc table, + final HashMap partitionSpec, final boolean replace) { + + super(sourceDir); + this.tmpDir = tmpDir; + this.table = table; + this.partitionSpec = partitionSpec; + this.replace = replace; + } + + public LoadTableDesc(final String sourceDir, final String tmpDir, + final org.apache.hadoop.hive.ql.plan.TableDesc table, + final HashMap partitionSpec) { + this(sourceDir, tmpDir, table, partitionSpec, true); + } + + @Explain(displayName = "tmp directory", normalExplain = false) + public String getTmpDir() { + return tmpDir; + } + + public void setTmpDir(final String tmp) { + tmpDir = tmp; + } + + @Explain(displayName = "table") + public TableDesc getTable() { + return table; + } + + public void setTable(final org.apache.hadoop.hive.ql.plan.TableDesc table) { + this.table = table; + } + + @Explain(displayName = "partition") + public HashMap getPartitionSpec() { + return partitionSpec; + } + + public void setPartitionSpec(final HashMap partitionSpec) { + this.partitionSpec = partitionSpec; + } + + @Explain(displayName = "replace") + public boolean getReplace() { + return replace; + } + + public void setReplace(boolean replace) { + this.replace = replace; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/aggregationDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/aggregationDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/aggregationDesc.java (working copy) @@ -1,114 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; - -public class aggregationDesc implements java.io.Serializable { - private static final long serialVersionUID = 1L; - private String genericUDAFName; - - /** - * In case genericUDAFEvaluator is Serializable, we will serialize the object. - * - * In case genericUDAFEvaluator does not implement Serializable, Java will - * remember the class of genericUDAFEvaluator and creates a new instance when - * deserialized. This is exactly what we want. 
- */ - private GenericUDAFEvaluator genericUDAFEvaluator; - private java.util.ArrayList parameters; - private boolean distinct; - private GenericUDAFEvaluator.Mode mode; - - public aggregationDesc() { - } - - public aggregationDesc(final String genericUDAFName, - final GenericUDAFEvaluator genericUDAFEvaluator, - final java.util.ArrayList parameters, - final boolean distinct, final GenericUDAFEvaluator.Mode mode) { - this.genericUDAFName = genericUDAFName; - this.genericUDAFEvaluator = genericUDAFEvaluator; - this.parameters = parameters; - this.distinct = distinct; - this.mode = mode; - } - - public void setGenericUDAFName(final String genericUDAFName) { - this.genericUDAFName = genericUDAFName; - } - - public String getGenericUDAFName() { - return genericUDAFName; - } - - public void setGenericUDAFEvaluator( - final GenericUDAFEvaluator genericUDAFEvaluator) { - this.genericUDAFEvaluator = genericUDAFEvaluator; - } - - public GenericUDAFEvaluator getGenericUDAFEvaluator() { - return genericUDAFEvaluator; - } - - public java.util.ArrayList getParameters() { - return parameters; - } - - public void setParameters(final java.util.ArrayList parameters) { - this.parameters = parameters; - } - - public boolean getDistinct() { - return distinct; - } - - public void setDistinct(final boolean distinct) { - this.distinct = distinct; - } - - public void setMode(final GenericUDAFEvaluator.Mode mode) { - this.mode = mode; - } - - public GenericUDAFEvaluator.Mode getMode() { - return mode; - } - - @explain(displayName = "expr") - public String getExprString() { - StringBuilder sb = new StringBuilder(); - sb.append(genericUDAFName); - sb.append("("); - if (distinct) { - sb.append("DISTINCT "); - } - boolean first = true; - for (exprNodeDesc exp : parameters) { - if (first) { - first = false; - } else { - sb.append(", "); - } - sb.append(exp.getExprString()); - } - sb.append(")"); - return sb.toString(); - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/unionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/unionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/unionDesc.java (working copy) @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -/** - * unionDesc is a empty class currently. However, union has more than one input - * (as compared with forward), and therefore, we need a separate class. 
- **/ -@explain(displayName = "Union") -public class unionDesc implements Serializable { - private static final long serialVersionUID = 1L; - - transient private int numInputs; - - @SuppressWarnings("nls") - public unionDesc() { - numInputs = 2; - } - - /** - * @return the numInputs - */ - public int getNumInputs() { - return numInputs; - } - - /** - * @param numInputs - * the numInputs to set - */ - public void setNumInputs(int numInputs) { - this.numInputs = numInputs; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalResolverMergeFiles.java (working copy) @@ -117,7 +117,7 @@ if ((currAvgSz < avgConditionSize) && (fStats.length > 1)) { // also set the number of reducers Task tsk = ctx.getListTasks().get(1); - mapredWork work = (mapredWork) tsk.getWork(); + MapredWork work = (MapredWork) tsk.getWork(); int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS); int reducers = (int) ((totalSz + trgtSize - 1) / trgtSize); Index: ql/src/java/org/apache/hadoop/hive/ql/plan/reduceSinkDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/reduceSinkDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/reduceSinkDesc.java (working copy) @@ -1,177 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Reduce Output Operator") -public class reduceSinkDesc implements Serializable { - private static final long serialVersionUID = 1L; - /** - * Key columns are passed to reducer in the "key". - */ - private java.util.ArrayList keyCols; - private java.util.ArrayList outputKeyColumnNames; - /** - * Value columns are passed to reducer in the "value". - */ - private java.util.ArrayList valueCols; - private java.util.ArrayList outputValueColumnNames; - /** - * Describe how to serialize the key. - */ - private tableDesc keySerializeInfo; - /** - * Describe how to serialize the value. - */ - private tableDesc valueSerializeInfo; - - /** - * The tag for this reducesink descriptor. - */ - private int tag; - - /** - * The partition columns (CLUSTER BY or DISTRIBUTE BY in Hive language). - * Partition columns decide the reducer that the current row goes to. - * Partition columns are not passed to reducer. 
- */ - private java.util.ArrayList partitionCols; - - private int numReducers; - - public reduceSinkDesc() { - } - - public reduceSinkDesc(java.util.ArrayList keyCols, - java.util.ArrayList valueCols, - java.util.ArrayList outputKeyColumnNames, - java.util.ArrayList outputValueolumnNames, int tag, - java.util.ArrayList partitionCols, int numReducers, - final tableDesc keySerializeInfo, final tableDesc valueSerializeInfo) { - this.keyCols = keyCols; - this.valueCols = valueCols; - this.outputKeyColumnNames = outputKeyColumnNames; - outputValueColumnNames = outputValueolumnNames; - this.tag = tag; - this.numReducers = numReducers; - this.partitionCols = partitionCols; - this.keySerializeInfo = keySerializeInfo; - this.valueSerializeInfo = valueSerializeInfo; - } - - public java.util.ArrayList getOutputKeyColumnNames() { - return outputKeyColumnNames; - } - - public void setOutputKeyColumnNames( - java.util.ArrayList outputKeyColumnNames) { - this.outputKeyColumnNames = outputKeyColumnNames; - } - - public java.util.ArrayList getOutputValueColumnNames() { - return outputValueColumnNames; - } - - public void setOutputValueColumnNames( - java.util.ArrayList outputValueColumnNames) { - this.outputValueColumnNames = outputValueColumnNames; - } - - @explain(displayName = "key expressions") - public java.util.ArrayList getKeyCols() { - return keyCols; - } - - public void setKeyCols(final java.util.ArrayList keyCols) { - this.keyCols = keyCols; - } - - @explain(displayName = "value expressions") - public java.util.ArrayList getValueCols() { - return valueCols; - } - - public void setValueCols(final java.util.ArrayList valueCols) { - this.valueCols = valueCols; - } - - @explain(displayName = "Map-reduce partition columns") - public java.util.ArrayList getPartitionCols() { - return partitionCols; - } - - public void setPartitionCols( - final java.util.ArrayList partitionCols) { - this.partitionCols = partitionCols; - } - - @explain(displayName = "tag") - public int getTag() { - return tag; - } - - public void setTag(int tag) { - this.tag = tag; - } - - /** - * Returns the number of reducers for the map-reduce job. -1 means to decide - * the number of reducers at runtime. This enables Hive to estimate the number - * of reducers based on the map-reduce input data size, which is only - * available right before we start the map-reduce job. - */ - public int getNumReducers() { - return numReducers; - } - - public void setNumReducers(int numReducers) { - this.numReducers = numReducers; - } - - public tableDesc getKeySerializeInfo() { - return keySerializeInfo; - } - - public void setKeySerializeInfo(tableDesc keySerializeInfo) { - this.keySerializeInfo = keySerializeInfo; - } - - public tableDesc getValueSerializeInfo() { - return valueSerializeInfo; - } - - public void setValueSerializeInfo(tableDesc valueSerializeInfo) { - this.valueSerializeInfo = valueSerializeInfo; - } - - /** - * Returns the sort order of the key columns. - * - * @return null, which means ascending order for all key columns, or a String - * of the same length as key columns, that consists of only "+" - * (ascending order) and "-" (descending order). 
- */ - @explain(displayName = "sort order") - public String getOrder() { - return keySerializeInfo.getProperties().getProperty( - org.apache.hadoop.hive.serde.Constants.SERIALIZATION_SORT_ORDER); - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/showPartitionsDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/showPartitionsDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/showPartitionsDesc.java (working copy) @@ -1,93 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Show Partitions") -public class showPartitionsDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String tabName; - Path resFile; - /** - * table name for the result of show tables - */ - private final String table = "showpartitions"; - /** - * thrift ddl for the result of show tables - */ - private final String schema = "partition#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * @param tabName - * Name of the table whose partitions need to be listed - * @param resFile - * File to store the results in - */ - public showPartitionsDesc(String tabName, Path resFile) { - this.tabName = tabName; - this.resFile = resFile; - } - - /** - * @return the name of the table - */ - @explain(displayName = "table") - public String getTabName() { - return tabName; - } - - /** - * @param tabName - * the table whose partitions have to be listed - */ - public void setTabName(String tabName) { - this.tabName = tabName; - } - - /** - * @return the results file - */ - public Path getResFile() { - return resFile; - } - - @explain(displayName = "result file", normalExplain = false) - public String getResFileString() { - return getResFile().getName(); - } - - /** - * @param resFile - * the results file to be used to return the results - */ - public void setResFile(Path resFile) { - this.resFile = resFile; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/fetchWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/fetchWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/fetchWork.java (working copy) @@ -1,205 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. 
The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Fetch Operator") -public class fetchWork implements Serializable { - private static final long serialVersionUID = 1L; - - private String tblDir; - private tableDesc tblDesc; - - private List partDir; - private List partDesc; - - private int limit; - - /** - * Serialization Null Format for the serde used to fetch data - */ - private String serializationNullFormat = "NULL"; - - public fetchWork() { - } - - public fetchWork(String tblDir, tableDesc tblDesc) { - this(tblDir, tblDesc, -1); - } - - public fetchWork(String tblDir, tableDesc tblDesc, int limit) { - this.tblDir = tblDir; - this.tblDesc = tblDesc; - this.limit = limit; - } - - public fetchWork(List partDir, List partDesc) { - this(partDir, partDesc, -1); - } - - public fetchWork(List partDir, List partDesc, int limit) { - this.partDir = partDir; - this.partDesc = partDesc; - this.limit = limit; - } - - public String getSerializationNullFormat() { - return serializationNullFormat; - } - - public void setSerializationNullFormat(String format) { - serializationNullFormat = format; - } - - /** - * @return the tblDir - */ - public String getTblDir() { - return tblDir; - } - - /** - * @return the tblDir - */ - public Path getTblDirPath() { - return new Path(tblDir); - } - - /** - * @param tblDir - * the tblDir to set - */ - public void setTblDir(String tblDir) { - this.tblDir = tblDir; - } - - /** - * @return the tblDesc - */ - public tableDesc getTblDesc() { - return tblDesc; - } - - /** - * @param tblDesc - * the tblDesc to set - */ - public void setTblDesc(tableDesc tblDesc) { - this.tblDesc = tblDesc; - } - - /** - * @return the partDir - */ - public List getPartDir() { - return partDir; - } - - public List getPartDirPath() { - return fetchWork.convertStringToPathArray(partDir); - } - - public static List convertPathToStringArray(List paths) { - if (paths == null) { - return null; - } - - List pathsStr = new ArrayList(); - for (Path path : paths) { - pathsStr.add(path.toString()); - } - - return pathsStr; - } - - public static List convertStringToPathArray(List paths) { - if (paths == null) { - return null; - } - - List pathsStr = new ArrayList(); - for (String path : paths) { - pathsStr.add(new Path(path)); - } - - return pathsStr; - } - - /** - * @param partDir - * the partDir to set - */ - public void setPartDir(List partDir) { - this.partDir = partDir; - } - - /** - * @return the partDesc - */ - public List getPartDesc() { - return partDesc; - } - - /** - * @param partDesc - * the partDesc to set - */ - public void setPartDesc(List partDesc) { - this.partDesc = partDesc; - } - - /** - * @return the limit - */ - @explain(displayName = "limit") - public int getLimit() { - return limit; - } - - /** - * @param limit - * the limit to set - */ - public void 
setLimit(int limit) { - this.limit = limit; - } - - @Override - public String toString() { - if (tblDir != null) { - return new String("table = " + tblDir); - } - - if (partDir == null) { - return "null fetchwork"; - } - - String ret = new String("partition = "); - for (String part : partDir) { - ret = ret.concat(part); - } - - return ret; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeGenericFuncDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeGenericFuncDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeGenericFuncDesc.java (working copy) @@ -1,192 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hive.ql.exec.FunctionRegistry; -import org.apache.hadoop.hive.ql.exec.UDFArgumentException; -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; -import org.apache.hadoop.hive.ql.udf.generic.GenericUDFBridge; -import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils; - -/** - * Describes a GenericFunc node. - */ -public class exprNodeGenericFuncDesc extends exprNodeDesc implements - Serializable { - - private static final long serialVersionUID = 1L; - - /** - * In case genericUDF is Serializable, we will serialize the object. - * - * In case genericUDF does not implement Serializable, Java will remember the - * class of genericUDF and creates a new instance when deserialized. This is - * exactly what we want. 
- */ - private GenericUDF genericUDF; - private List childExprs; - - public exprNodeGenericFuncDesc() { - } - - public exprNodeGenericFuncDesc(TypeInfo typeInfo, GenericUDF genericUDF, - List children) { - super(typeInfo); - assert (genericUDF != null); - this.genericUDF = genericUDF; - childExprs = children; - } - - public GenericUDF getGenericUDF() { - return genericUDF; - } - - public void setGenericUDF(GenericUDF genericUDF) { - this.genericUDF = genericUDF; - } - - public List getChildExprs() { - return childExprs; - } - - public void setChildExprs(List children) { - childExprs = children; - } - - @Override - public List getChildren() { - return childExprs; - } - - @Override - public String toString() { - StringBuilder sb = new StringBuilder(); - sb.append(genericUDF.getClass().toString()); - sb.append("("); - for (int i = 0; i < childExprs.size(); i++) { - if (i > 0) { - sb.append(", "); - } - sb.append(childExprs.get(i).toString()); - } - sb.append("("); - sb.append(")"); - return sb.toString(); - } - - @explain(displayName = "expr") - @Override - public String getExprString() { - // Get the children expr strings - String[] childrenExprStrings = new String[childExprs.size()]; - for (int i = 0; i < childrenExprStrings.length; i++) { - childrenExprStrings[i] = childExprs.get(i).getExprString(); - } - - return genericUDF.getDisplayString(childrenExprStrings); - } - - @Override - public List getCols() { - List colList = new ArrayList(); - if (childExprs != null) { - int pos = 0; - while (pos < childExprs.size()) { - List colCh = childExprs.get(pos).getCols(); - colList = Utilities.mergeUniqElems(colList, colCh); - pos++; - } - } - - return colList; - } - - @Override - public exprNodeDesc clone() { - List cloneCh = new ArrayList(childExprs.size()); - for (exprNodeDesc ch : childExprs) { - cloneCh.add(ch.clone()); - } - exprNodeGenericFuncDesc clone = new exprNodeGenericFuncDesc(typeInfo, - FunctionRegistry.cloneGenericUDF(genericUDF), cloneCh); - return clone; - } - - /** - * Create a exprNodeGenericFuncDesc based on the genericUDFClass and the - * children parameters. 
- * - * @throws UDFArgumentException - */ - public static exprNodeGenericFuncDesc newInstance(GenericUDF genericUDF, - List children) throws UDFArgumentException { - ObjectInspector[] childrenOIs = new ObjectInspector[children.size()]; - for (int i = 0; i < childrenOIs.length; i++) { - childrenOIs[i] = TypeInfoUtils - .getStandardWritableObjectInspectorFromTypeInfo(children.get(i) - .getTypeInfo()); - } - - ObjectInspector oi = genericUDF.initialize(childrenOIs); - return new exprNodeGenericFuncDesc(TypeInfoUtils - .getTypeInfoFromObjectInspector(oi), genericUDF, children); - } - - @Override - public boolean isSame(Object o) { - if (!(o instanceof exprNodeGenericFuncDesc)) { - return false; - } - exprNodeGenericFuncDesc dest = (exprNodeGenericFuncDesc) o; - if (!typeInfo.equals(dest.getTypeInfo()) - || !genericUDF.getClass().equals(dest.getGenericUDF().getClass())) { - return false; - } - - if (genericUDF instanceof GenericUDFBridge) { - GenericUDFBridge bridge = (GenericUDFBridge) genericUDF; - GenericUDFBridge bridge2 = (GenericUDFBridge) dest.getGenericUDF(); - if (!bridge.getUdfClass().equals(bridge2.getUdfClass()) - || !bridge.getUdfName().equals(bridge2.getUdfName()) - || bridge.isOperator() != bridge2.isOperator()) { - return false; - } - } - - if (childExprs.size() != dest.getChildExprs().size()) { - return false; - } - - for (int pos = 0; pos < childExprs.size(); pos++) { - if (!childExprs.get(pos).isSame(dest.getChildExprs().get(pos))) { - return false; - } - } - - return true; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/JoinDesc.java (revision 0) @@ -0,0 +1,293 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +/** + * Join operator Descriptor implementation. 
+ * + */ +@Explain(displayName = "Join Operator") +public class JoinDesc implements Serializable { + private static final long serialVersionUID = 1L; + public static final int INNER_JOIN = 0; + public static final int LEFT_OUTER_JOIN = 1; + public static final int RIGHT_OUTER_JOIN = 2; + public static final int FULL_OUTER_JOIN = 3; + public static final int UNIQUE_JOIN = 4; + public static final int LEFT_SEMI_JOIN = 5; + + // used to handle skew join + private boolean handleSkewJoin = false; + private int skewKeyDefinition = -1; + private Map bigKeysDirMap; + private Map> smallKeysDirMap; + private Map skewKeysValuesTables; + + // alias to key mapping + private Map> exprs; + + // used for create joinOutputObjectInspector + protected java.util.ArrayList outputColumnNames; + + // key:column output name, value:tag + transient private Map reversedExprs; + + // No outer join involved + protected boolean noOuterJoin; + + protected JoinCondDesc[] conds; + + protected Byte[] tagOrder; + private TableDesc keyTableDesc; + + public JoinDesc() { + } + + public JoinDesc(final Map> exprs, + ArrayList outputColumnNames, final boolean noOuterJoin, + final JoinCondDesc[] conds) { + this.exprs = exprs; + this.outputColumnNames = outputColumnNames; + this.noOuterJoin = noOuterJoin; + this.conds = conds; + + tagOrder = new Byte[exprs.size()]; + for (int i = 0; i < tagOrder.length; i++) { + tagOrder[i] = (byte) i; + } + } + + public JoinDesc(final Map> exprs, + ArrayList outputColumnNames) { + this(exprs, outputColumnNames, true, null); + } + + public JoinDesc(final Map> exprs, + ArrayList outputColumnNames, final JoinCondDesc[] conds) { + this(exprs, outputColumnNames, false, conds); + } + + public Map> getExprs() { + return exprs; + } + + public Map getReversedExprs() { + return reversedExprs; + } + + public void setReversedExprs(Map reversed_Exprs) { + reversedExprs = reversed_Exprs; + } + + @Explain(displayName = "condition expressions") + public Map getExprsStringMap() { + if (getExprs() == null) { + return null; + } + + LinkedHashMap ret = new LinkedHashMap(); + + for (Map.Entry> ent : getExprs().entrySet()) { + StringBuilder sb = new StringBuilder(); + boolean first = true; + if (ent.getValue() != null) { + for (ExprNodeDesc expr : ent.getValue()) { + if (!first) { + sb.append(" "); + } + + first = false; + sb.append("{"); + sb.append(expr.getExprString()); + sb.append("}"); + } + } + ret.put(ent.getKey(), sb.toString()); + } + + return ret; + } + + public void setExprs(final Map> exprs) { + this.exprs = exprs; + } + + @Explain(displayName = "outputColumnNames") + public java.util.ArrayList getOutputColumnNames() { + return outputColumnNames; + } + + public void setOutputColumnNames( + java.util.ArrayList outputColumnNames) { + this.outputColumnNames = outputColumnNames; + } + + public boolean getNoOuterJoin() { + return noOuterJoin; + } + + public void setNoOuterJoin(final boolean noOuterJoin) { + this.noOuterJoin = noOuterJoin; + } + + @Explain(displayName = "condition map") + public List getCondsList() { + if (conds == null) { + return null; + } + + ArrayList l = new ArrayList(); + for (JoinCondDesc cond : conds) { + l.add(cond); + } + + return l; + } + + public JoinCondDesc[] getConds() { + return conds; + } + + public void setConds(final JoinCondDesc[] conds) { + this.conds = conds; + } + + /** + * The order in which tables should be processed when joining + * + * @return Array of tags + */ + public Byte[] getTagOrder() { + return tagOrder; + } + + /** + * The order in which tables should be 
processed when joining + * + * @param tagOrder + * Array of tags + */ + public void setTagOrder(Byte[] tagOrder) { + this.tagOrder = tagOrder; + } + + @Explain(displayName = "handleSkewJoin") + public boolean getHandleSkewJoin() { + return handleSkewJoin; + } + + /** + * set to handle skew join in this join op + * + * @param handleSkewJoin + */ + public void setHandleSkewJoin(boolean handleSkewJoin) { + this.handleSkewJoin = handleSkewJoin; + } + + /** + * @return mapping from tbl to dir for big keys + */ + public Map getBigKeysDirMap() { + return bigKeysDirMap; + } + + /** + * set the mapping from tbl to dir for big keys + * + * @param bigKeysDirMap + */ + public void setBigKeysDirMap(Map bigKeysDirMap) { + this.bigKeysDirMap = bigKeysDirMap; + } + + /** + * @return mapping from tbl to dir for small keys + */ + public Map> getSmallKeysDirMap() { + return smallKeysDirMap; + } + + /** + * set the mapping from tbl to dir for small keys + * + * @param bigKeysDirMap + */ + public void setSmallKeysDirMap(Map> smallKeysDirMap) { + this.smallKeysDirMap = smallKeysDirMap; + } + + /** + * @return skew key definition. If we see a key's associated entries' number + * is bigger than this, we will define this key as a skew key. + */ + public int getSkewKeyDefinition() { + return skewKeyDefinition; + } + + /** + * set skew key definition + * + * @param skewKeyDefinition + */ + public void setSkewKeyDefinition(int skewKeyDefinition) { + this.skewKeyDefinition = skewKeyDefinition; + } + + /** + * @return the table desc for storing skew keys and their corresponding value; + */ + public Map getSkewKeysValuesTables() { + return skewKeysValuesTables; + } + + /** + * @param skewKeysValuesTable + * set the table desc for storing skew keys and their corresponding + * value; + */ + public void setSkewKeysValuesTables(Map skewKeysValuesTables) { + this.skewKeysValuesTables = skewKeysValuesTables; + } + + public boolean isNoOuterJoin() { + for (org.apache.hadoop.hive.ql.plan.JoinCondDesc cond : conds) { + if (cond.getType() == JoinDesc.FULL_OUTER_JOIN + || (cond.getType() == JoinDesc.LEFT_OUTER_JOIN) + || cond.getType() == JoinDesc.RIGHT_OUTER_JOIN) { + return false; + } + } + return true; + } + + public void setKeyTableDesc(TableDesc keyTblDesc) { + keyTableDesc = keyTblDesc; + } + + public TableDesc getKeyTableDesc() { + return keyTableDesc; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/SchemaDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/SchemaDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/SchemaDesc.java (revision 0) @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +public class SchemaDesc implements Serializable { + private static final long serialVersionUID = 1L; + private String schema; + + public SchemaDesc() { + } + + public SchemaDesc(final String schema) { + this.schema = schema; + } + + public String getSchema() { + return schema; + } + + public void setSchema(final String schema) { + this.schema = schema; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/explain.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/explain.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/explain.java (working copy) @@ -1,29 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; - -@Retention(RetentionPolicy.RUNTIME) -public @interface explain { - String displayName() default ""; - - boolean normalExplain() default true; -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ddlDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ddlDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ddlDesc.java (working copy) @@ -1,25 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -public abstract class ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExplainWork.java (revision 0) @@ -0,0 +1,78 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.fs.Path; +import org.apache.hadoop.hive.ql.exec.Task; + +public class ExplainWork implements Serializable { + private static final long serialVersionUID = 1L; + + private Path resFile; + private List> rootTasks; + private String astStringTree; + boolean extended; + + public ExplainWork() { + } + + public ExplainWork(Path resFile, + List> rootTasks, String astStringTree, + boolean extended) { + this.resFile = resFile; + this.rootTasks = rootTasks; + this.astStringTree = astStringTree; + this.extended = extended; + } + + public Path getResFile() { + return resFile; + } + + public void setResFile(Path resFile) { + this.resFile = resFile; + } + + public List> getRootTasks() { + return rootTasks; + } + + public void setRootTasks(List> rootTasks) { + this.rootTasks = rootTasks; + } + + public String getAstStringTree() { + return astStringTree; + } + + public void setAstStringTree(String astStringTree) { + this.astStringTree = astStringTree; + } + + public boolean getExtended() { + return extended; + } + + public void setExtended(boolean extended) { + this.extended = extended; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ConditionalWork.java (working copy) @@ -21,7 +21,7 @@ import java.io.Serializable; import java.util.List; -@explain(displayName = "Conditional Operator") +@Explain(displayName = "Conditional Operator") public class ConditionalWork implements Serializable { private static final long serialVersionUID = 1L; List listWorks; Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CreateFunctionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateFunctionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateFunctionDesc.java (revision 0) @@ -0,0 +1,53 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor 
license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Create Function") +public class CreateFunctionDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private String functionName; + private String className; + + public CreateFunctionDesc(String functionName, String className) { + this.functionName = functionName; + this.className = className; + } + + @Explain(displayName = "name") + public String getFunctionName() { + return functionName; + } + + public void setFunctionName(String functionName) { + this.functionName = functionName; + } + + @Explain(displayName = "class") + public String getClassName() { + return className; + } + + public void setClassName(String className) { + this.className = className; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/createTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/createTableDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/createTableDesc.java (working copy) @@ -1,280 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; -import java.util.Map; - -import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.Order; -import org.apache.hadoop.hive.ql.exec.Utilities; - -@explain(displayName = "Create Table") -public class createTableDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String tableName; - boolean isExternal; - List cols; - List partCols; - List bucketCols; - List sortCols; - int numBuckets; - String fieldDelim; - String fieldEscape; - String collItemDelim; - String mapKeyDelim; - String lineDelim; - String comment; - String inputFormat; - String outputFormat; - String location; - String serName; - Map mapProp; - boolean ifNotExists; - - public createTableDesc(String tableName, boolean isExternal, - List cols, List partCols, - List bucketCols, List sortCols, int numBuckets, - String fieldDelim, String fieldEscape, String collItemDelim, - String mapKeyDelim, String lineDelim, String comment, String inputFormat, - String outputFormat, String location, String serName, - Map mapProp, boolean ifNotExists) { - this.tableName = tableName; - this.isExternal = isExternal; - this.bucketCols = bucketCols; - this.sortCols = sortCols; - this.collItemDelim = collItemDelim; - this.cols = cols; - this.comment = comment; - this.fieldDelim = fieldDelim; - this.fieldEscape = fieldEscape; - this.inputFormat = inputFormat; - this.outputFormat = outputFormat; - this.lineDelim = lineDelim; - this.location = location; - this.mapKeyDelim = mapKeyDelim; - this.numBuckets = numBuckets; - this.partCols = partCols; - this.serName = serName; - this.mapProp = mapProp; - this.ifNotExists = ifNotExists; - } - - @explain(displayName = "if not exists") - public boolean getIfNotExists() { - return ifNotExists; - } - - public void setIfNotExists(boolean ifNotExists) { - this.ifNotExists = ifNotExists; - } - - @explain(displayName = "name") - public String getTableName() { - return tableName; - } - - public void setTableName(String tableName) { - this.tableName = tableName; - } - - public List getCols() { - return cols; - } - - @explain(displayName = "columns") - public List getColsString() { - return Utilities.getFieldSchemaString(getCols()); - } - - public void setCols(List cols) { - this.cols = cols; - } - - public List getPartCols() { - return partCols; - } - - @explain(displayName = "partition columns") - public List getPartColsString() { - return Utilities.getFieldSchemaString(getPartCols()); - } - - public void setPartCols(List partCols) { - this.partCols = partCols; - } - - @explain(displayName = "bucket columns") - public List getBucketCols() { - return bucketCols; - } - - public void setBucketCols(List bucketCols) { - this.bucketCols = bucketCols; - } - - @explain(displayName = "# buckets") - public int getNumBuckets() { - return numBuckets; - } - - public void setNumBuckets(int numBuckets) { - this.numBuckets = numBuckets; - } - - @explain(displayName = "field delimiter") - public String getFieldDelim() { - return fieldDelim; - } - - public void setFieldDelim(String fieldDelim) { - this.fieldDelim = fieldDelim; - } - - @explain(displayName = "field escape") - public String getFieldEscape() { - return fieldEscape; - } - - public void setFieldEscape(String fieldEscape) { - this.fieldEscape = fieldEscape; - } - - @explain(displayName = "collection delimiter") - public String getCollItemDelim() { - return collItemDelim; - } - - public 
void setCollItemDelim(String collItemDelim) { - this.collItemDelim = collItemDelim; - } - - @explain(displayName = "map key delimiter") - public String getMapKeyDelim() { - return mapKeyDelim; - } - - public void setMapKeyDelim(String mapKeyDelim) { - this.mapKeyDelim = mapKeyDelim; - } - - @explain(displayName = "line delimiter") - public String getLineDelim() { - return lineDelim; - } - - public void setLineDelim(String lineDelim) { - this.lineDelim = lineDelim; - } - - @explain(displayName = "comment") - public String getComment() { - return comment; - } - - public void setComment(String comment) { - this.comment = comment; - } - - @explain(displayName = "input format") - public String getInputFormat() { - return inputFormat; - } - - public void setInputFormat(String inputFormat) { - this.inputFormat = inputFormat; - } - - @explain(displayName = "output format") - public String getOutputFormat() { - return outputFormat; - } - - public void setOutputFormat(String outputFormat) { - this.outputFormat = outputFormat; - } - - @explain(displayName = "location") - public String getLocation() { - return location; - } - - public void setLocation(String location) { - this.location = location; - } - - @explain(displayName = "isExternal") - public boolean isExternal() { - return isExternal; - } - - public void setExternal(boolean isExternal) { - this.isExternal = isExternal; - } - - /** - * @return the sortCols - */ - @explain(displayName = "sort columns") - public List getSortCols() { - return sortCols; - } - - /** - * @param sortCols - * the sortCols to set - */ - public void setSortCols(List sortCols) { - this.sortCols = sortCols; - } - - /** - * @return the serDeName - */ - @explain(displayName = "serde name") - public String getSerName() { - return serName; - } - - /** - * @param serName - * the serName to set - */ - public void setSerName(String serName) { - this.serName = serName; - } - - /** - * @return the serDe properties - */ - @explain(displayName = "serde properties") - public Map getMapProp() { - return mapProp; - } - - /** - * @param mapProp - * the map properties to set - */ - public void setMapProp(Map mapProp) { - this.mapProp = mapProp; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/showTablesDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/showTablesDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/showTablesDesc.java (working copy) @@ -1,99 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Show Tables") -public class showTablesDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String pattern; - Path resFile; - /** - * table name for the result of show tables - */ - private final String table = "show"; - /** - * thrift ddl for the result of show tables - */ - private final String schema = "tab_name#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * @param resFile - */ - public showTablesDesc(Path resFile) { - this.resFile = resFile; - pattern = null; - } - - /** - * @param pattern - * names of tables to show - */ - public showTablesDesc(Path resFile, String pattern) { - this.resFile = resFile; - this.pattern = pattern; - } - - /** - * @return the pattern - */ - @explain(displayName = "pattern") - public String getPattern() { - return pattern; - } - - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the resFile - */ - public Path getResFile() { - return resFile; - } - - @explain(displayName = "result file", normalExplain = false) - public String getResFileString() { - return getResFile().getName(); - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(Path resFile) { - this.resFile = resFile; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/limitDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/limitDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/limitDesc.java (working copy) @@ -1,43 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Limit") -public class limitDesc implements Serializable { - private static final long serialVersionUID = 1L; - private int limit; - - public limitDesc() { - } - - public limitDesc(final int limit) { - this.limit = limit; - } - - public int getLimit() { - return limit; - } - - public void setLimit(final int limit) { - this.limit = limit; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeNullDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeNullDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeNullDesc.java (working copy) @@ -1,61 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. 
See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; -import org.apache.hadoop.io.NullWritable; - -public class exprNodeNullDesc extends exprNodeDesc implements Serializable { - - private static final long serialVersionUID = 1L; - - public exprNodeNullDesc() { - super(TypeInfoFactory - .getPrimitiveTypeInfoFromPrimitiveWritable(NullWritable.class)); - } - - public Object getValue() { - return null; - } - - @explain(displayName = "expr") - @Override - public String getExprString() { - return "null"; - } - - @Override - public exprNodeDesc clone() { - return new exprNodeNullDesc(); - } - - @Override - public boolean isSame(Object o) { - if (!(o instanceof exprNodeNullDesc)) { - return false; - } - if (!typeInfo.equals(((exprNodeNullDesc) o).getTypeInfo())) { - return false; - } - - return true; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/explosionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/explosionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/explosionDesc.java (working copy) @@ -1,52 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Explosion") -public class explosionDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String fieldName; - private int position; - - public explosionDesc() { - } - - public explosionDesc(final String fieldName, final int position) { - this.fieldName = fieldName; - this.position = position; - } - - public String getFieldName() { - return fieldName; - } - - public void setFieldName(final String fieldName) { - this.fieldName = fieldName; - } - - public int getPosition() { - return position; - } - - public void setPosition(final int position) { - this.position = position; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeDesc.java (working copy) @@ -1,82 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; - -import org.apache.hadoop.hive.ql.lib.Node; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; - -public abstract class exprNodeDesc implements Serializable, Node { - private static final long serialVersionUID = 1L; - TypeInfo typeInfo; - - public exprNodeDesc() { - } - - public exprNodeDesc(TypeInfo typeInfo) { - this.typeInfo = typeInfo; - if (typeInfo == null) { - throw new RuntimeException("typeInfo cannot be null!"); - } - } - - @Override - public abstract exprNodeDesc clone(); - - // Cant use equals because the walker depends on them being object equal - // The default graph walker processes a node after its kids have been - // processed. That comparison needs - // object equality - isSame means that the objects are semantically equal. 
- public abstract boolean isSame(Object o); - - public TypeInfo getTypeInfo() { - return typeInfo; - } - - public void setTypeInfo(TypeInfo typeInfo) { - this.typeInfo = typeInfo; - } - - public String getExprString() { - assert (false); - return null; - } - - @explain(displayName = "type") - public String getTypeString() { - return typeInfo.getTypeName(); - } - - public List getCols() { - return null; - } - - @Override - public List getChildren() { - return null; - } - - @Override - public String getName() { - return this.getClass().getName(); - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CollectDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CollectDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CollectDesc.java (revision 0) @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Collect") +public class CollectDesc implements Serializable { + private static final long serialVersionUID = 1L; + Integer bufferSize; + + public CollectDesc() { + } + + public CollectDesc(final Integer bufferSize) { + this.bufferSize = bufferSize; + } + + public Integer getBufferSize() { + return bufferSize; + } + + public void setBufferSize(Integer bufferSize) { + this.bufferSize = bufferSize; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/descTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/descTableDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/descTableDesc.java (working copy) @@ -1,131 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.HashMap; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Describe Table") -public class descTableDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - - String tableName; - HashMap partSpec; - Path resFile; - boolean isExt; - /** - * table name for the result of describe table - */ - private final String table = "describe"; - /** - * thrift ddl for the result of describe table - */ - private final String schema = "col_name,data_type,comment#string:string:string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * @param isExt - * @param partSpec - * @param resFile - * @param tableName - */ - public descTableDesc(Path resFile, String tableName, - HashMap partSpec, boolean isExt) { - this.isExt = isExt; - this.partSpec = partSpec; - this.resFile = resFile; - this.tableName = tableName; - } - - /** - * @return the isExt - */ - public boolean isExt() { - return isExt; - } - - /** - * @param isExt - * the isExt to set - */ - public void setExt(boolean isExt) { - this.isExt = isExt; - } - - /** - * @return the tableName - */ - @explain(displayName = "table") - public String getTableName() { - return tableName; - } - - /** - * @param tableName - * the tableName to set - */ - public void setTableName(String tableName) { - this.tableName = tableName; - } - - /** - * @return the partSpec - */ - @explain(displayName = "partition") - public HashMap getPartSpec() { - return partSpec; - } - - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpecs(HashMap partSpec) { - this.partSpec = partSpec; - } - - /** - * @return the resFile - */ - public Path getResFile() { - return resFile; - } - - @explain(displayName = "result file", normalExplain = false) - public String getResFileString() { - return getResFile().getName(); - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(Path resFile) { - this.resFile = resFile; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TableDesc.java (revision 0) @@ -0,0 +1,146 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.Enumeration; +import java.util.Properties; + +import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; +import org.apache.hadoop.hive.ql.io.HiveOutputFormat; +import org.apache.hadoop.hive.serde2.Deserializer; +import org.apache.hadoop.mapred.InputFormat; + +import static org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME; +import static org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB; + +public class TableDesc implements Serializable, Cloneable { + private static final long serialVersionUID = 1L; + private Class deserializerClass; + private Class inputFileFormatClass; + private Class outputFileFormatClass; + private Properties properties; + private String serdeClassName; + + public TableDesc() { + } + + public TableDesc(Class serdeClass, + Class inputFileFormatClass, + Class class1, Properties properties) { + deserializerClass = serdeClass; + this.inputFileFormatClass = inputFileFormatClass; + outputFileFormatClass = HiveFileFormatUtils.getOutputFormatSubstitute(class1); + this.properties = properties; + serdeClassName = properties.getProperty(SERIALIZATION_LIB); + ; + } + + public Class getDeserializerClass() { + return deserializerClass; + } + + public void setDeserializerClass(Class serdeClass) { + deserializerClass = serdeClass; + } + + public Class getInputFileFormatClass() { + return inputFileFormatClass; + } + + /** + * Return a deserializer object corresponding to the TableDesc + */ + public Deserializer getDeserializer() throws Exception { + Deserializer de = deserializerClass.newInstance(); + de.initialize(null, properties); + return de; + } + + public void setInputFileFormatClass(Class inputFileFormatClass) { + this.inputFileFormatClass = inputFileFormatClass; + } + + public Class getOutputFileFormatClass() { + return outputFileFormatClass; + } + + public void setOutputFileFormatClass(Class outputFileFormatClass) { + this.outputFileFormatClass = HiveFileFormatUtils + .getOutputFormatSubstitute(outputFileFormatClass); + } + + @Explain(displayName = "properties", normalExplain = false) + public Properties getProperties() { + return properties; + } + + public void setProperties(Properties properties) { + this.properties = properties; + } + + /** + * @return the serdeClassName + */ + @Explain(displayName = "serde") + public String getSerdeClassName() { + return serdeClassName; + } + + /** + * @param serdeClassName + * the serde Class Name to set + */ + public void setSerdeClassName(String serdeClassName) { + this.serdeClassName = serdeClassName; + } + + @Explain(displayName = "name") + public String getTableName() { + return properties.getProperty(META_TABLE_NAME); + } + + @Explain(displayName = "input format") + public String getInputFileFormatClassName() { + return getInputFileFormatClass().getName(); + } + + @Explain(displayName = "output format") + public String getOutputFileFormatClassName() { + return getOutputFileFormatClass().getName(); + } + + @Override + public Object clone() { + TableDesc ret = new TableDesc(); + ret.setSerdeClassName(serdeClassName); + ret.setDeserializerClass(deserializerClass); + ret.setInputFileFormatClass(inputFileFormatClass); + ret.setOutputFileFormatClass(outputFileFormatClass); + Properties newProp = new Properties(); + Enumeration keysProp = properties.keys(); + while (keysProp.hasMoreElements()) { + Object key = keysProp.nextElement(); + newProp.put(key, properties.get(key)); + } + + ret.setProperties(newProp); + return ret; + 
} +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapredLocalWork.java (revision 0) @@ -0,0 +1,69 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.LinkedHashMap; + +import org.apache.hadoop.hive.ql.exec.Operator; + +@Explain(displayName = "Map Reduce Local Work") +public class MapredLocalWork implements Serializable { + private static final long serialVersionUID = 1L; + + private LinkedHashMap> aliasToWork; + private LinkedHashMap aliasToFetchWork; + + public MapredLocalWork() { + } + + public MapredLocalWork( + final LinkedHashMap> aliasToWork, + final LinkedHashMap aliasToFetchWork) { + this.aliasToWork = aliasToWork; + this.aliasToFetchWork = aliasToFetchWork; + } + + @Explain(displayName = "Alias -> Map Local Operator Tree") + public LinkedHashMap> getAliasToWork() { + return aliasToWork; + } + + public void setAliasToWork( + final LinkedHashMap> aliasToWork) { + this.aliasToWork = aliasToWork; + } + + /** + * @return the aliasToFetchWork + */ + @Explain(displayName = "Alias -> Map Local Tables") + public LinkedHashMap getAliasToFetchWork() { + return aliasToFetchWork; + } + + /** + * @param aliasToFetchWork + * the aliasToFetchWork to set + */ + public void setAliasToFetchWork( + final LinkedHashMap aliasToFetchWork) { + this.aliasToFetchWork = aliasToFetchWork; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/loadTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/loadTableDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/loadTableDesc.java (working copy) @@ -1,89 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
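Reviewer note (not part of the patch): a minimal usage sketch for the MapredLocalWork class added above. The generic type parameters are lost in this patch rendering; the sketch assumes the maps are keyed by table alias, with Operator<? extends Serializable> trees and FetchWork descriptors as values, which is how MapredWork wires local work further down.

import java.io.Serializable;
import java.util.LinkedHashMap;

import org.apache.hadoop.hive.ql.exec.Operator;
import org.apache.hadoop.hive.ql.plan.FetchWork;      // assumed value type for aliasToFetchWork
import org.apache.hadoop.hive.ql.plan.MapredLocalWork;

public class MapredLocalWorkSketch {
  public static MapredLocalWork buildLocalWork(
      String alias,
      Operator<? extends Serializable> mapLocalTree,
      FetchWork fetchWork) {
    // Both maps are LinkedHashMaps so the EXPLAIN output stays deterministic.
    LinkedHashMap<String, Operator<? extends Serializable>> aliasToWork =
        new LinkedHashMap<String, Operator<? extends Serializable>>();
    aliasToWork.put(alias, mapLocalTree);

    LinkedHashMap<String, FetchWork> aliasToFetchWork =
        new LinkedHashMap<String, FetchWork>();
    aliasToFetchWork.put(alias, fetchWork);

    return new MapredLocalWork(aliasToWork, aliasToFetchWork);
  }
}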
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.HashMap; - -public class loadTableDesc extends org.apache.hadoop.hive.ql.plan.loadDesc - implements Serializable { - private static final long serialVersionUID = 1L; - private boolean replace; - private String tmpDir; - - // TODO: the below seems like they should just be combined into partitionDesc - private org.apache.hadoop.hive.ql.plan.tableDesc table; - private HashMap partitionSpec; - - public loadTableDesc() { - } - - public loadTableDesc(final String sourceDir, final String tmpDir, - final org.apache.hadoop.hive.ql.plan.tableDesc table, - final HashMap partitionSpec, final boolean replace) { - - super(sourceDir); - this.tmpDir = tmpDir; - this.table = table; - this.partitionSpec = partitionSpec; - this.replace = replace; - } - - public loadTableDesc(final String sourceDir, final String tmpDir, - final org.apache.hadoop.hive.ql.plan.tableDesc table, - final HashMap partitionSpec) { - this(sourceDir, tmpDir, table, partitionSpec, true); - } - - @explain(displayName = "tmp directory", normalExplain = false) - public String getTmpDir() { - return tmpDir; - } - - public void setTmpDir(final String tmp) { - tmpDir = tmp; - } - - @explain(displayName = "table") - public tableDesc getTable() { - return table; - } - - public void setTable(final org.apache.hadoop.hive.ql.plan.tableDesc table) { - this.table = table; - } - - @explain(displayName = "partition") - public HashMap getPartitionSpec() { - return partitionSpec; - } - - public void setPartitionSpec(final HashMap partitionSpec) { - this.partitionSpec = partitionSpec; - } - - @explain(displayName = "replace") - public boolean getReplace() { - return replace; - } - - public void setReplace(boolean replace) { - this.replace = replace; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeFieldDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeFieldDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeFieldDesc.java (revision 0) @@ -0,0 +1,121 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.ql.exec.Utilities; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; + +public class ExprNodeFieldDesc extends ExprNodeDesc implements Serializable { + private static final long serialVersionUID = 1L; + ExprNodeDesc desc; + String fieldName; + + // Used to support a.b where a is a list of struct that contains a field + // called b. + // a.b will return an array that contains field b of all elements of array a. 
+ Boolean isList; + + public ExprNodeFieldDesc() { + } + + public ExprNodeFieldDesc(TypeInfo typeInfo, ExprNodeDesc desc, + String fieldName, Boolean isList) { + super(typeInfo); + this.desc = desc; + this.fieldName = fieldName; + this.isList = isList; + } + + @Override + public List getChildren() { + List children = new ArrayList(2); + children.add(desc); + return children; + } + + public ExprNodeDesc getDesc() { + return desc; + } + + public void setDesc(ExprNodeDesc desc) { + this.desc = desc; + } + + public String getFieldName() { + return fieldName; + } + + public void setFieldName(String fieldName) { + this.fieldName = fieldName; + } + + public Boolean getIsList() { + return isList; + } + + public void setIsList(Boolean isList) { + this.isList = isList; + } + + @Override + public String toString() { + return desc.toString() + "." + fieldName; + } + + @Explain(displayName = "expr") + @Override + public String getExprString() { + return desc.getExprString() + "." + fieldName; + } + + @Override + public List getCols() { + List colList = new ArrayList(); + if (desc != null) { + colList = Utilities.mergeUniqElems(colList, desc.getCols()); + } + return colList; + } + + @Override + public ExprNodeDesc clone() { + return new ExprNodeFieldDesc(typeInfo, desc, fieldName, isList); + } + + @Override + public boolean isSame(Object o) { + if (!(o instanceof ExprNodeFieldDesc)) { + return false; + } + ExprNodeFieldDesc dest = (ExprNodeFieldDesc) o; + if (!typeInfo.equals(dest.getTypeInfo())) { + return false; + } + if (!fieldName.equals(dest.getFieldName()) + || !isList.equals(dest.getIsList()) || !desc.isSame(dest.getDesc())) { + return false; + } + + return true; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowTableStatusDesc.java (revision 0) @@ -0,0 +1,145 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
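Reviewer note (not part of the patch): a small sketch of how the ExprNodeFieldDesc added above composes with a column expression to model a struct field access a.b. The column and alias names are illustrative, and the column's TypeInfo is simplified to stringTypeInfo where a real plan would carry the struct's TypeInfo.

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ExprNodeFieldDescSketch {
  public static void main(String[] args) {
    // Column "a" of table alias "t" (TypeInfo simplified for the sketch).
    ExprNodeDesc col =
        new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "a", "t", false);

    // Field "b" of "a"; isList = false because "a" is a single struct here,
    // not an array of structs.
    ExprNodeFieldDesc field =
        new ExprNodeFieldDesc(TypeInfoFactory.stringTypeInfo, col, "b", Boolean.FALSE);

    System.out.println(field.getExprString()); // a.b
    System.out.println(field.getCols());       // [a]
  }
}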
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.HashMap; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Show Table Status") +public class ShowTableStatusDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String pattern; + Path resFile; + String dbName; + HashMap partSpec; + + /** + * table name for the result of show tables + */ + private final String table = "show_tablestatus"; + /** + * thrift ddl for the result of show tables + */ + private final String schema = "tab_name#string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + /** + * @param pattern + * names of tables to show + */ + public ShowTableStatusDesc(Path resFile, String dbName, String pattern) { + this.dbName = dbName; + this.resFile = resFile; + this.pattern = pattern; + } + + /** + * @param resFile + * @param dbName + * data base name + * @param pattern + * names of tables to show + * @param part + * partition specification + */ + public ShowTableStatusDesc(Path resFile, String dbName, String pattern, + HashMap partSpec) { + this.dbName = dbName; + this.resFile = resFile; + this.pattern = pattern; + this.partSpec = partSpec; + } + + /** + * @return the pattern + */ + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + /** + * @param pattern + * the pattern to set + */ + public void setPattern(String pattern) { + this.pattern = pattern; + } + + /** + * @return the resFile + */ + public Path getResFile() { + return resFile; + } + + @Explain(displayName = "result file", normalExplain = false) + public String getResFileString() { + return getResFile().getName(); + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(Path resFile) { + this.resFile = resFile; + } + + /** + * @return the database name + */ + @Explain(displayName = "database") + public String getDbName() { + return dbName; + } + + /** + * @param dbName + * the database name + */ + public void setDbName(String dbName) { + this.dbName = dbName; + } + + /** + * @return the partSpec + */ + @Explain(displayName = "partition") + public HashMap getPartSpec() { + return partSpec; + } + + /** + * @param partSpec + * the partSpec to set + */ + public void setPartSpecs(HashMap partSpec) { + this.partSpec = partSpec; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeConstantDesc.java (revision 0) @@ -0,0 +1,94 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.hive.serde.Constants; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + +/** + * A constant expression. + */ +public class ExprNodeConstantDesc extends ExprNodeDesc implements Serializable { + private static final long serialVersionUID = 1L; + private Object value; + + public ExprNodeConstantDesc() { + } + + public ExprNodeConstantDesc(TypeInfo typeInfo, Object value) { + super(typeInfo); + this.value = value; + } + + public ExprNodeConstantDesc(Object value) { + this(TypeInfoFactory + .getPrimitiveTypeInfoFromJavaPrimitive(value.getClass()), value); + } + + public void setValue(Object value) { + this.value = value; + } + + public Object getValue() { + return value; + } + + @Override + public String toString() { + return "Const " + typeInfo.toString() + " " + value; + } + + @Explain(displayName = "expr") + @Override + public String getExprString() { + if (value == null) { + return "null"; + } + + if (typeInfo.getTypeName().equals(Constants.STRING_TYPE_NAME)) { + return "'" + value.toString() + "'"; + } else { + return value.toString(); + } + } + + @Override + public ExprNodeDesc clone() { + return new ExprNodeConstantDesc(typeInfo, value); + } + + @Override + public boolean isSame(Object o) { + if (!(o instanceof ExprNodeConstantDesc)) { + return false; + } + ExprNodeConstantDesc dest = (ExprNodeConstantDesc) o; + if (!typeInfo.equals(dest.getTypeInfo())) { + return false; + } + if (!value.equals(dest.getValue())) { + return false; + } + + return true; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/LateralViewJoinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/LateralViewJoinDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LateralViewJoinDesc.java (revision 0) @@ -0,0 +1,45 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
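Reviewer note (not part of the patch): the ExprNodeConstantDesc added above quotes string literals but not other primitives in its EXPLAIN rendering, and infers the TypeInfo from the Java class of the value. A short sketch:

import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;

public class ExprNodeConstantDescSketch {
  public static void main(String[] args) {
    ExprNodeConstantDesc str = new ExprNodeConstantDesc("abc");
    ExprNodeConstantDesc num = new ExprNodeConstantDesc(Integer.valueOf(5));

    System.out.println(str.getExprString());     // 'abc'  (strings are quoted)
    System.out.println(num.getExprString());     // 5
    System.out.println(str.isSame(str.clone())); // true: same TypeInfo and value
  }
}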
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; + +@Explain(displayName = "Lateral View Join Operator") +public class LateralViewJoinDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private ArrayList outputInternalColNames; + + public LateralViewJoinDesc() { + } + + public LateralViewJoinDesc(ArrayList outputInternalColNames) { + this.outputInternalColNames = outputInternalColNames; + } + + public void setOutputInternalColNames(ArrayList outputInternalColNames) { + this.outputInternalColNames = outputInternalColNames; + } + + @Explain(displayName = "outputColumnNames") + public ArrayList getOutputInternalColNames() { + return outputInternalColNames; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/joinDesc.java (working copy) @@ -1,293 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; - -/** - * Join operator Descriptor implementation. 
- * - */ -@explain(displayName = "Join Operator") -public class joinDesc implements Serializable { - private static final long serialVersionUID = 1L; - public static final int INNER_JOIN = 0; - public static final int LEFT_OUTER_JOIN = 1; - public static final int RIGHT_OUTER_JOIN = 2; - public static final int FULL_OUTER_JOIN = 3; - public static final int UNIQUE_JOIN = 4; - public static final int LEFT_SEMI_JOIN = 5; - - // used to handle skew join - private boolean handleSkewJoin = false; - private int skewKeyDefinition = -1; - private Map bigKeysDirMap; - private Map> smallKeysDirMap; - private Map skewKeysValuesTables; - - // alias to key mapping - private Map> exprs; - - // used for create joinOutputObjectInspector - protected java.util.ArrayList outputColumnNames; - - // key:column output name, value:tag - transient private Map reversedExprs; - - // No outer join involved - protected boolean noOuterJoin; - - protected joinCond[] conds; - - protected Byte[] tagOrder; - private tableDesc keyTableDesc; - - public joinDesc() { - } - - public joinDesc(final Map> exprs, - ArrayList outputColumnNames, final boolean noOuterJoin, - final joinCond[] conds) { - this.exprs = exprs; - this.outputColumnNames = outputColumnNames; - this.noOuterJoin = noOuterJoin; - this.conds = conds; - - tagOrder = new Byte[exprs.size()]; - for (int i = 0; i < tagOrder.length; i++) { - tagOrder[i] = (byte) i; - } - } - - public joinDesc(final Map> exprs, - ArrayList outputColumnNames) { - this(exprs, outputColumnNames, true, null); - } - - public joinDesc(final Map> exprs, - ArrayList outputColumnNames, final joinCond[] conds) { - this(exprs, outputColumnNames, false, conds); - } - - public Map> getExprs() { - return exprs; - } - - public Map getReversedExprs() { - return reversedExprs; - } - - public void setReversedExprs(Map reversed_Exprs) { - reversedExprs = reversed_Exprs; - } - - @explain(displayName = "condition expressions") - public Map getExprsStringMap() { - if (getExprs() == null) { - return null; - } - - LinkedHashMap ret = new LinkedHashMap(); - - for (Map.Entry> ent : getExprs().entrySet()) { - StringBuilder sb = new StringBuilder(); - boolean first = true; - if (ent.getValue() != null) { - for (exprNodeDesc expr : ent.getValue()) { - if (!first) { - sb.append(" "); - } - - first = false; - sb.append("{"); - sb.append(expr.getExprString()); - sb.append("}"); - } - } - ret.put(ent.getKey(), sb.toString()); - } - - return ret; - } - - public void setExprs(final Map> exprs) { - this.exprs = exprs; - } - - @explain(displayName = "outputColumnNames") - public java.util.ArrayList getOutputColumnNames() { - return outputColumnNames; - } - - public void setOutputColumnNames( - java.util.ArrayList outputColumnNames) { - this.outputColumnNames = outputColumnNames; - } - - public boolean getNoOuterJoin() { - return noOuterJoin; - } - - public void setNoOuterJoin(final boolean noOuterJoin) { - this.noOuterJoin = noOuterJoin; - } - - @explain(displayName = "condition map") - public List getCondsList() { - if (conds == null) { - return null; - } - - ArrayList l = new ArrayList(); - for (joinCond cond : conds) { - l.add(cond); - } - - return l; - } - - public joinCond[] getConds() { - return conds; - } - - public void setConds(final joinCond[] conds) { - this.conds = conds; - } - - /** - * The order in which tables should be processed when joining - * - * @return Array of tags - */ - public Byte[] getTagOrder() { - return tagOrder; - } - - /** - * The order in which tables should be processed when joining - * - * 
@param tagOrder - * Array of tags - */ - public void setTagOrder(Byte[] tagOrder) { - this.tagOrder = tagOrder; - } - - @explain(displayName = "handleSkewJoin") - public boolean getHandleSkewJoin() { - return handleSkewJoin; - } - - /** - * set to handle skew join in this join op - * - * @param handleSkewJoin - */ - public void setHandleSkewJoin(boolean handleSkewJoin) { - this.handleSkewJoin = handleSkewJoin; - } - - /** - * @return mapping from tbl to dir for big keys - */ - public Map getBigKeysDirMap() { - return bigKeysDirMap; - } - - /** - * set the mapping from tbl to dir for big keys - * - * @param bigKeysDirMap - */ - public void setBigKeysDirMap(Map bigKeysDirMap) { - this.bigKeysDirMap = bigKeysDirMap; - } - - /** - * @return mapping from tbl to dir for small keys - */ - public Map> getSmallKeysDirMap() { - return smallKeysDirMap; - } - - /** - * set the mapping from tbl to dir for small keys - * - * @param bigKeysDirMap - */ - public void setSmallKeysDirMap(Map> smallKeysDirMap) { - this.smallKeysDirMap = smallKeysDirMap; - } - - /** - * @return skew key definition. If we see a key's associated entries' number - * is bigger than this, we will define this key as a skew key. - */ - public int getSkewKeyDefinition() { - return skewKeyDefinition; - } - - /** - * set skew key definition - * - * @param skewKeyDefinition - */ - public void setSkewKeyDefinition(int skewKeyDefinition) { - this.skewKeyDefinition = skewKeyDefinition; - } - - /** - * @return the table desc for storing skew keys and their corresponding value; - */ - public Map getSkewKeysValuesTables() { - return skewKeysValuesTables; - } - - /** - * @param skewKeysValuesTable - * set the table desc for storing skew keys and their corresponding - * value; - */ - public void setSkewKeysValuesTables(Map skewKeysValuesTables) { - this.skewKeysValuesTables = skewKeysValuesTables; - } - - public boolean isNoOuterJoin() { - for (org.apache.hadoop.hive.ql.plan.joinCond cond : conds) { - if (cond.getType() == joinDesc.FULL_OUTER_JOIN - || (cond.getType() == joinDesc.LEFT_OUTER_JOIN) - || cond.getType() == joinDesc.RIGHT_OUTER_JOIN) { - return false; - } - } - return true; - } - - public void setKeyTableDesc(tableDesc keyTblDesc) { - keyTableDesc = keyTblDesc; - } - - public tableDesc getKeyTableDesc() { - return keyTableDesc; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/schemaDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/schemaDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/schemaDesc.java (working copy) @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -public class schemaDesc implements Serializable { - private static final long serialVersionUID = 1L; - private String schema; - - public schemaDesc() { - } - - public schemaDesc(final String schema) { - this.schema = schema; - } - - public String getSchema() { - return schema; - } - - public void setSchema(final String schema) { - this.schema = schema; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExprNodeColumnDesc.java (revision 0) @@ -0,0 +1,126 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.List; + +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; +import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; + +public class ExprNodeColumnDesc extends ExprNodeDesc implements Serializable { + private static final long serialVersionUID = 1L; + + /** + * The column name. + */ + private String column; + + /** + * The alias of the table. + */ + private String tabAlias; + + /** + * Is the column a partitioned column. 
+ */ + private boolean isPartitionCol; + + public ExprNodeColumnDesc() { + } + + public ExprNodeColumnDesc(TypeInfo typeInfo, String column, String tabAlias, + boolean isPartitionCol) { + super(typeInfo); + this.column = column; + this.tabAlias = tabAlias; + this.isPartitionCol = isPartitionCol; + } + + public ExprNodeColumnDesc(Class c, String column, String tabAlias, + boolean isPartitionCol) { + super(TypeInfoFactory.getPrimitiveTypeInfoFromJavaPrimitive(c)); + this.column = column; + this.tabAlias = tabAlias; + this.isPartitionCol = isPartitionCol; + } + + public String getColumn() { + return column; + } + + public void setColumn(String column) { + this.column = column; + } + + public String getTabAlias() { + return tabAlias; + } + + public void setTabAlias(String tabAlias) { + this.tabAlias = tabAlias; + } + + public boolean getIsParititonCol() { + return isPartitionCol; + } + + public void setIsPartitionCol(boolean isPartitionCol) { + this.isPartitionCol = isPartitionCol; + } + + @Override + public String toString() { + return "Column[" + column + "]"; + } + + @Explain(displayName = "expr") + @Override + public String getExprString() { + return getColumn(); + } + + @Override + public List getCols() { + List lst = new ArrayList(); + lst.add(column); + return lst; + } + + @Override + public ExprNodeDesc clone() { + return new ExprNodeColumnDesc(typeInfo, column, tabAlias, isPartitionCol); + } + + @Override + public boolean isSame(Object o) { + if (!(o instanceof ExprNodeColumnDesc)) { + return false; + } + ExprNodeColumnDesc dest = (ExprNodeColumnDesc) o; + if (!column.equals(dest.getColumn())) { + return false; + } + if (!typeInfo.equals(dest.getTypeInfo())) { + return false; + } + return true; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/JoinCondDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/JoinCondDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/JoinCondDesc.java (working copy) @@ -20,48 +20,50 @@ import java.io.Serializable; +import org.apache.hadoop.hive.ql.parse.JoinCond; + /** * Join conditions Descriptor implementation. 
* */ -public class joinCond implements Serializable { +public class JoinCondDesc implements Serializable { private static final long serialVersionUID = 1L; private int left; private int right; private int type; private boolean preserved; - public joinCond() { + public JoinCondDesc() { } - public joinCond(int left, int right, int type) { + public JoinCondDesc(int left, int right, int type) { this.left = left; this.right = right; this.type = type; } - public joinCond(org.apache.hadoop.hive.ql.parse.joinCond condn) { + public JoinCondDesc(JoinCond condn) { left = condn.getLeft(); right = condn.getRight(); preserved = condn.getPreserved(); switch (condn.getJoinType()) { case INNER: - type = joinDesc.INNER_JOIN; + type = JoinDesc.INNER_JOIN; break; case LEFTOUTER: - type = joinDesc.LEFT_OUTER_JOIN; + type = JoinDesc.LEFT_OUTER_JOIN; break; case RIGHTOUTER: - type = joinDesc.RIGHT_OUTER_JOIN; + type = JoinDesc.RIGHT_OUTER_JOIN; break; case FULLOUTER: - type = joinDesc.FULL_OUTER_JOIN; + type = JoinDesc.FULL_OUTER_JOIN; break; case UNIQUE: - type = joinDesc.UNIQUE_JOIN; + type = JoinDesc.UNIQUE_JOIN; break; case LEFTSEMI: - type = joinDesc.LEFT_SEMI_JOIN; + type = JoinDesc.LEFT_SEMI_JOIN; break; default: assert false; @@ -107,27 +109,27 @@ this.type = type; } - @explain + @Explain public String getJoinCondString() { StringBuilder sb = new StringBuilder(); switch (type) { - case joinDesc.INNER_JOIN: + case JoinDesc.INNER_JOIN: sb.append("Inner Join "); break; - case joinDesc.FULL_OUTER_JOIN: + case JoinDesc.FULL_OUTER_JOIN: sb.append("Outer Join "); break; - case joinDesc.LEFT_OUTER_JOIN: + case JoinDesc.LEFT_OUTER_JOIN: sb.append("Left Outer Join"); break; - case joinDesc.RIGHT_OUTER_JOIN: + case JoinDesc.RIGHT_OUTER_JOIN: sb.append("Right Outer Join"); break; - case joinDesc.UNIQUE_JOIN: + case JoinDesc.UNIQUE_JOIN: sb.append("Unique Join"); break; - case joinDesc.LEFT_SEMI_JOIN: + case JoinDesc.LEFT_SEMI_JOIN: sb.append("Left Semi Join "); break; default: Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DDLDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DDLDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DDLDesc.java (revision 0) @@ -0,0 +1,25 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
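Reviewer note (not part of the patch): a quick sketch of the renamed JoinCondDesc, using one of the join-type constants that JoinDesc carries over from the old joinDesc class shown earlier in this patch.

import org.apache.hadoop.hive.ql.plan.JoinCondDesc;
import org.apache.hadoop.hive.ql.plan.JoinDesc;

public class JoinCondDescSketch {
  public static void main(String[] args) {
    // Table position 0 LEFT OUTER JOIN table position 1.
    JoinCondDesc cond = new JoinCondDesc(0, 1, JoinDesc.LEFT_OUTER_JOIN);

    // EXPLAIN rendering of the condition; begins with "Left Outer Join".
    System.out.println(cond.getJoinCondString());
  }
}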
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +public abstract class DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/FileSinkDesc.java (revision 0) @@ -0,0 +1,105 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "File Output Operator") +public class FileSinkDesc implements Serializable { + private static final long serialVersionUID = 1L; + private String dirName; + private TableDesc tableInfo; + private boolean compressed; + private int destTableId; + private String compressCodec; + private String compressType; + + public FileSinkDesc() { + } + + public FileSinkDesc(final String dirName, final TableDesc tableInfo, + final boolean compressed, int destTableId) { + + this.dirName = dirName; + this.tableInfo = tableInfo; + this.compressed = compressed; + this.destTableId = destTableId; + } + + public FileSinkDesc(final String dirName, final TableDesc tableInfo, + final boolean compressed) { + + this.dirName = dirName; + this.tableInfo = tableInfo; + this.compressed = compressed; + destTableId = 0; + } + + @Explain(displayName = "directory", normalExplain = false) + public String getDirName() { + return dirName; + } + + public void setDirName(final String dirName) { + this.dirName = dirName; + } + + @Explain(displayName = "table") + public TableDesc getTableInfo() { + return tableInfo; + } + + public void setTableInfo(final TableDesc tableInfo) { + this.tableInfo = tableInfo; + } + + @Explain(displayName = "compressed") + public boolean getCompressed() { + return compressed; + } + + public void setCompressed(boolean compressed) { + this.compressed = compressed; + } + + @Explain(displayName = "GlobalTableId") + public int getDestTableId() { + return destTableId; + } + + public void setDestTableId(int destTableId) { + this.destTableId = destTableId; + } + + public String getCompressCodec() { + return compressCodec; + } + + public void setCompressCodec(String intermediateCompressorCodec) { + compressCodec = intermediateCompressorCodec; + } + + public String getCompressType() { + return compressType; + } + + public void setCompressType(String intermediateCompressType) { + compressType = intermediateCompressType; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/explainWork.java =================================================================== --- 
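Reviewer note (not part of the patch): a minimal sketch wiring the new FileSinkDesc to a TableDesc produced by PlanUtils. The output directory and column list are placeholders, and the separator code "1" (Ctrl-A) is an assumption for the sketch.

import org.apache.hadoop.hive.ql.plan.FileSinkDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.TableDesc;

public class FileSinkDescSketch {
  public static FileSinkDesc uncompressedSink(String outputDir) {
    // Default key/value schema; getDefaultTableDesc takes the separator
    // as its numeric string code (assumed Ctrl-A = "1" here).
    TableDesc tableInfo = PlanUtils.getDefaultTableDesc(Integer.toString(1), "key,value");

    // destTableId defaults to 0 with the three-argument constructor.
    return new FileSinkDesc(outputDir, tableInfo, false);
  }
}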
ql/src/java/org/apache/hadoop/hive/ql/plan/explainWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/explainWork.java (working copy) @@ -1,78 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.List; - -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.hive.ql.exec.Task; - -public class explainWork implements Serializable { - private static final long serialVersionUID = 1L; - - private Path resFile; - private List> rootTasks; - private String astStringTree; - boolean extended; - - public explainWork() { - } - - public explainWork(Path resFile, - List> rootTasks, String astStringTree, - boolean extended) { - this.resFile = resFile; - this.rootTasks = rootTasks; - this.astStringTree = astStringTree; - this.extended = extended; - } - - public Path getResFile() { - return resFile; - } - - public void setResFile(Path resFile) { - this.resFile = resFile; - } - - public List> getRootTasks() { - return rootTasks; - } - - public void setRootTasks(List> rootTasks) { - this.rootTasks = rootTasks; - } - - public String getAstStringTree() { - return astStringTree; - } - - public void setAstStringTree(String astStringTree) { - this.astStringTree = astStringTree; - } - - public boolean getExtended() { - return extended; - } - - public void setExtended(boolean extended) { - this.extended = extended; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ShowFunctionsDesc.java (revision 0) @@ -0,0 +1,99 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Show Functions") +public class ShowFunctionsDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String pattern; + Path resFile; + /** + * table name for the result of show tables + */ + private final String table = "show"; + /** + * thrift ddl for the result of show tables + */ + private final String schema = "tab_name#string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + /** + * @param resFile + */ + public ShowFunctionsDesc(Path resFile) { + this.resFile = resFile; + pattern = null; + } + + /** + * @param pattern + * names of tables to show + */ + public ShowFunctionsDesc(Path resFile, String pattern) { + this.resFile = resFile; + this.pattern = pattern; + } + + /** + * @return the pattern + */ + @Explain(displayName = "pattern") + public String getPattern() { + return pattern; + } + + /** + * @param pattern + * the pattern to set + */ + public void setPattern(String pattern) { + this.pattern = pattern; + } + + /** + * @return the resFile + */ + public Path getResFile() { + return resFile; + } + + @Explain(displayName = "result file", normalExplain = false) + public String getResFileString() { + return getResFile().getName(); + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(Path resFile) { + this.resFile = resFile; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/createFunctionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/createFunctionDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/createFunctionDesc.java (working copy) @@ -1,53 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
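Reviewer note (not part of the patch): a sketch of the new ShowFunctionsDesc; the result-file path and pattern are placeholders.

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc;

public class ShowFunctionsDescSketch {
  public static void main(String[] args) {
    // SHOW FUNCTIONS 'xpath*', writing its result to a scratch file.
    ShowFunctionsDesc desc =
        new ShowFunctionsDesc(new Path("/tmp/hive-scratch/show_functions.out"), "xpath*");

    System.out.println(desc.getPattern());        // xpath*
    System.out.println(desc.getResFileString());  // show_functions.out (file name only)
    System.out.println(desc.getSchema());         // tab_name#string
  }
}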
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Create Function") -public class createFunctionDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private String functionName; - private String className; - - public createFunctionDesc(String functionName, String className) { - this.functionName = functionName; - this.className = className; - } - - @explain(displayName = "name") - public String getFunctionName() { - return functionName; - } - - public void setFunctionName(String functionName) { - this.functionName = functionName; - } - - @explain(displayName = "class") - public String getClassName() { - return className; - } - - public void setClassName(String className) { - this.className = className; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/ExtractDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/ExtractDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/ExtractDesc.java (revision 0) @@ -0,0 +1,42 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Extract") +public class ExtractDesc implements Serializable { + private static final long serialVersionUID = 1L; + private ExprNodeDesc col; + + public ExtractDesc() { + } + + public ExtractDesc(final ExprNodeDesc col) { + this.col = col; + } + + public ExprNodeDesc getCol() { + return col; + } + + public void setCol(final ExprNodeDesc col) { + this.col = col; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/PlanUtils.java (working copy) @@ -59,11 +59,11 @@ }; @SuppressWarnings("nls") - public static mapredWork getMapRedWork() { - return new mapredWork("", new LinkedHashMap>(), - new LinkedHashMap(), + public static MapredWork getMapRedWork() { + return new MapredWork("", new LinkedHashMap>(), + new LinkedHashMap(), new LinkedHashMap>(), - new tableDesc(), new ArrayList(), null, Integer.valueOf(1), + new TableDesc(), new ArrayList(), null, Integer.valueOf(1), null); } @@ -71,7 +71,7 @@ * Generate the table descriptor of MetadataTypedColumnsetSerDe with the * separatorCode and column names (comma separated string). 
*/ - public static tableDesc getDefaultTableDesc(String separatorCode, + public static TableDesc getDefaultTableDesc(String separatorCode, String columns) { return getDefaultTableDesc(separatorCode, columns, false); } @@ -80,7 +80,7 @@ * Generate the table descriptor of given serde with the separatorCode and * column names (comma separated string). */ - public static tableDesc getTableDesc( + public static TableDesc getTableDesc( Class serdeClass, String separatorCode, String columns) { return getTableDesc(serdeClass, separatorCode, columns, false); @@ -91,7 +91,7 @@ * separatorCode and column names (comma separated string), and whether the * last column should take the rest of the line. */ - public static tableDesc getDefaultTableDesc(String separatorCode, + public static TableDesc getDefaultTableDesc(String separatorCode, String columns, boolean lastColumnTakesRestOfTheLine) { return getDefaultTableDesc(separatorCode, columns, null, lastColumnTakesRestOfTheLine); @@ -102,7 +102,7 @@ * and column names (comma separated string), and whether the last column * should take the rest of the line. */ - public static tableDesc getTableDesc( + public static TableDesc getTableDesc( Class serdeClass, String separatorCode, String columns, boolean lastColumnTakesRestOfTheLine) { return getTableDesc(serdeClass, separatorCode, columns, null, @@ -114,20 +114,20 @@ * separatorCode and column names (comma separated string), and whether the * last column should take the rest of the line. */ - public static tableDesc getDefaultTableDesc(String separatorCode, + public static TableDesc getDefaultTableDesc(String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine) { return getTableDesc(LazySimpleSerDe.class, separatorCode, columns, columnTypes, lastColumnTakesRestOfTheLine); } - public static tableDesc getTableDesc( + public static TableDesc getTableDesc( Class serdeClass, String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine) { return getTableDesc(serdeClass, separatorCode, columns, columnTypes, lastColumnTakesRestOfTheLine, false); } - public static tableDesc getTableDesc( + public static TableDesc getTableDesc( Class serdeClass, String separatorCode, String columns, String columnTypes, boolean lastColumnTakesRestOfTheLine, boolean useJSONForLazy) { @@ -158,14 +158,14 @@ properties.setProperty(Constants.SERIALIZATION_USE_JSON_OBJECTS, "true"); } - return new tableDesc(serdeClass, TextInputFormat.class, + return new TableDesc(serdeClass, TextInputFormat.class, IgnoreKeyTextOutputFormat.class, properties); } /** - * Generate a table descriptor from a createTableDesc. + * Generate a table descriptor from a CreateTableDesc. */ - public static tableDesc getTableDesc(createTableDesc crtTblDesc, String cols, + public static TableDesc getTableDesc(CreateTableDesc crtTblDesc, String cols, String colTypes) { Class serdeClass = LazySimpleSerDe.class; @@ -173,7 +173,7 @@ String columns = cols; String columnTypes = colTypes; boolean lastColumnTakesRestOfTheLine = false; - tableDesc ret; + TableDesc ret; try { if (crtTblDesc.getSerName() != null) { @@ -232,8 +232,8 @@ * does not support a table with a single column "col" with type * "array". 
*/ - public static tableDesc getDefaultTableDesc(String separatorCode) { - return new tableDesc(MetadataTypedColumnsetSerDe.class, + public static TableDesc getDefaultTableDesc(String separatorCode) { + return new TableDesc(MetadataTypedColumnsetSerDe.class, TextInputFormat.class, IgnoreKeyTextOutputFormat.class, Utilities .makeProperties( org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, @@ -243,9 +243,9 @@ /** * Generate the table descriptor for reduce key. */ - public static tableDesc getReduceKeyTableDesc(List fieldSchemas, + public static TableDesc getReduceKeyTableDesc(List fieldSchemas, String order) { - return new tableDesc(BinarySortableSerDe.class, + return new TableDesc(BinarySortableSerDe.class, SequenceFileInputFormat.class, SequenceFileOutputFormat.class, Utilities.makeProperties(Constants.LIST_COLUMNS, MetaStoreUtils .getColumnNamesFromFieldSchema(fieldSchemas), @@ -257,8 +257,8 @@ /** * Generate the table descriptor for Map-side join key. */ - public static tableDesc getMapJoinKeyTableDesc(List fieldSchemas) { - return new tableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, + public static TableDesc getMapJoinKeyTableDesc(List fieldSchemas) { + return new TableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, SequenceFileOutputFormat.class, Utilities.makeProperties("columns", MetaStoreUtils.getColumnNamesFromFieldSchema(fieldSchemas), "columns.types", MetaStoreUtils @@ -269,9 +269,9 @@ /** * Generate the table descriptor for Map-side join key. */ - public static tableDesc getMapJoinValueTableDesc( + public static TableDesc getMapJoinValueTableDesc( List fieldSchemas) { - return new tableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, + return new TableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, SequenceFileOutputFormat.class, Utilities.makeProperties("columns", MetaStoreUtils.getColumnNamesFromFieldSchema(fieldSchemas), "columns.types", MetaStoreUtils @@ -282,9 +282,9 @@ /** * Generate the table descriptor for intermediate files. */ - public static tableDesc getIntermediateFileTableDesc( + public static TableDesc getIntermediateFileTableDesc( List fieldSchemas) { - return new tableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, + return new TableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, SequenceFileOutputFormat.class, Utilities.makeProperties( Constants.LIST_COLUMNS, MetaStoreUtils .getColumnNamesFromFieldSchema(fieldSchemas), @@ -296,8 +296,8 @@ /** * Generate the table descriptor for intermediate files. */ - public static tableDesc getReduceValueTableDesc(List fieldSchemas) { - return new tableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, + public static TableDesc getReduceValueTableDesc(List fieldSchemas) { + return new TableDesc(LazyBinarySerDe.class, SequenceFileInputFormat.class, SequenceFileOutputFormat.class, Utilities.makeProperties( Constants.LIST_COLUMNS, MetaStoreUtils .getColumnNamesFromFieldSchema(fieldSchemas), @@ -310,7 +310,7 @@ * Convert the ColumnList to FieldSchema list. */ public static List getFieldSchemasFromColumnList( - List cols, List outputColumnNames, int start, + List cols, List outputColumnNames, int start, String fieldPrefix) { List schemas = new ArrayList(cols.size()); for (int i = 0; i < cols.size(); i++) { @@ -324,7 +324,7 @@ * Convert the ColumnList to FieldSchema list. 
*/ public static List getFieldSchemasFromColumnList( - List cols, String fieldPrefix) { + List cols, String fieldPrefix) { List schemas = new ArrayList(cols.size()); for (int i = 0; i < cols.size(); i++) { schemas.add(MetaStoreUtils.getFieldSchemaFromTypeInfo(fieldPrefix + i, @@ -391,14 +391,14 @@ * @param numReducers * The number of reducers, set to -1 for automatic inference based on * input data size. - * @return The reduceSinkDesc object. + * @return The ReduceSinkDesc object. */ - public static reduceSinkDesc getReduceSinkDesc( - ArrayList keyCols, ArrayList valueCols, + public static ReduceSinkDesc getReduceSinkDesc( + ArrayList keyCols, ArrayList valueCols, List outputColumnNames, boolean includeKeyCols, int tag, - ArrayList partitionCols, String order, int numReducers) { - tableDesc keyTable = null; - tableDesc valueTable = null; + ArrayList partitionCols, String order, int numReducers) { + TableDesc keyTable = null; + TableDesc valueTable = null; ArrayList outputKeyCols = new ArrayList(); ArrayList outputValCols = new ArrayList(); if (includeKeyCols) { @@ -419,7 +419,7 @@ valueCols, outputColumnNames, 0, "")); outputValCols.addAll(outputColumnNames); } - return new reduceSinkDesc(keyCols, valueCols, outputKeyCols, outputValCols, + return new ReduceSinkDesc(keyCols, valueCols, outputKeyCols, outputValCols, tag, partitionCols, numReducers, keyTable, // Revert to DynamicSerDe: // getBinaryTableDesc(getFieldSchemasFromColumnList(valueCols, @@ -444,24 +444,24 @@ * @param numReducers * The number of reducers, set to -1 for automatic inference based on * input data size. - * @return The reduceSinkDesc object. + * @return The ReduceSinkDesc object. */ - public static reduceSinkDesc getReduceSinkDesc( - ArrayList keyCols, ArrayList valueCols, + public static ReduceSinkDesc getReduceSinkDesc( + ArrayList keyCols, ArrayList valueCols, List outputColumnNames, boolean includeKey, int tag, int numPartitionFields, int numReducers) { - ArrayList partitionCols = null; + ArrayList partitionCols = null; if (numPartitionFields >= keyCols.size()) { partitionCols = keyCols; } else if (numPartitionFields >= 0) { - partitionCols = new ArrayList(numPartitionFields); + partitionCols = new ArrayList(numPartitionFields); for (int i = 0; i < numPartitionFields; i++) { partitionCols.add(keyCols.get(i)); } } else { // numPartitionFields = -1 means random partitioning - partitionCols = new ArrayList(1); + partitionCols = new ArrayList(1); partitionCols.add(TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("rand")); } Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapredWork.java (revision 0) @@ -0,0 +1,280 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
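Reviewer note (not part of the patch): the getReduceSinkDesc overloads above lose their type parameters in this patch rendering. Assuming the key and value lists hold ExprNodeDesc and the output names are Strings, and with an illustrative tag of -1 for an untagged single stream, a call looks like the sketch below; the column and alias names are placeholders.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.PlanUtils;
import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory;

public class ReduceSinkDescSketch {
  public static ReduceSinkDesc sketch() {
    ArrayList<ExprNodeDesc> keyCols = new ArrayList<ExprNodeDesc>();
    keyCols.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "key", "src", false));

    ArrayList<ExprNodeDesc> valueCols = new ArrayList<ExprNodeDesc>();
    valueCols.add(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, "value", "src", false));

    List<String> outputColumnNames = Arrays.asList("_col0", "_col1");

    // includeKey = true, tag = -1 (assumed: no tagging), partition on the
    // first key column, numReducers = -1 so the runtime infers the count
    // from the input data size.
    return PlanUtils.getReduceSinkDesc(keyCols, valueCols, outputColumnNames,
        true, -1, 1, -1);
  }
}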
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.ByteArrayOutputStream; +import java.io.Serializable; +import java.util.ArrayList; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.ql.exec.Operator; +import org.apache.hadoop.hive.ql.exec.Utilities; + +@Explain(displayName = "Map Reduce") +public class MapredWork implements Serializable { + private static final long serialVersionUID = 1L; + private String command; + // map side work + // use LinkedHashMap to make sure the iteration order is + // deterministic, to ease testing + private LinkedHashMap> pathToAliases; + + private LinkedHashMap pathToPartitionInfo; + + private LinkedHashMap> aliasToWork; + + private LinkedHashMap aliasToPartnInfo; + + // map<->reduce interface + // schema of the map-reduce 'key' object - this is homogeneous + private TableDesc keyDesc; + + // schema of the map-reduce 'val' object - this is heterogeneous + private List tagToValueDesc; + + private Operator reducer; + + private Integer numReduceTasks; + + private boolean needsTagging; + private MapredLocalWork mapLocalWork; + + public MapredWork() { + aliasToPartnInfo = new LinkedHashMap(); + } + + public MapredWork( + final String command, + final LinkedHashMap> pathToAliases, + final LinkedHashMap pathToPartitionInfo, + final LinkedHashMap> aliasToWork, + final TableDesc keyDesc, List tagToValueDesc, + final Operator reducer, final Integer numReduceTasks, + final MapredLocalWork mapLocalWork) { + this.command = command; + this.pathToAliases = pathToAliases; + this.pathToPartitionInfo = pathToPartitionInfo; + this.aliasToWork = aliasToWork; + this.keyDesc = keyDesc; + this.tagToValueDesc = tagToValueDesc; + this.reducer = reducer; + this.numReduceTasks = numReduceTasks; + this.mapLocalWork = mapLocalWork; + aliasToPartnInfo = new LinkedHashMap(); + } + + public String getCommand() { + return command; + } + + public void setCommand(final String command) { + this.command = command; + } + + @Explain(displayName = "Path -> Alias", normalExplain = false) + public LinkedHashMap> getPathToAliases() { + return pathToAliases; + } + + public void setPathToAliases( + final LinkedHashMap> pathToAliases) { + this.pathToAliases = pathToAliases; + } + + @Explain(displayName = "Path -> Partition", normalExplain = false) + public LinkedHashMap getPathToPartitionInfo() { + return pathToPartitionInfo; + } + + public void setPathToPartitionInfo( + final LinkedHashMap pathToPartitionInfo) { + this.pathToPartitionInfo = pathToPartitionInfo; + } + + /** + * @return the aliasToPartnInfo + */ + public LinkedHashMap getAliasToPartnInfo() { + return aliasToPartnInfo; + } + + /** + * @param aliasToPartnInfo + * the aliasToPartnInfo to set + */ + public void setAliasToPartnInfo( + LinkedHashMap aliasToPartnInfo) { + this.aliasToPartnInfo = aliasToPartnInfo; + } + + @Explain(displayName = "Alias -> Map Operator Tree") + public LinkedHashMap> getAliasToWork() { + return aliasToWork; + } + + public void setAliasToWork( + final LinkedHashMap> aliasToWork) { + this.aliasToWork = aliasToWork; + } + + /** + 
* @return the MapredLocalWork + */ + @Explain(displayName = "Local Work") + public MapredLocalWork getMapLocalWork() { + return mapLocalWork; + } + + /** + * @param mapLocalWork + * the MapredLocalWork to set + */ + public void setMapLocalWork(final MapredLocalWork mapLocalWork) { + this.mapLocalWork = mapLocalWork; + } + + public TableDesc getKeyDesc() { + return keyDesc; + } + + public void setKeyDesc(final TableDesc keyDesc) { + this.keyDesc = keyDesc; + } + + public List getTagToValueDesc() { + return tagToValueDesc; + } + + public void setTagToValueDesc(final List tagToValueDesc) { + this.tagToValueDesc = tagToValueDesc; + } + + @Explain(displayName = "Reduce Operator Tree") + public Operator getReducer() { + return reducer; + } + + public void setReducer(final Operator reducer) { + this.reducer = reducer; + } + + /** + * If the number of reducers is -1, the runtime will automatically figure it + * out by input data size. + * + * The number of reducers will be a positive number only in case the target + * table is bucketed into N buckets (through CREATE TABLE). This feature is + * not supported yet, so the number of reducers will always be -1 for now. + */ + public Integer getNumReduceTasks() { + return numReduceTasks; + } + + public void setNumReduceTasks(final Integer numReduceTasks) { + this.numReduceTasks = numReduceTasks; + } + + @SuppressWarnings("nls") + public void addMapWork(String path, String alias, Operator work, + PartitionDesc pd) { + ArrayList curAliases = pathToAliases.get(path); + if (curAliases == null) { + assert (pathToPartitionInfo.get(path) == null); + curAliases = new ArrayList(); + pathToAliases.put(path, curAliases); + pathToPartitionInfo.put(path, pd); + } else { + assert (pathToPartitionInfo.get(path) != null); + } + + for (String oneAlias : curAliases) { + if (oneAlias.equals(alias)) { + throw new RuntimeException("Multiple aliases named: " + alias + + " for path: " + path); + } + } + curAliases.add(alias); + + if (aliasToWork.get(alias) != null) { + throw new RuntimeException("Existing work for alias: " + alias); + } + aliasToWork.put(alias, work); + } + + @SuppressWarnings("nls") + public String isInvalid() { + if ((getNumReduceTasks() >= 1) && (getReducer() == null)) { + return "Reducers > 0 but no reduce operator"; + } + + if ((getNumReduceTasks() == 0) && (getReducer() != null)) { + return "Reducers == 0 but reduce operator specified"; + } + + return null; + } + + public String toXML() { + ByteArrayOutputStream baos = new ByteArrayOutputStream(); + Utilities.serializeMapRedWork(this, baos); + return (baos.toString()); + } + + // non bean + + /** + * For each map side operator - stores the alias the operator is working on + * behalf of in the operator runtime state. This is used by reducesink + * operator - but could be useful for debugging as well. + */ + private void setAliases() { + for (String oneAlias : aliasToWork.keySet()) { + aliasToWork.get(oneAlias).setAlias(oneAlias); + } + } + + /** + * Derive additional attributes to be rendered by EXPLAIN. 
+ */ + public void deriveExplainAttributes() { + if (pathToPartitionInfo == null) { + return; + } + for (Map.Entry entry : pathToPartitionInfo + .entrySet()) { + entry.getValue().deriveBaseFileName(entry.getKey()); + } + } + + public void initialize() { + setAliases(); + } + + @Explain(displayName = "Needs Tagging", normalExplain = false) + public boolean getNeedsTagging() { + return needsTagging; + } + + public void setNeedsTagging(boolean needsTagging) { + this.needsTagging = needsTagging; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/SelectDesc.java (revision 0) @@ -0,0 +1,113 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; + +@Explain(displayName = "Select Operator") +public class SelectDesc implements Serializable { + private static final long serialVersionUID = 1L; + private ArrayList colList; + private ArrayList outputColumnNames; + private boolean selectStar; + private boolean selStarNoCompute; + + public SelectDesc() { + } + + public SelectDesc(boolean selStarNoCompute) { + this.selStarNoCompute = selStarNoCompute; + } + + public SelectDesc(ArrayList colList, ArrayList outputColumnNames) { + this(colList, outputColumnNames, false); + } + + public SelectDesc(ArrayList colList, + ArrayList outputColumnNames, boolean selectStar) { + this.colList = colList; + this.selectStar = selectStar; + this.outputColumnNames = outputColumnNames; + } + + public SelectDesc(ArrayList colList, + boolean selectStar, boolean selStarNoCompute) { + this.colList = colList; + this.selectStar = selectStar; + this.selStarNoCompute = selStarNoCompute; + } + + @Explain(displayName = "expressions") + public ArrayList getColList() { + return colList; + } + + public void setColList(ArrayList colList) { + this.colList = colList; + } + + @Explain(displayName = "outputColumnNames") + public ArrayList getOutputColumnNames() { + return outputColumnNames; + } + + public void setOutputColumnNames(ArrayList outputColumnNames) { + this.outputColumnNames = outputColumnNames; + } + + @Explain(displayName = "SELECT * ") + public String explainNoCompute() { + if (isSelStarNoCompute()) { + return "(no compute)"; + } else { + return null; + } + } + + /** + * @return the selectStar + */ + public boolean isSelectStar() { + return selectStar; + } + + /** + * @param selectStar + * the selectStar to set + */ + public void setSelectStar(boolean selectStar) { + this.selectStar = selectStar; + } + + /** + * @return the selStarNoCompute + */ + public boolean 
isSelStarNoCompute() { + return selStarNoCompute; + } + + /** + * @param selStarNoCompute + * the selStarNoCompute to set + */ + public void setSelStarNoCompute(boolean selStarNoCompute) { + this.selStarNoCompute = selStarNoCompute; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadFileDesc.java (revision 0) @@ -0,0 +1,91 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +public class LoadFileDesc extends LoadDesc implements Serializable { + private static final long serialVersionUID = 1L; + private String targetDir; + private boolean isDfsDir; + // list of columns, comma separated + private String columns; + private String columnTypes; + + public LoadFileDesc() { + } + + public LoadFileDesc(final String sourceDir, final String targetDir, + final boolean isDfsDir, final String columns, final String columnTypes) { + + super(sourceDir); + this.targetDir = targetDir; + this.isDfsDir = isDfsDir; + this.columns = columns; + this.columnTypes = columnTypes; + } + + @Explain(displayName = "destination") + public String getTargetDir() { + return targetDir; + } + + public void setTargetDir(final String targetDir) { + this.targetDir = targetDir; + } + + @Explain(displayName = "hdfs directory") + public boolean getIsDfsDir() { + return isDfsDir; + } + + public void setIsDfsDir(final boolean isDfsDir) { + this.isDfsDir = isDfsDir; + } + + /** + * @return the columns + */ + public String getColumns() { + return columns; + } + + /** + * @param columns + * the columns to set + */ + public void setColumns(String columns) { + this.columns = columns; + } + + /** + * @return the columnTypes + */ + public String getColumnTypes() { + return columnTypes; + } + + /** + * @param columnTypes + * the columnTypes to set + */ + public void setColumnTypes(String columnTypes) { + this.columnTypes = columnTypes; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/LoadDesc.java (revision 0) @@ -0,0 +1,43 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. 
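Illustrative note, not part of the patch: a minimal sketch of how the renamed SelectDesc is typically populated. The flattened diff above has lost the generic parameters, so the element types (ExprNodeDesc expressions, String column names) are assumptions based on the getters shown.

import java.util.ArrayList;

import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc;
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.SelectDesc;

public class SelectDescExample {
  public static void main(String[] args) {
    // Element types assumed; the diff above shows the raw ArrayList fields only.
    ArrayList<ExprNodeDesc> colList = new ArrayList<ExprNodeDesc>();
    colList.add(new ExprNodeConstantDesc("hello"));   // a single constant projection

    ArrayList<String> outputColumnNames = new ArrayList<String>();
    outputColumnNames.add("_col0");

    SelectDesc select = new SelectDesc(colList, outputColumnNames);
    System.out.println(select.getColList().size() + " expression(s), selectStar="
        + select.isSelectStar());
  }
}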
The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +public class LoadDesc implements Serializable { + private static final long serialVersionUID = 1L; + private String sourceDir; + + public LoadDesc() { + } + + public LoadDesc(final String sourceDir) { + + this.sourceDir = sourceDir; + } + + @Explain(displayName = "source", normalExplain = false) + public String getSourceDir() { + return sourceDir; + } + + public void setSourceDir(final String source) { + sourceDir = source; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/AlterTableDesc.java (revision 0) @@ -0,0 +1,420 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; +import java.util.Map; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.metastore.api.Order; +import org.apache.hadoop.hive.ql.exec.Utilities; + +@Explain(displayName = "Alter Table") +public class AlterTableDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + + public static enum alterTableTypes { + RENAME, ADDCOLS, REPLACECOLS, ADDPROPS, ADDSERDE, ADDSERDEPROPS, ADDFILEFORMAT, ADDCLUSTERSORTCOLUMN, RENAMECOLUMN + }; + + alterTableTypes op; + String oldName; + String newName; + List newCols; + String serdeName; + Map props; + String inputFormat; + String outputFormat; + int numberBuckets; + List bucketColumns; + List sortColumns; + + String oldColName; + String newColName; + String newColType; + String newColComment; + boolean first; + String afterCol; + + /** + * @param tblName + * table name + * @param oldColName + * old column name + * @param newColName + * new column name + * @param newComment + * @param newType + */ + public AlterTableDesc(String tblName, String oldColName, String newColName, + String newType, String newComment, boolean first, String afterCol) { + super(); + oldName = tblName; + this.oldColName = oldColName; + this.newColName = newColName; + newColType = newType; + newColComment = newComment; + this.first = first; + this.afterCol = afterCol; + op = alterTableTypes.RENAMECOLUMN; + } + + /** + * @param oldName + * old name of the table + * @param newName + * new name of the table + */ + public AlterTableDesc(String oldName, String newName) { + op = alterTableTypes.RENAME; + this.oldName = oldName; + this.newName = newName; + } + + /** + * @param name + * name of the table + * @param newCols + * new columns to be added + */ + public AlterTableDesc(String name, List newCols, + alterTableTypes alterType) { + op = alterType; + oldName = name; + this.newCols = newCols; + } + + /** + * @param alterType + * type of alter op + */ + public AlterTableDesc(alterTableTypes alterType) { + op = alterType; + } + + /** + * + * @param name + * name of the table + * @param inputFormat + * new table input format + * @param outputFormat + * new table output format + */ + public AlterTableDesc(String name, String inputFormat, String outputFormat, + String serdeName) { + super(); + op = alterTableTypes.ADDFILEFORMAT; + oldName = name; + this.inputFormat = inputFormat; + this.outputFormat = outputFormat; + this.serdeName = serdeName; + } + + public AlterTableDesc(String tableName, int numBuckets, + List bucketCols, List sortCols) { + oldName = tableName; + op = alterTableTypes.ADDCLUSTERSORTCOLUMN; + numberBuckets = numBuckets; + bucketColumns = bucketCols; + sortColumns = sortCols; + } + + /** + * @return the old name of the table + */ + @Explain(displayName = "old name") + public String getOldName() { + return oldName; + } + + /** + * @param oldName + * the oldName to set + */ + public void setOldName(String oldName) { + this.oldName = oldName; + } + + /** + * @return the newName + */ + @Explain(displayName = "new name") + public String getNewName() { + return newName; + } + + /** + * @param newName + * the newName to set + */ + public void setNewName(String newName) { + this.newName = newName; + } + + /** + * @return the op + */ + public alterTableTypes getOp() { + return op; + } + + @Explain(displayName = "type") + public String getAlterTableTypeString() { + switch (op) { + case RENAME: + return 
"rename"; + case ADDCOLS: + return "add columns"; + case REPLACECOLS: + return "replace columns"; + } + + return "unknown"; + } + + /** + * @param op + * the op to set + */ + public void setOp(alterTableTypes op) { + this.op = op; + } + + /** + * @return the newCols + */ + public List getNewCols() { + return newCols; + } + + @Explain(displayName = "new columns") + public List getNewColsString() { + return Utilities.getFieldSchemaString(getNewCols()); + } + + /** + * @param newCols + * the newCols to set + */ + public void setNewCols(List newCols) { + this.newCols = newCols; + } + + /** + * @return the serdeName + */ + @Explain(displayName = "deserializer library") + public String getSerdeName() { + return serdeName; + } + + /** + * @param serdeName + * the serdeName to set + */ + public void setSerdeName(String serdeName) { + this.serdeName = serdeName; + } + + /** + * @return the props + */ + @Explain(displayName = "properties") + public Map getProps() { + return props; + } + + /** + * @param props + * the props to set + */ + public void setProps(Map props) { + this.props = props; + } + + /** + * @return the input format + */ + @Explain(displayName = "input format") + public String getInputFormat() { + return inputFormat; + } + + /** + * @param inputFormat + * the input format to set + */ + public void setInputFormat(String inputFormat) { + this.inputFormat = inputFormat; + } + + /** + * @return the output format + */ + @Explain(displayName = "output format") + public String getOutputFormat() { + return outputFormat; + } + + /** + * @param outputFormat + * the output format to set + */ + public void setOutputFormat(String outputFormat) { + this.outputFormat = outputFormat; + } + + /** + * @return the number of buckets + */ + public int getNumberBuckets() { + return numberBuckets; + } + + /** + * @param numberBuckets + * the number of buckets to set + */ + public void setNumberBuckets(int numberBuckets) { + this.numberBuckets = numberBuckets; + } + + /** + * @return the bucket columns + */ + public List getBucketColumns() { + return bucketColumns; + } + + /** + * @param bucketColumns + * the bucket columns to set + */ + public void setBucketColumns(List bucketColumns) { + this.bucketColumns = bucketColumns; + } + + /** + * @return the sort columns + */ + public List getSortColumns() { + return sortColumns; + } + + /** + * @param sortColumns + * the sort columns to set + */ + public void setSortColumns(List sortColumns) { + this.sortColumns = sortColumns; + } + + /** + * @return old column name + */ + public String getOldColName() { + return oldColName; + } + + /** + * @param oldColName + * the old column name + */ + public void setOldColName(String oldColName) { + this.oldColName = oldColName; + } + + /** + * @return new column name + */ + public String getNewColName() { + return newColName; + } + + /** + * @param newColName + * the new column name + */ + public void setNewColName(String newColName) { + this.newColName = newColName; + } + + /** + * @return new column type + */ + public String getNewColType() { + return newColType; + } + + /** + * @param newType + * new column's type + */ + public void setNewColType(String newType) { + newColType = newType; + } + + /** + * @return new column's comment + */ + public String getNewColComment() { + return newColComment; + } + + /** + * @param newComment + * new column's comment + */ + public void setNewColComment(String newComment) { + newColComment = newComment; + } + + /** + * @return if the column should be changed to position 0 + */ + 
public boolean getFirst() { + return first; + } + + /** + * @param first + * set the column to position 0 + */ + public void setFirst(boolean first) { + this.first = first; + } + + /** + * @return the column's after position + */ + public String getAfterCol() { + return afterCol; + } + + /** + * @param afterCol + * set the column's after position + */ + public void setAfterCol(String afterCol) { + this.afterCol = afterCol; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateTableLikeDesc.java (revision 0) @@ -0,0 +1,86 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +@Explain(displayName = "Create Table") +public class CreateTableLikeDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String tableName; + boolean isExternal; + String location; + boolean ifNotExists; + String likeTableName; + + public CreateTableLikeDesc(String tableName, boolean isExternal, + String location, boolean ifNotExists, String likeTableName) { + this.tableName = tableName; + this.isExternal = isExternal; + this.location = location; + this.ifNotExists = ifNotExists; + this.likeTableName = likeTableName; + } + + @Explain(displayName = "if not exists") + public boolean getIfNotExists() { + return ifNotExists; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } + + @Explain(displayName = "name") + public String getTableName() { + return tableName; + } + + public void setTableName(String tableName) { + this.tableName = tableName; + } + + @Explain(displayName = "location") + public String getLocation() { + return location; + } + + public void setLocation(String location) { + this.location = location; + } + + @Explain(displayName = "isExternal") + public boolean isExternal() { + return isExternal; + } + + public void setExternal(boolean isExternal) { + this.isExternal = isExternal; + } + + @Explain(displayName = "like") + public String getLikeTableName() { + return likeTableName; + } + + public void setLikeTableName(String likeTableName) { + this.likeTableName = likeTableName; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/DescFunctionDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/DescFunctionDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/DescFunctionDesc.java (revision 0) @@ -0,0 +1,110 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under 
one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +import org.apache.hadoop.fs.Path; + +@Explain(displayName = "Describe Function") +public class DescFunctionDesc extends DDLDesc implements Serializable { + private static final long serialVersionUID = 1L; + String name; + Path resFile; + boolean isExtended; + + public boolean isExtended() { + return isExtended; + } + + public void setExtended(boolean isExtended) { + this.isExtended = isExtended; + } + + /** + * table name for the result of show tables + */ + private final String table = "show"; + /** + * thrift ddl for the result of show tables + */ + private final String schema = "tab_name#string"; + + public String getTable() { + return table; + } + + public String getSchema() { + return schema; + } + + /** + * @param resFile + */ + public DescFunctionDesc(Path resFile) { + this.resFile = resFile; + name = null; + } + + /** + * @param name + * of the function to describe + */ + public DescFunctionDesc(Path resFile, String name, boolean isExtended) { + this.isExtended = isExtended; + this.resFile = resFile; + this.name = name; + } + + /** + * @return the name + */ + @Explain(displayName = "name") + public String getName() { + return name; + } + + /** + * @param name + * is the function name + */ + public void setName(String name) { + this.name = name; + } + + /** + * @return the resFile + */ + public Path getResFile() { + return resFile; + } + + @Explain(displayName = "result file", normalExplain = false) + public String getResFileString() { + return getResFile().getName(); + } + + /** + * @param resFile + * the resFile to set + */ + public void setResFile(Path resFile) { + this.resFile = resFile; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/collectDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/collectDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/collectDesc.java (working copy) @@ -1,42 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -@explain(displayName = "Collect") -public class collectDesc implements Serializable { - private static final long serialVersionUID = 1L; - Integer bufferSize; - - public collectDesc() { - } - - public collectDesc(final Integer bufferSize) { - this.bufferSize = bufferSize; - } - - public Integer getBufferSize() { - return bufferSize; - } - - public void setBufferSize(Integer bufferSize) { - this.bufferSize = bufferSize; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/GroupByDesc.java (revision 0) @@ -0,0 +1,149 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +@Explain(displayName = "Group By Operator") +public class GroupByDesc implements java.io.Serializable { + /** + * Group-by Mode: COMPLETE: complete 1-phase aggregation: iterate, terminate + * PARTIAL1: partial aggregation - first phase: iterate, terminatePartial + * PARTIAL2: partial aggregation - second phase: merge, terminatePartial + * PARTIALS: For non-distinct the same as PARTIAL2, for distinct the same as + * PARTIAL1 FINAL: partial aggregation - final phase: merge, terminate HASH: + * For non-distinct the same as PARTIAL1 but use hash-table-based aggregation + * MERGEPARTIAL: FINAL for non-distinct aggregations, COMPLETE for distinct + * aggregations + */ + private static final long serialVersionUID = 1L; + + public static enum Mode { + COMPLETE, PARTIAL1, PARTIAL2, PARTIALS, FINAL, HASH, MERGEPARTIAL + }; + + private Mode mode; + private boolean groupKeyNotReductionKey; + private boolean bucketGroup; + + private java.util.ArrayList keys; + private java.util.ArrayList aggregators; + private java.util.ArrayList outputColumnNames; + + public GroupByDesc() { + } + + public GroupByDesc( + final Mode mode, + final java.util.ArrayList outputColumnNames, + final java.util.ArrayList keys, + final java.util.ArrayList aggregators, + final boolean groupKeyNotReductionKey) { + this(mode, outputColumnNames, keys, aggregators, groupKeyNotReductionKey, + false); + } + + public GroupByDesc( + final Mode mode, + final java.util.ArrayList outputColumnNames, + final java.util.ArrayList keys, + final java.util.ArrayList aggregators, + final boolean groupKeyNotReductionKey, final boolean bucketGroup) { + this.mode = mode; + this.outputColumnNames = outputColumnNames; + this.keys = keys; + this.aggregators = aggregators; + 
this.groupKeyNotReductionKey = groupKeyNotReductionKey; + this.bucketGroup = bucketGroup; + } + + public Mode getMode() { + return mode; + } + + @Explain(displayName = "mode") + public String getModeString() { + switch (mode) { + case COMPLETE: + return "complete"; + case PARTIAL1: + return "partial1"; + case PARTIAL2: + return "partial2"; + case PARTIALS: + return "partials"; + case HASH: + return "hash"; + case FINAL: + return "final"; + case MERGEPARTIAL: + return "mergepartial"; + } + + return "unknown"; + } + + public void setMode(final Mode mode) { + this.mode = mode; + } + + @Explain(displayName = "keys") + public java.util.ArrayList getKeys() { + return keys; + } + + public void setKeys(final java.util.ArrayList keys) { + this.keys = keys; + } + + @Explain(displayName = "outputColumnNames") + public java.util.ArrayList getOutputColumnNames() { + return outputColumnNames; + } + + public void setOutputColumnNames( + java.util.ArrayList outputColumnNames) { + this.outputColumnNames = outputColumnNames; + } + + @Explain(displayName = "aggregations") + public java.util.ArrayList getAggregators() { + return aggregators; + } + + public void setAggregators( + final java.util.ArrayList aggregators) { + this.aggregators = aggregators; + } + + public boolean getGroupKeyNotReductionKey() { + return groupKeyNotReductionKey; + } + + public void setGroupKeyNotReductionKey(final boolean groupKeyNotReductionKey) { + this.groupKeyNotReductionKey = groupKeyNotReductionKey; + } + + @Explain(displayName = "bucketGroup") + public boolean getBucketGroup() { + return bucketGroup; + } + + public void setBucketGroup(boolean dataSorted) { + bucketGroup = dataSorted; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/mapredLocalWork.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/mapredLocalWork.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/mapredLocalWork.java (working copy) @@ -1,69 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
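Illustrative note, not part of the patch: a sketch constructing the renamed GroupByDesc through the five-argument constructor shown above. The list element types, and the AggregationDesc class name for the aggregator list, are assumptions (the flattened diff drops the generic parameters).

import java.util.ArrayList;

import org.apache.hadoop.hive.ql.plan.AggregationDesc;   // assumed CamelCase counterpart of the old aggregationDesc
import org.apache.hadoop.hive.ql.plan.ExprNodeDesc;
import org.apache.hadoop.hive.ql.plan.GroupByDesc;

public class GroupByDescExample {
  public static void main(String[] args) {
    ArrayList<String> outputColumnNames = new ArrayList<String>();
    outputColumnNames.add("_col0");

    ArrayList<ExprNodeDesc> keys = new ArrayList<ExprNodeDesc>();
    ArrayList<AggregationDesc> aggregators = new ArrayList<AggregationDesc>();

    // Map-side hash aggregation; keys and aggregations left empty in this sketch.
    GroupByDesc gby = new GroupByDesc(GroupByDesc.Mode.HASH, outputColumnNames,
        keys, aggregators, false);
    System.out.println(gby.getModeString());   // prints "hash"
  }
}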
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.LinkedHashMap; - -import org.apache.hadoop.hive.ql.exec.Operator; - -@explain(displayName = "Map Reduce Local Work") -public class mapredLocalWork implements Serializable { - private static final long serialVersionUID = 1L; - - private LinkedHashMap> aliasToWork; - private LinkedHashMap aliasToFetchWork; - - public mapredLocalWork() { - } - - public mapredLocalWork( - final LinkedHashMap> aliasToWork, - final LinkedHashMap aliasToFetchWork) { - this.aliasToWork = aliasToWork; - this.aliasToFetchWork = aliasToFetchWork; - } - - @explain(displayName = "Alias -> Map Local Operator Tree") - public LinkedHashMap> getAliasToWork() { - return aliasToWork; - } - - public void setAliasToWork( - final LinkedHashMap> aliasToWork) { - this.aliasToWork = aliasToWork; - } - - /** - * @return the aliasToFetchWork - */ - @explain(displayName = "Alias -> Map Local Tables") - public LinkedHashMap getAliasToFetchWork() { - return aliasToFetchWork; - } - - /** - * @param aliasToFetchWork - * the aliasToFetchWork to set - */ - public void setAliasToFetchWork( - final LinkedHashMap aliasToFetchWork) { - this.aliasToFetchWork = aliasToFetchWork; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/tableDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/tableDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/tableDesc.java (working copy) @@ -1,148 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.Enumeration; -import java.util.Properties; - -import org.apache.hadoop.hive.ql.io.HiveFileFormatUtils; -import org.apache.hadoop.hive.ql.io.HiveOutputFormat; -import org.apache.hadoop.hive.serde2.Deserializer; -import org.apache.hadoop.mapred.InputFormat; - -public class tableDesc implements Serializable, Cloneable { - private static final long serialVersionUID = 1L; - private Class deserializerClass; - private Class inputFileFormatClass; - private Class outputFileFormatClass; - private java.util.Properties properties; - private String serdeClassName; - - public tableDesc() { - } - - public tableDesc(final Class serdeClass, - final Class inputFileFormatClass, - final Class class1, final java.util.Properties properties) { - deserializerClass = serdeClass; - this.inputFileFormatClass = inputFileFormatClass; - outputFileFormatClass = HiveFileFormatUtils - .getOutputFormatSubstitute(class1); - this.properties = properties; - serdeClassName = properties - .getProperty(org.apache.hadoop.hive.serde.Constants.SERIALIZATION_LIB); - ; - } - - public Class getDeserializerClass() { - return deserializerClass; - } - - public void setDeserializerClass( - final Class serdeClass) { - deserializerClass = serdeClass; - } - - public Class getInputFileFormatClass() { - return inputFileFormatClass; - } - - /** - * Return a deserializer object corresponding to the tableDesc - */ - public Deserializer getDeserializer() throws Exception { - Deserializer de = deserializerClass.newInstance(); - de.initialize(null, properties); - return de; - } - - public void setInputFileFormatClass( - final Class inputFileFormatClass) { - this.inputFileFormatClass = inputFileFormatClass; - } - - public Class getOutputFileFormatClass() { - return outputFileFormatClass; - } - - public void setOutputFileFormatClass(final Class outputFileFormatClass) { - this.outputFileFormatClass = HiveFileFormatUtils - .getOutputFormatSubstitute(outputFileFormatClass); - } - - @explain(displayName = "properties", normalExplain = false) - public java.util.Properties getProperties() { - return properties; - } - - public void setProperties(final java.util.Properties properties) { - this.properties = properties; - } - - /** - * @return the serdeClassName - */ - @explain(displayName = "serde") - public String getSerdeClassName() { - return serdeClassName; - } - - /** - * @param serdeClassName - * the serde Class Name to set - */ - public void setSerdeClassName(String serdeClassName) { - this.serdeClassName = serdeClassName; - } - - @explain(displayName = "name") - public String getTableName() { - return properties - .getProperty(org.apache.hadoop.hive.metastore.api.Constants.META_TABLE_NAME); - } - - @explain(displayName = "input format") - public String getInputFileFormatClassName() { - return getInputFileFormatClass().getName(); - } - - @explain(displayName = "output format") - public String getOutputFileFormatClassName() { - return getOutputFileFormatClass().getName(); - } - - @Override - public Object clone() { - tableDesc ret = new tableDesc(); - ret.setSerdeClassName(serdeClassName); - ret.setDeserializerClass(deserializerClass); - ret.setInputFileFormatClass(inputFileFormatClass); - ret.setOutputFileFormatClass(outputFileFormatClass); - Properties newProp = new Properties(); - Enumeration keysProp = properties.keys(); - while (keysProp.hasMoreElements()) { - Object key = keysProp.nextElement(); - newProp.put(key, properties.get(key)); - } - - 
ret.setProperties(newProp); - return ret; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeFieldDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeFieldDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeFieldDesc.java (working copy) @@ -1,121 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; -import java.util.List; - -import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; - -public class exprNodeFieldDesc extends exprNodeDesc implements Serializable { - private static final long serialVersionUID = 1L; - exprNodeDesc desc; - String fieldName; - - // Used to support a.b where a is a list of struct that contains a field - // called b. - // a.b will return an array that contains field b of all elements of array a. - Boolean isList; - - public exprNodeFieldDesc() { - } - - public exprNodeFieldDesc(TypeInfo typeInfo, exprNodeDesc desc, - String fieldName, Boolean isList) { - super(typeInfo); - this.desc = desc; - this.fieldName = fieldName; - this.isList = isList; - } - - @Override - public List getChildren() { - List children = new ArrayList(2); - children.add(desc); - return children; - } - - public exprNodeDesc getDesc() { - return desc; - } - - public void setDesc(exprNodeDesc desc) { - this.desc = desc; - } - - public String getFieldName() { - return fieldName; - } - - public void setFieldName(String fieldName) { - this.fieldName = fieldName; - } - - public Boolean getIsList() { - return isList; - } - - public void setIsList(Boolean isList) { - this.isList = isList; - } - - @Override - public String toString() { - return desc.toString() + "." + fieldName; - } - - @explain(displayName = "expr") - @Override - public String getExprString() { - return desc.getExprString() + "." 
+ fieldName; - } - - @Override - public List getCols() { - List colList = new ArrayList(); - if (desc != null) { - colList = Utilities.mergeUniqElems(colList, desc.getCols()); - } - return colList; - } - - @Override - public exprNodeDesc clone() { - return new exprNodeFieldDesc(typeInfo, desc, fieldName, isList); - } - - @Override - public boolean isSame(Object o) { - if (!(o instanceof exprNodeFieldDesc)) { - return false; - } - exprNodeFieldDesc dest = (exprNodeFieldDesc) o; - if (!typeInfo.equals(dest.getTypeInfo())) { - return false; - } - if (!fieldName.equals(dest.getFieldName()) - || !isList.equals(dest.getIsList()) || !desc.isSame(dest.getDesc())) { - return false; - } - - return true; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/TableScanDesc.java (revision 0) @@ -0,0 +1,50 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; + +/** + * Table Scan Descriptor Currently, data is only read from a base source as part + * of map-reduce framework. So, nothing is stored in the descriptor. But, more + * things will be added here as table scan is invoked as part of local work. + **/ +@Explain(displayName = "TableScan") +public class TableScanDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private String alias; + + @SuppressWarnings("nls") + public TableScanDesc() { + } + + public TableScanDesc(final String alias) { + this.alias = alias; + } + + @Explain(displayName = "alias") + public String getAlias() { + return alias; + } + + public void setAlias(String alias) { + this.alias = alias; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/showTableStatusDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/showTableStatusDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/showTableStatusDesc.java (working copy) @@ -1,145 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.HashMap; - -import org.apache.hadoop.fs.Path; - -@explain(displayName = "Show Table Status") -public class showTableStatusDesc extends ddlDesc implements Serializable { - private static final long serialVersionUID = 1L; - String pattern; - Path resFile; - String dbName; - HashMap partSpec; - - /** - * table name for the result of show tables - */ - private final String table = "show_tablestatus"; - /** - * thrift ddl for the result of show tables - */ - private final String schema = "tab_name#string"; - - public String getTable() { - return table; - } - - public String getSchema() { - return schema; - } - - /** - * @param pattern - * names of tables to show - */ - public showTableStatusDesc(Path resFile, String dbName, String pattern) { - this.dbName = dbName; - this.resFile = resFile; - this.pattern = pattern; - } - - /** - * @param resFile - * @param dbName - * data base name - * @param pattern - * names of tables to show - * @param part - * partition specification - */ - public showTableStatusDesc(Path resFile, String dbName, String pattern, - HashMap partSpec) { - this.dbName = dbName; - this.resFile = resFile; - this.pattern = pattern; - this.partSpec = partSpec; - } - - /** - * @return the pattern - */ - @explain(displayName = "pattern") - public String getPattern() { - return pattern; - } - - /** - * @param pattern - * the pattern to set - */ - public void setPattern(String pattern) { - this.pattern = pattern; - } - - /** - * @return the resFile - */ - public Path getResFile() { - return resFile; - } - - @explain(displayName = "result file", normalExplain = false) - public String getResFileString() { - return getResFile().getName(); - } - - /** - * @param resFile - * the resFile to set - */ - public void setResFile(Path resFile) { - this.resFile = resFile; - } - - /** - * @return the database name - */ - @explain(displayName = "database") - public String getDbName() { - return dbName; - } - - /** - * @param dbName - * the database name - */ - public void setDbName(String dbName) { - this.dbName = dbName; - } - - /** - * @return the partSpec - */ - @explain(displayName = "partition") - public HashMap getPartSpec() { - return partSpec; - } - - /** - * @param partSpec - * the partSpec to set - */ - public void setPartSpecs(HashMap partSpec) { - this.partSpec = partSpec; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/MapJoinDesc.java (revision 0) @@ -0,0 +1,144 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.Map.Entry; + +/** + * Map Join operator Descriptor implementation. + * + */ +@Explain(displayName = "Common Join Operator") +public class MapJoinDesc extends JoinDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private Map> keys; + private TableDesc keyTblDesc; + private List valueTblDescs; + + private int posBigTable; + + private Map> retainList; + + public MapJoinDesc() { + } + + public MapJoinDesc(final Map> keys, + final TableDesc keyTblDesc, final Map> values, + final List valueTblDescs, ArrayList outputColumnNames, + final int posBigTable, final JoinCondDesc[] conds) { + super(values, outputColumnNames, conds); + this.keys = keys; + this.keyTblDesc = keyTblDesc; + this.valueTblDescs = valueTblDescs; + this.posBigTable = posBigTable; + initRetainExprList(); + } + + private void initRetainExprList() { + retainList = new HashMap>(); + Set>> set = super.getExprs().entrySet(); + Iterator>> setIter = set.iterator(); + while (setIter.hasNext()) { + Entry> current = setIter.next(); + List list = new ArrayList(); + for (int i = 0; i < current.getValue().size(); i++) { + list.add(i); + } + retainList.put(current.getKey(), list); + } + } + + public Map> getRetainList() { + return retainList; + } + + public void setRetainList(Map> retainList) { + this.retainList = retainList; + } + + /** + * @return the keys + */ + @Explain(displayName = "keys") + public Map> getKeys() { + return keys; + } + + /** + * @param keys + * the keys to set + */ + public void setKeys(Map> keys) { + this.keys = keys; + } + + /** + * @return the position of the big table not in memory + */ + @Explain(displayName = "Position of Big Table") + public int getPosBigTable() { + return posBigTable; + } + + /** + * @param posBigTable + * the position of the big table not in memory + */ + public void setPosBigTable(int posBigTable) { + this.posBigTable = posBigTable; + } + + /** + * @return the keyTblDesc + */ + public TableDesc getKeyTblDesc() { + return keyTblDesc; + } + + /** + * @param keyTblDesc + * the keyTblDesc to set + */ + public void setKeyTblDesc(TableDesc keyTblDesc) { + this.keyTblDesc = keyTblDesc; + } + + /** + * @return the valueTblDescs + */ + public List getValueTblDescs() { + return valueTblDescs; + } + + /** + * @param valueTblDescs + * the valueTblDescs to set + */ + public void setValueTblDescs(List valueTblDescs) { + this.valueTblDescs = valueTblDescs; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/CreateViewDesc.java (revision 0) @@ -0,0 +1,103 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. 
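Illustrative note, not part of the patch: the retainList bookkeeping in MapJoinDesc.initRetainExprList() above is easier to read in isolation. The following standalone sketch (hypothetical types and names, not Hive API) performs the same computation: for each join alias, record the indexes 0..n-1 of that alias's value expressions.

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public class RetainListSketch {
  // Mirrors the loop in MapJoinDesc.initRetainExprList(): one index per value expression, per alias.
  static Map<Byte, List<Integer>> buildRetainList(Map<Byte, List<String>> exprsByAlias) {
    Map<Byte, List<Integer>> retain = new HashMap<Byte, List<Integer>>();
    for (Map.Entry<Byte, List<String>> e : exprsByAlias.entrySet()) {
      List<Integer> idx = new ArrayList<Integer>();
      for (int i = 0; i < e.getValue().size(); i++) {
        idx.add(i);
      }
      retain.put(e.getKey(), idx);
    }
    return retain;
  }

  public static void main(String[] args) {
    Map<Byte, List<String>> exprs = new HashMap<Byte, List<String>>();
    exprs.put((byte) 0, Arrays.asList("value", "key"));
    exprs.put((byte) 1, Arrays.asList("value"));
    System.out.println(buildRetainList(exprs));   // e.g. {0=[0, 1], 1=[0]}
  }
}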
See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.plan; + +import java.io.Serializable; +import java.util.List; + +import org.apache.hadoop.hive.metastore.api.FieldSchema; +import org.apache.hadoop.hive.ql.exec.Utilities; + +@Explain(displayName = "Create View") +public class CreateViewDesc implements Serializable { + private static final long serialVersionUID = 1L; + + private String viewName; + private String originalText; + private String expandedText; + private List schema; + private String comment; + private boolean ifNotExists; + + public CreateViewDesc(String viewName, List schema, + String comment, boolean ifNotExists) { + this.viewName = viewName; + this.schema = schema; + this.comment = comment; + this.ifNotExists = ifNotExists; + } + + @Explain(displayName = "name") + public String getViewName() { + return viewName; + } + + public void setViewName(String viewName) { + this.viewName = viewName; + } + + @Explain(displayName = "original text") + public String getViewOriginalText() { + return originalText; + } + + public void setViewOriginalText(String originalText) { + this.originalText = originalText; + } + + @Explain(displayName = "expanded text") + public String getViewExpandedText() { + return expandedText; + } + + public void setViewExpandedText(String expandedText) { + this.expandedText = expandedText; + } + + @Explain(displayName = "columns") + public List getSchemaString() { + return Utilities.getFieldSchemaString(schema); + } + + public List getSchema() { + return schema; + } + + public void setSchema(List schema) { + this.schema = schema; + } + + @Explain(displayName = "comment") + public String getComment() { + return comment; + } + + public void setComment(String comment) { + this.comment = comment; + } + + @Explain(displayName = "if not exists") + public boolean getIfNotExists() { + return ifNotExists; + } + + public void setIfNotExists(boolean ifNotExists) { + this.ifNotExists = ifNotExists; + } +} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeConstantDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeConstantDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/exprNodeConstantDesc.java (working copy) @@ -1,94 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. 
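Illustrative note, not part of the patch: a minimal sketch constructing the renamed CreateViewDesc shown above. The schema element type List<FieldSchema> is an assumption inferred from the getSchemaString() call to Utilities.getFieldSchemaString(), and the view text is made up.

import java.util.Arrays;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.plan.CreateViewDesc;

public class CreateViewDescExample {
  public static void main(String[] args) {
    List<FieldSchema> schema = Arrays.asList(
        new FieldSchema("key", "string", null),
        new FieldSchema("cnt", "bigint", null));

    CreateViewDesc view = new CreateViewDesc("v_key_counts", schema,
        "per-key counts", true);
    view.setViewOriginalText("SELECT key, count(1) cnt FROM src GROUP BY key");

    System.out.println(view.getViewName() + ": " + view.getViewOriginalText());
  }
}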
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; - -import org.apache.hadoop.hive.serde.Constants; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfo; -import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoFactory; - -/** - * A constant expression. - */ -public class exprNodeConstantDesc extends exprNodeDesc implements Serializable { - private static final long serialVersionUID = 1L; - private Object value; - - public exprNodeConstantDesc() { - } - - public exprNodeConstantDesc(TypeInfo typeInfo, Object value) { - super(typeInfo); - this.value = value; - } - - public exprNodeConstantDesc(Object value) { - this(TypeInfoFactory - .getPrimitiveTypeInfoFromJavaPrimitive(value.getClass()), value); - } - - public void setValue(Object value) { - this.value = value; - } - - public Object getValue() { - return value; - } - - @Override - public String toString() { - return "Const " + typeInfo.toString() + " " + value; - } - - @explain(displayName = "expr") - @Override - public String getExprString() { - if (value == null) { - return "null"; - } - - if (typeInfo.getTypeName().equals(Constants.STRING_TYPE_NAME)) { - return "'" + value.toString() + "'"; - } else { - return value.toString(); - } - } - - @Override - public exprNodeDesc clone() { - return new exprNodeConstantDesc(typeInfo, value); - } - - @Override - public boolean isSame(Object o) { - if (!(o instanceof exprNodeConstantDesc)) { - return false; - } - exprNodeConstantDesc dest = (exprNodeConstantDesc) o; - if (!typeInfo.equals(dest.getTypeInfo())) { - return false; - } - if (!value.equals(dest.getValue())) { - return false; - } - - return true; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/plan/lateralViewJoinDesc.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/plan/lateralViewJoinDesc.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/plan/lateralViewJoinDesc.java (working copy) @@ -1,45 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.plan; - -import java.io.Serializable; -import java.util.ArrayList; - -@explain(displayName = "Lateral View Join Operator") -public class lateralViewJoinDesc implements Serializable { - private static final long serialVersionUID = 1L; - - private ArrayList outputInternalColNames; - - public lateralViewJoinDesc() { - } - - public lateralViewJoinDesc(ArrayList outputInternalColNames) { - this.outputInternalColNames = outputInternalColNames; - } - - public void setOutputInternalColNames(ArrayList outputInternalColNames) { - this.outputInternalColNames = outputInternalColNames; - } - - @explain(displayName = "outputColumnNames") - public ArrayList getOutputInternalColNames() { - return outputInternalColNames; - } -} Index: ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/lib/GraphWalker.java (working copy) @@ -41,4 +41,4 @@ public void startWalking(Collection startNodes, HashMap nodeOutput) throws SemanticException; -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/io/CombineHiveInputFormat.java (working copy) @@ -32,7 +32,7 @@ import org.apache.hadoop.fs.Path; import org.apache.hadoop.fs.PathFilter; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.plan.partitionDesc; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.hive.shims.HadoopShims.CombineFileInputFormatShim; import org.apache.hadoop.hive.shims.HadoopShims.InputSplitShim; @@ -80,14 +80,14 @@ throws IOException { this.inputSplitShim = inputSplitShim; if (job != null) { - Map pathToPartitionInfo = Utilities + Map pathToPartitionInfo = Utilities .getMapRedWork(job).getPathToPartitionInfo(); // extract all the inputFormatClass names for each chunk in the // CombinedSplit. Path[] ipaths = inputSplitShim.getPaths(); for (int i = 0; i < ipaths.length; i++) { - partitionDesc part = null; + PartitionDesc part = null; try { part = getPartitionDescFromPath(pathToPartitionInfo, ipaths[i] .getParent()); @@ -209,12 +209,12 @@ inputSplitShim.write(out); if (inputFormatClassName == null) { - Map pathToPartitionInfo = Utilities + Map pathToPartitionInfo = Utilities .getMapRedWork(getJob()).getPathToPartitionInfo(); // extract all the inputFormatClass names for each chunk in the // CombinedSplit. - partitionDesc part = null; + PartitionDesc part = null; try { part = getPartitionDescFromPath(pathToPartitionInfo, inputSplitShim .getPath(0).getParent()); @@ -298,12 +298,12 @@ CombineHiveRecordReader.class); } - protected static partitionDesc getPartitionDescFromPath( - Map pathToPartitionInfo, Path dir) + protected static PartitionDesc getPartitionDescFromPath( + Map pathToPartitionInfo, Path dir) throws IOException { // The format of the keys in pathToPartitionInfo sometimes contains a port // and sometimes doesn't, so we just compare paths. 
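The getPartitionDescFromPath hunk above keys the partition lookup on the URI path alone, because pathToPartitionInfo keys sometimes carry a port and sometimes do not. As a rough standalone sketch of that comparison (the String map and HDFS paths here are hypothetical stand-ins, not the real PartitionDesc lookup), assuming only java.net.URI semantics:

import java.net.URI;
import java.net.URISyntaxException;
import java.util.LinkedHashMap;
import java.util.Map;

public class PathOnlyLookupSketch {

  // Return the value whose key has the same URI path as dir,
  // ignoring scheme, host and port differences.
  static String lookupByPath(Map<String, String> pathToInfo, String dir)
      throws URISyntaxException {
    String dirPath = new URI(dir).getPath();
    for (Map.Entry<String, String> entry : pathToInfo.entrySet()) {
      if (new URI(entry.getKey()).getPath().equals(dirPath)) {
        return entry.getValue();
      }
    }
    return null;
  }

  public static void main(String[] args) throws URISyntaxException {
    Map<String, String> info = new LinkedHashMap<String, String>();
    info.put("hdfs://namenode:8020/user/hive/warehouse/t1", "partition-t1");
    // The same directory without an explicit port still matches,
    // because only getPath() is compared.
    System.out.println(lookupByPath(info, "hdfs://namenode/user/hive/warehouse/t1"));
  }
}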
- for (Map.Entry entry : pathToPartitionInfo + for (Map.Entry entry : pathToPartitionInfo .entrySet()) { try { if (new URI(entry.getKey()).getPath().equals(dir.toUri().getPath())) { Index: ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveInputFormat.java (working copy) @@ -36,8 +36,8 @@ import org.apache.hadoop.hive.ql.exec.Operator; import org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; import org.apache.hadoop.hive.serde2.ColumnProjectionUtils; import org.apache.hadoop.hive.shims.ShimLoader; import org.apache.hadoop.io.Writable; @@ -191,7 +191,7 @@ inputFormats.put(inputFormatClass, newInstance); } catch (Exception e) { throw new IOException("Cannot create an instance of InputFormat class " - + inputFormatClass.getName() + " as specified in mapredWork!"); + + inputFormatClass.getName() + " as specified in MapredWork!"); } } return inputFormats.get(inputFormatClass); @@ -223,8 +223,8 @@ cloneJobConf, reporter)); } - private Map pathToPartitionInfo; - mapredWork mrwork = null; + private Map pathToPartitionInfo; + MapredWork mrwork = null; protected void init(JobConf job) { mrwork = Utilities.getMapRedWork(job); @@ -244,7 +244,7 @@ // for each dir, get the InputFormat, and do getSplits. for (Path dir : dirs) { - partitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir); + PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir); // create a new InputFormat instance if this is the first time to see this // class Class inputFormatClass = part.getInputFileFormatClass(); @@ -272,7 +272,7 @@ // for each dir, get the InputFormat, and do validateInput. 
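The HiveInputFormat hunks reuse one InputFormat instance per class through getInputFormatFromCache, failing with the "as specified in MapredWork" message when reflection cannot create one. A rough sketch of that per-class reflective caching, with plain Object values and a java.lang class standing in for a real InputFormat, might look like:

import java.util.HashMap;
import java.util.Map;

public class InstanceCacheSketch {

  // One instance per class, created reflectively on first use.
  private static final Map<Class<?>, Object> cache = new HashMap<Class<?>, Object>();

  static Object getFromCache(Class<?> clazz) throws Exception {
    Object instance = cache.get(clazz);
    if (instance == null) {
      // Requires a public no-arg constructor, like InputFormat implementations.
      instance = clazz.newInstance();
      cache.put(clazz, instance);
    }
    return instance;
  }

  public static void main(String[] args) throws Exception {
    Object a = getFromCache(StringBuilder.class);
    Object b = getFromCache(StringBuilder.class);
    System.out.println(a == b); // true: the second lookup reuses the cached instance
  }
}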
for (Path dir : dirs) { - partitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir); + PartitionDesc part = getPartitionDescFromPath(pathToPartitionInfo, dir); // create a new InputFormat instance if this is the first time to see this // class InputFormat inputFormat = getInputFormatFromCache(part @@ -284,10 +284,10 @@ } } - protected static partitionDesc getPartitionDescFromPath( - Map pathToPartitionInfo, Path dir) + protected static PartitionDesc getPartitionDescFromPath( + Map pathToPartitionInfo, Path dir) throws IOException { - partitionDesc partDesc = pathToPartitionInfo.get(dir.toString()); + PartitionDesc partDesc = pathToPartitionInfo.get(dir.toString()); if (partDesc == null) { partDesc = pathToPartitionInfo.get(dir.toUri().getPath()); } Index: ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/io/HiveFileFormatUtils.java (working copy) @@ -32,8 +32,8 @@ import org.apache.hadoop.hive.ql.exec.Utilities; import org.apache.hadoop.hive.ql.exec.FileSinkOperator.RecordWriter; import org.apache.hadoop.hive.ql.metadata.HiveException; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.io.Writable; import org.apache.hadoop.io.SequenceFile.CompressionType; import org.apache.hadoop.io.compress.CompressionCodec; @@ -200,8 +200,8 @@ } public static RecordWriter getHiveRecordWriter(JobConf jc, - tableDesc tableInfo, Class outputClass, - fileSinkDesc conf, Path outPath) throws HiveException { + TableDesc tableInfo, Class outputClass, + FileSinkDesc conf, Path outPath) throws HiveException { try { HiveOutputFormat hiveOutputFormat = tableInfo .getOutputFileFormatClass().newInstance(); Index: ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/io/RCFileOutputFormat.java (working copy) @@ -145,4 +145,4 @@ } }; } -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/joinType.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/joinType.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/joinType.java (working copy) @@ -1,23 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package org.apache.hadoop.hive.ql.parse; - -public enum joinType { - INNER, LEFTOUTER, RIGHTOUTER, FULLOUTER, UNIQUE, LEFTSEMI -}; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QBJoinTree.java (working copy) @@ -35,7 +35,7 @@ private QBJoinTree joinSrc; private String[] baseSrc; private int nextTag; - private joinCond[] joinCond; + private JoinCond[] joinConds; private boolean noOuterJoin; private boolean noSemiJoin; @@ -133,12 +133,12 @@ return "$INTNAME"; } - public joinCond[] getJoinCond() { - return joinCond; + public JoinCond[] getJoinCond() { + return joinConds; } - public void setJoinCond(joinCond[] joinCond) { - this.joinCond = joinCond; + public void setJoinCond(JoinCond[] joinConds) { + this.joinConds = joinConds; } public boolean getNoOuterJoin() { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/joinCond.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/joinCond.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/joinCond.java (working copy) @@ -1,83 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package org.apache.hadoop.hive.ql.parse; - - -/** - * Join conditions Descriptor implementation. 
- * - */ -public class joinCond { - private int left; - private int right; - private joinType joinType; - private boolean preserved; - - public joinCond() { - } - - public joinCond(int left, int right, joinType joinType) { - this.left = left; - this.right = right; - this.joinType = joinType; - } - - /** - * Constructor for a UNIQUEJOIN cond - * - * @param p - * true if table is preserved, false otherwise - */ - public joinCond(boolean p) { - joinType = org.apache.hadoop.hive.ql.parse.joinType.UNIQUE; - preserved = p; - } - - /** - * @return the true if table is preserved, false otherwise - */ - public boolean getPreserved() { - return preserved; - } - - public int getLeft() { - return left; - } - - public void setLeft(final int left) { - this.left = left; - } - - public int getRight() { - return right; - } - - public void setRight(final int right) { - this.right = right; - } - - public joinType getJoinType() { - return joinType; - } - - public void setJoinType(final joinType joinType) { - this.joinType = joinType; - } - -} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/LoadSemanticAnalyzer.java (working copy) @@ -35,9 +35,9 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.exec.Utilities; -import org.apache.hadoop.hive.ql.plan.copyWork; -import org.apache.hadoop.hive.ql.plan.loadTableDesc; -import org.apache.hadoop.hive.ql.plan.moveWork; +import org.apache.hadoop.hive.ql.plan.CopyWork; +import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.MoveWork; public class LoadSemanticAnalyzer extends BaseSemanticAnalyzer { @@ -210,7 +210,7 @@ // that's just a test case. String copyURIStr = ctx.getExternalTmpFileURI(toURI); URI copyURI = URI.create(copyURIStr); - rTask = TaskFactory.get(new copyWork(fromURI.toString(), copyURIStr), + rTask = TaskFactory.get(new CopyWork(fromURI.toString(), copyURIStr), conf); fromURI = copyURI; } @@ -218,16 +218,16 @@ // create final load/move work String loadTmpPath = ctx.getExternalTmpFileURI(toURI); - loadTableDesc loadTableWork = new loadTableDesc(fromURI.toString(), + LoadTableDesc loadTableWork = new LoadTableDesc(fromURI.toString(), loadTmpPath, Utilities.getTableDesc(ts.tableHandle), (ts.partSpec != null) ? 
ts.partSpec : new HashMap(), isOverWrite); if (rTask != null) { - rTask.addDependentTask(TaskFactory.get(new moveWork(getInputs(), + rTask.addDependentTask(TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true), conf)); } else { - rTask = TaskFactory.get(new moveWork(getInputs(), getOutputs(), + rTask = TaskFactory.get(new MoveWork(getInputs(), getOutputs(), loadTableWork, null, true), conf); } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/FunctionSemanticAnalyzer.java (working copy) @@ -23,8 +23,8 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.TaskFactory; import org.apache.hadoop.hive.ql.plan.FunctionWork; -import org.apache.hadoop.hive.ql.plan.createFunctionDesc; -import org.apache.hadoop.hive.ql.plan.dropFunctionDesc; +import org.apache.hadoop.hive.ql.plan.CreateFunctionDesc; +import org.apache.hadoop.hive.ql.plan.DropFunctionDesc; public class FunctionSemanticAnalyzer extends BaseSemanticAnalyzer { private static final Log LOG = LogFactory @@ -49,13 +49,13 @@ private void analyzeCreateFunction(ASTNode ast) throws SemanticException { String functionName = ast.getChild(0).getText(); String className = unescapeSQLString(ast.getChild(1).getText()); - createFunctionDesc desc = new createFunctionDesc(functionName, className); + CreateFunctionDesc desc = new CreateFunctionDesc(functionName, className); rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf)); } private void analyzeDropFunction(ASTNode ast) throws SemanticException { String functionName = ast.getChild(0).getText(); - dropFunctionDesc desc = new dropFunctionDesc(functionName); + DropFunctionDesc desc = new DropFunctionDesc(functionName); rootTasks.add(TaskFactory.get(new FunctionWork(desc), conf)); } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/TypeCheckProcFactory.java (working copy) @@ -36,12 +36,12 @@ import org.apache.hadoop.hive.ql.lib.Node; import org.apache.hadoop.hive.ql.lib.NodeProcessor; import org.apache.hadoop.hive.ql.lib.NodeProcessorCtx; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeFieldDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeFieldDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; import org.apache.hadoop.hive.ql.udf.generic.GenericUDF; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -77,25 +77,25 @@ * @param procCtx * The processor context. * - * @return exprNodeColumnDesc. + * @return ExprNodeColumnDesc. 
*/ - public static exprNodeDesc processGByExpr(Node nd, Object procCtx) + public static ExprNodeDesc processGByExpr(Node nd, Object procCtx) throws SemanticException { - // We recursively create the exprNodeDesc. Base cases: when we encounter - // a column ref, we convert that into an exprNodeColumnDesc; when we + // We recursively create the ExprNodeDesc. Base cases: when we encounter + // a column ref, we convert that into an ExprNodeColumnDesc; when we // encounter - // a constant, we convert that into an exprNodeConstantDesc. For others we + // a constant, we convert that into an ExprNodeConstantDesc. For others we // just // build the exprNodeFuncDesc with recursively built children. ASTNode expr = (ASTNode) nd; TypeCheckCtx ctx = (TypeCheckCtx) procCtx; RowResolver input = ctx.getInputRR(); - exprNodeDesc desc = null; + ExprNodeDesc desc = null; // If the current subExpression is pre-calculated, as in Group-By etc. ColumnInfo colInfo = input.get("", expr.toStringTree()); if (colInfo != null) { - desc = new exprNodeColumnDesc(colInfo.getType(), colInfo + desc = new ExprNodeColumnDesc(colInfo.getType(), colInfo .getInternalName(), colInfo.getTabAlias(), colInfo .getIsPartitionCol()); return desc; @@ -117,12 +117,12 @@ return null; } - exprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { return desc; } - return new exprNodeNullDesc(); + return new ExprNodeNullDesc(); } } @@ -150,7 +150,7 @@ return null; } - exprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { return desc; } @@ -171,7 +171,7 @@ throw new SemanticException(ErrorMsg.INVALID_NUMERICAL_CONSTANT .getMsg(expr)); } - return new exprNodeConstantDesc(v); + return new ExprNodeConstantDesc(v); } } @@ -199,7 +199,7 @@ return null; } - exprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { return desc; } @@ -221,7 +221,7 @@ str = BaseSemanticAnalyzer.unescapeIdentifier(expr.getText()); break; } - return new exprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, str); + return new ExprNodeConstantDesc(TypeInfoFactory.stringTypeInfo, str); } } @@ -249,7 +249,7 @@ return null; } - exprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { return desc; } @@ -267,7 +267,7 @@ default: assert false; } - return new exprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, bool); + return new ExprNodeConstantDesc(TypeInfoFactory.booleanTypeInfo, bool); } } @@ -295,7 +295,7 @@ return null; } - exprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { return desc; } @@ -340,7 +340,7 @@ } } else { // It's a column. 
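The processGByExpr comment describes the recursive shape being renamed here: column references and constants are the leaf descriptors, and every other expression wraps recursively built children in a function descriptor. A toy hierarchy (illustrative stand-ins, not the Hive ExprNode* classes) makes that structure concrete:

import java.util.Arrays;
import java.util.List;

public class ExprSketch {

  static abstract class Node { }

  // Leaf case: a column reference.
  static class ColumnNode extends Node {
    final String name;
    ColumnNode(String name) { this.name = name; }
    public String toString() { return "col(" + name + ")"; }
  }

  // Leaf case: a constant value.
  static class ConstantNode extends Node {
    final Object value;
    ConstantNode(Object value) { this.value = value; }
    public String toString() { return "const(" + value + ")"; }
  }

  // Recursive case: a function over already-built children.
  static class FuncNode extends Node {
    final String func;
    final List<Node> children;
    FuncNode(String func, List<Node> children) { this.func = func; this.children = children; }
    public String toString() { return func + children; }
  }

  public static void main(String[] args) {
    Node expr = new FuncNode("+", Arrays.<Node>asList(
        new ColumnNode("key"), new ConstantNode(1)));
    System.out.println(expr); // +[col(key), const(1)]
  }
}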
- return new exprNodeColumnDesc(colInfo.getType(), colInfo + return new ExprNodeColumnDesc(colInfo.getType(), colInfo .getInternalName(), colInfo.getTabAlias(), colInfo .getIsPartitionCol()); } @@ -394,7 +394,7 @@ } public static boolean isRedundantConversionFunction(ASTNode expr, - boolean isFunction, ArrayList children) { + boolean isFunction, ArrayList children) { if (!isFunction) { return false; } @@ -446,16 +446,16 @@ } /** - * Get the exprNodeDesc + * Get the ExprNodeDesc * * @param name * @param children * @return The expression node descriptor * @throws UDFArgumentException */ - public static exprNodeDesc getFuncExprNodeDesc(String name, - exprNodeDesc... children) { - ArrayList c = new ArrayList(Arrays + public static ExprNodeDesc getFuncExprNodeDesc(String name, + ExprNodeDesc... children) { + ArrayList c = new ArrayList(Arrays .asList(children)); try { return getFuncExprNodeDesc(name, c); @@ -471,8 +471,8 @@ * * @throws UDFArgumentException */ - public static exprNodeDesc getFuncExprNodeDesc(String udfName, - List children) throws UDFArgumentException { + public static ExprNodeDesc getFuncExprNodeDesc(String udfName, + List children) throws UDFArgumentException { FunctionInfo fi = FunctionRegistry.getFunctionInfo(udfName); if (fi == null) { @@ -485,11 +485,11 @@ + " is an aggregation function."); } - return exprNodeGenericFuncDesc.newInstance(genericUDF, children); + return ExprNodeGenericFuncDesc.newInstance(genericUDF, children); } - static exprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, - boolean isFunction, ArrayList children, TypeCheckCtx ctx) + static ExprNodeDesc getXpathOrFuncExprNodeDesc(ASTNode expr, + boolean isFunction, ArrayList children, TypeCheckCtx ctx) throws SemanticException, UDFArgumentException { // return the child directly if the conversion is redundant. if (isRedundantConversionFunction(expr, isFunction, children)) { @@ -498,14 +498,14 @@ return children.get(0); } String funcText = getFunctionText(expr, isFunction); - exprNodeDesc desc; + ExprNodeDesc desc; if (funcText.equals(".")) { // "." 
: FIELD Expression assert (children.size() == 2); // Only allow constant field name for now - assert (children.get(1) instanceof exprNodeConstantDesc); - exprNodeDesc object = children.get(0); - exprNodeConstantDesc fieldName = (exprNodeConstantDesc) children.get(1); + assert (children.get(1) instanceof ExprNodeConstantDesc); + ExprNodeDesc object = children.get(0); + ExprNodeConstantDesc fieldName = (ExprNodeConstantDesc) children.get(1); assert (fieldName.getValue() instanceof String); // Calculate result TypeInfo @@ -527,7 +527,7 @@ t = TypeInfoFactory.getListTypeInfo(t); } - desc = new exprNodeFieldDesc(t, children.get(0), fieldNameString, + desc = new ExprNodeFieldDesc(t, children.get(0), fieldNameString, isList); } else if (funcText.equals("[")) { @@ -539,8 +539,8 @@ if (myt.getCategory() == Category.LIST) { // Only allow constant integer index for now - if (!(children.get(1) instanceof exprNodeConstantDesc) - || !(((exprNodeConstantDesc) children.get(1)).getTypeInfo() + if (!(children.get(1) instanceof ExprNodeConstantDesc) + || !(((ExprNodeConstantDesc) children.get(1)).getTypeInfo() .equals(TypeInfoFactory.intTypeInfo))) { throw new SemanticException(ErrorMsg.INVALID_ARRAYINDEX_CONSTANT .getMsg(expr)); @@ -548,22 +548,22 @@ // Calculate TypeInfo TypeInfo t = ((ListTypeInfo) myt).getListElementTypeInfo(); - desc = new exprNodeGenericFuncDesc(t, FunctionRegistry + desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry .getGenericUDFForIndex(), children); } else if (myt.getCategory() == Category.MAP) { // Only allow only constant indexes for now - if (!(children.get(1) instanceof exprNodeConstantDesc)) { + if (!(children.get(1) instanceof ExprNodeConstantDesc)) { throw new SemanticException(ErrorMsg.INVALID_MAPINDEX_CONSTANT .getMsg(expr)); } - if (!(((exprNodeConstantDesc) children.get(1)).getTypeInfo() + if (!(((ExprNodeConstantDesc) children.get(1)).getTypeInfo() .equals(((MapTypeInfo) myt).getMapKeyTypeInfo()))) { throw new SemanticException(ErrorMsg.INVALID_MAPINDEX_TYPE .getMsg(expr)); } // Calculate TypeInfo TypeInfo t = ((MapTypeInfo) myt).getMapValueTypeInfo(); - desc = new exprNodeGenericFuncDesc(t, FunctionRegistry + desc = new ExprNodeGenericFuncDesc(t, FunctionRegistry .getGenericUDFForIndex(), children); } else { throw new SemanticException(ErrorMsg.NON_COLLECTION_TYPE.getMsg(expr, @@ -635,7 +635,7 @@ TypeCheckCtx ctx = (TypeCheckCtx) procCtx; // If this is a GroupBy expression, clear error and continue - exprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); + ExprNodeDesc desc = TypeCheckProcFactory.processGByExpr(nd, procCtx); if (desc != null) { ctx.setError(null); return desc; @@ -659,13 +659,13 @@ // NOTE: tableAlias must be a valid non-ambiguous table alias, // because we've checked that in TOK_TABLE_OR_COL's process method. ColumnInfo colInfo = input.get(tableAlias, - ((exprNodeConstantDesc) nodeOutputs[1]).getValue().toString()); + ((ExprNodeConstantDesc) nodeOutputs[1]).getValue().toString()); if (colInfo == null) { ctx.setError(ErrorMsg.INVALID_COLUMN.getMsg(expr.getChild(1))); return null; } - return new exprNodeColumnDesc(colInfo.getType(), colInfo + return new ExprNodeColumnDesc(colInfo.getType(), colInfo .getInternalName(), colInfo.getTabAlias(), colInfo .getIsPartitionCol()); } @@ -682,11 +682,11 @@ // Create all children int childrenBegin = (isFunction ? 
1 : 0); - ArrayList children = new ArrayList(expr + ArrayList children = new ArrayList(expr .getChildCount() - childrenBegin); for (int ci = childrenBegin; ci < expr.getChildCount(); ci++) { - children.add((exprNodeDesc) nodeOutputs[ci]); + children.add((ExprNodeDesc) nodeOutputs[ci]); } // If any of the children contains null, then return a null Index: ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/JoinType.java (revision 0) @@ -0,0 +1,23 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +public enum JoinType { + INNER, LEFTOUTER, RIGHTOUTER, FULLOUTER, UNIQUE, LEFTSEMI +}; Index: ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java (working copy) @@ -23,7 +23,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.plan.createTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateTableDesc; /** * Implementation of the query block @@ -45,7 +45,7 @@ private QBJoinTree qbjoin; private String id; private boolean isQuery; - private createTableDesc tblDesc = null; // table descriptor of the final + private CreateTableDesc tblDesc = null; // table descriptor of the final // results public void print(String msg) { @@ -172,11 +172,11 @@ return qbp.isSelectStarQuery() && aliasToSubq.isEmpty() && !isCTAS(); } - public createTableDesc getTableDesc() { + public CreateTableDesc getTableDesc() { return tblDesc; } - public void setTableDesc(createTableDesc desc) { + public void setTableDesc(CreateTableDesc desc) { tblDesc = desc; } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCond.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCond.java (revision 0) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/JoinCond.java (revision 0) @@ -0,0 +1,82 @@ +/** + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.hadoop.hive.ql.parse; + +/** + * Join conditions Descriptor implementation. + * + */ +public class JoinCond { + private int left; + private int right; + private JoinType joinType; + private boolean preserved; + + public JoinCond() { + } + + public JoinCond(int left, int right, JoinType joinType) { + this.left = left; + this.right = right; + this.joinType = joinType; + } + + /** + * Constructor for a UNIQUEJOIN cond + * + * @param p + * true if table is preserved, false otherwise + */ + public JoinCond(boolean p) { + joinType = JoinType.UNIQUE; + preserved = p; + } + + /** + * @return the true if table is preserved, false otherwise + */ + public boolean getPreserved() { + return preserved; + } + + public int getLeft() { + return left; + } + + public void setLeft(final int left) { + this.left = left; + } + + public int getRight() { + return right; + } + + public void setRight(final int right) { + this.right = right; + } + + public JoinType getJoinType() { + return joinType; + } + + public void setJoinType(final JoinType joinType) { + this.joinType = joinType; + } + +} Index: ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java (working copy) @@ -98,37 +98,37 @@ import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.PlanUtils; -import org.apache.hadoop.hive.ql.plan.aggregationDesc; -import org.apache.hadoop.hive.ql.plan.createTableDesc; -import org.apache.hadoop.hive.ql.plan.createTableLikeDesc; -import org.apache.hadoop.hive.ql.plan.createViewDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeColumnDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeConstantDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeGenericFuncDesc; -import org.apache.hadoop.hive.ql.plan.exprNodeNullDesc; -import org.apache.hadoop.hive.ql.plan.extractDesc; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.fileSinkDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc; -import org.apache.hadoop.hive.ql.plan.forwardDesc; -import org.apache.hadoop.hive.ql.plan.groupByDesc; -import org.apache.hadoop.hive.ql.plan.joinDesc; -import org.apache.hadoop.hive.ql.plan.lateralViewJoinDesc; -import org.apache.hadoop.hive.ql.plan.limitDesc; -import org.apache.hadoop.hive.ql.plan.loadFileDesc; -import org.apache.hadoop.hive.ql.plan.loadTableDesc; -import org.apache.hadoop.hive.ql.plan.mapredWork; -import org.apache.hadoop.hive.ql.plan.moveWork; -import org.apache.hadoop.hive.ql.plan.partitionDesc; -import org.apache.hadoop.hive.ql.plan.reduceSinkDesc; -import org.apache.hadoop.hive.ql.plan.scriptDesc; -import org.apache.hadoop.hive.ql.plan.selectDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; -import org.apache.hadoop.hive.ql.plan.tableScanDesc; -import 
org.apache.hadoop.hive.ql.plan.udtfDesc; -import org.apache.hadoop.hive.ql.plan.unionDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc.sampleDesc; +import org.apache.hadoop.hive.ql.plan.AggregationDesc; +import org.apache.hadoop.hive.ql.plan.CreateTableDesc; +import org.apache.hadoop.hive.ql.plan.CreateTableLikeDesc; +import org.apache.hadoop.hive.ql.plan.CreateViewDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeColumnDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeConstantDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeGenericFuncDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeNullDesc; +import org.apache.hadoop.hive.ql.plan.ExtractDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.FileSinkDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc; +import org.apache.hadoop.hive.ql.plan.ForwardDesc; +import org.apache.hadoop.hive.ql.plan.GroupByDesc; +import org.apache.hadoop.hive.ql.plan.JoinDesc; +import org.apache.hadoop.hive.ql.plan.LateralViewJoinDesc; +import org.apache.hadoop.hive.ql.plan.LimitDesc; +import org.apache.hadoop.hive.ql.plan.LoadFileDesc; +import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.MapredWork; +import org.apache.hadoop.hive.ql.plan.MoveWork; +import org.apache.hadoop.hive.ql.plan.PartitionDesc; +import org.apache.hadoop.hive.ql.plan.ReduceSinkDesc; +import org.apache.hadoop.hive.ql.plan.ScriptDesc; +import org.apache.hadoop.hive.ql.plan.SelectDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.plan.TableScanDesc; +import org.apache.hadoop.hive.ql.plan.UDTFDesc; +import org.apache.hadoop.hive.ql.plan.UnionDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.udf.generic.GenericUDAFEvaluator; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFHash; @@ -155,12 +155,12 @@ */ public class SemanticAnalyzer extends BaseSemanticAnalyzer { - private HashMap opToPartPruner; + private HashMap opToPartPruner; private HashMap> topOps; private HashMap> topSelOps; private LinkedHashMap, OpParseContext> opParseCtx; - private List loadTableWork; - private List loadFileWork; + private List loadTableWork; + private List loadFileWork; private Map joinContext; private final HashMap topToTable; private QB qb; @@ -171,7 +171,7 @@ private HashMap opToSamplePruner; Map> groupOpToInputTables; Map prunedPartitions; - private createViewDesc createVwDesc; + private CreateViewDesc createVwDesc; private ASTNode viewSelect; private final UnparseTranslator unparseTranslator; @@ -184,12 +184,12 @@ super(conf); - opToPartPruner = new HashMap(); + opToPartPruner = new HashMap(); opToSamplePruner = new HashMap(); topOps = new HashMap>(); topSelOps = new HashMap>(); - loadTableWork = new ArrayList(); - loadFileWork = new ArrayList(); + loadTableWork = new ArrayList(); + loadFileWork = new ArrayList(); opParseCtx = new LinkedHashMap, OpParseContext>(); joinContext = new HashMap(); topToTable = new HashMap(); @@ -1148,7 +1148,7 @@ OpParseContext inputCtx = opParseCtx.get(input); RowResolver inputRR = inputCtx.getRR(); Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new filterDesc(genExprNodeDesc(condn, inputRR), false), new RowSchema( + new FilterDesc(genExprNodeDesc(condn, inputRR), false), new RowSchema( inputRR.getColumnInfos()), input), inputRR); LOG.debug("Created Filter Plan 
for " + qb.getId() + " row schema: " @@ -1158,7 +1158,7 @@ @SuppressWarnings("nls") private Integer genColListRegex(String colRegex, String tabAlias, - String alias, ASTNode sel, ArrayList col_list, + String alias, ASTNode sel, ArrayList col_list, RowResolver input, Integer pos, RowResolver output) throws SemanticException { @@ -1195,7 +1195,7 @@ continue; } - exprNodeColumnDesc expr = new exprNodeColumnDesc(colInfo.getType(), name, + ExprNodeColumnDesc expr = new ExprNodeColumnDesc(colInfo.getType(), name, colInfo.getTabAlias(), colInfo.getIsPartitionCol()); col_list.add(expr); output.put(tmp[0], tmp[1], @@ -1259,7 +1259,7 @@ return cmd; } - private tableDesc getTableDescFromSerDe(ASTNode child, String cols, + private TableDesc getTableDescFromSerDe(ASTNode child, String cols, String colTypes, boolean defaultCols) throws SemanticException { if (child.getType() == HiveParser.TOK_SERDENAME) { String serdeName = unescapeSQLString(child.getChild(0).getText()); @@ -1272,7 +1272,7 @@ throw new SemanticException(e); } - tableDesc tblDesc = PlanUtils.getTableDesc(serdeClass, Integer + TableDesc tblDesc = PlanUtils.getTableDesc(serdeClass, Integer .toString(Utilities.tabCode), cols, colTypes, defaultCols, true); // copy all the properties if (child.getChildCount() == 2) { @@ -1287,7 +1287,7 @@ } return tblDesc; } else if (child.getType() == HiveParser.TOK_SERDEPROPS) { - tableDesc tblDesc = PlanUtils.getDefaultTableDesc(Integer + TableDesc tblDesc = PlanUtils.getDefaultTableDesc(Integer .toString(Utilities.ctrlaCode), cols, colTypes, defaultCols); int numChildRowFormat = child.getChildCount(); for (int numC = 0; numC < numChildRowFormat; numC++) { @@ -1443,8 +1443,8 @@ inpColumnTypes.append(inputSchema.get(i).getType().getTypeName()); } - tableDesc outInfo; - tableDesc inInfo; + TableDesc outInfo; + TableDesc inInfo; String defaultSerdeName = conf.getVar(HiveConf.ConfVars.HIVESCRIPTSERDE); Class serde; @@ -1487,7 +1487,7 @@ .getChild(inputRecordWriterNum)); Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new scriptDesc( + new ScriptDesc( getFixedCmd(stripQuotes(trfm.getChild(execPos).getText())), inInfo, inRecordWriter, outInfo, outRecordReader), new RowSchema(out_rwsch .getColumnInfos()), input), out_rwsch); @@ -1639,7 +1639,7 @@ Operator input) throws SemanticException { LOG.debug("tree: " + selExprList.toStringTree()); - ArrayList col_list = new ArrayList(); + ArrayList col_list = new ArrayList(); RowResolver out_rwsch = new RowResolver(); ASTNode trfm = null; String alias = qb.getParseInfo().getAlias(); @@ -1793,7 +1793,7 @@ .toLowerCase()), alias, expr, col_list, inputRR, pos, out_rwsch); } else { // Case when this is an expression - exprNodeDesc exp = genExprNodeDesc(expr, inputRR); + ExprNodeDesc exp = genExprNodeDesc(expr, inputRR); col_list.add(exp); if (!StringUtils.isEmpty(alias) && (out_rwsch.get(null, colAlias) != null)) { @@ -1809,11 +1809,11 @@ selectStar = selectStar && exprList.getChildCount() == posn + 1; ArrayList columnNames = new ArrayList(); - Map colExprMap = new HashMap(); + Map colExprMap = new HashMap(); for (int i = 0; i < col_list.size(); i++) { // Replace NULL with CAST(NULL AS STRING) - if (col_list.get(i) instanceof exprNodeNullDesc) { - col_list.set(i, new exprNodeConstantDesc( + if (col_list.get(i) instanceof ExprNodeNullDesc) { + col_list.set(i, new ExprNodeConstantDesc( TypeInfoFactory.stringTypeInfo, null)); } String outputCol = getColumnInternalName(i); @@ -1822,7 +1822,7 @@ } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( 
- new selectDesc(col_list, columnNames, selectStar), new RowSchema( + new SelectDesc(col_list, columnNames, selectStar), new RowSchema( out_rwsch.getColumnInfos()), input), out_rwsch); output.setColumnExprMap(colExprMap); @@ -1842,24 +1842,24 @@ * Class to store GenericUDAF related information. */ static class GenericUDAFInfo { - ArrayList convertedParameters; + ArrayList convertedParameters; GenericUDAFEvaluator genericUDAFEvaluator; TypeInfo returnType; } /** - * Convert exprNodeDesc array to Typeinfo array. + * Convert ExprNodeDesc array to Typeinfo array. */ - static ArrayList getTypeInfo(ArrayList exprs) { + static ArrayList getTypeInfo(ArrayList exprs) { ArrayList result = new ArrayList(); - for (exprNodeDesc expr : exprs) { + for (ExprNodeDesc expr : exprs) { result.add(expr.getTypeInfo()); } return result; } /** - * Convert exprNodeDesc array to Typeinfo array. + * Convert ExprNodeDesc array to Typeinfo array. */ static ObjectInspector[] getStandardObjectInspector(ArrayList exprs) { ObjectInspector[] result = new ObjectInspector[exprs.size()]; @@ -1875,7 +1875,7 @@ * for each GroupBy aggregation. */ static GenericUDAFEvaluator getGenericUDAFEvaluator(String aggName, - ArrayList aggParameters, ASTNode aggTree) + ArrayList aggParameters, ASTNode aggTree) throws SemanticException { ArrayList originalParameterTypeInfos = getTypeInfo(aggParameters); GenericUDAFEvaluator result = FunctionRegistry.getGenericUDAFEvaluator( @@ -1895,7 +1895,7 @@ * @param aggName * The name of the UDAF. * @param aggParameters - * The exprNodeDesc of the original parameters + * The ExprNodeDesc of the original parameters * @param aggTree * The ASTNode node of the UDAF in the query. * @return GenericUDAFInfo @@ -1903,7 +1903,7 @@ * when the UDAF is not found or has problems. */ static GenericUDAFInfo getGenericUDAFInfo(GenericUDAFEvaluator evaluator, - GenericUDAFEvaluator.Mode emode, ArrayList aggParameters) + GenericUDAFEvaluator.Mode emode, ArrayList aggParameters) throws SemanticException { GenericUDAFInfo r = new GenericUDAFInfo(); @@ -1927,8 +1927,8 @@ return r; } - private static GenericUDAFEvaluator.Mode groupByDescModeToUDAFMode( - groupByDesc.Mode mode, boolean isDistinct) { + private static GenericUDAFEvaluator.Mode GroupByDescModeToUDAFMode( + GroupByDesc.Mode mode, boolean isDistinct) { switch (mode) { case COMPLETE: return GenericUDAFEvaluator.Mode.COMPLETE; @@ -1947,7 +1947,7 @@ return isDistinct ? 
GenericUDAFEvaluator.Mode.COMPLETE : GenericUDAFEvaluator.Mode.FINAL; default: - throw new RuntimeException("internal error in groupByDescModeToUDAFMode"); + throw new RuntimeException("internal error in GroupByDescModeToUDAFMode"); } } @@ -1965,17 +1965,17 @@ */ @SuppressWarnings("nls") private Operator genGroupByPlanGroupByOperator(QBParseInfo parseInfo, - String dest, Operator reduceSinkOperatorInfo, groupByDesc.Mode mode, + String dest, Operator reduceSinkOperatorInfo, GroupByDesc.Mode mode, Map genericUDAFEvaluators) throws SemanticException { RowResolver groupByInputRowResolver = opParseCtx .get(reduceSinkOperatorInfo).getRR(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); - ArrayList groupByKeys = new ArrayList(); - ArrayList aggregations = new ArrayList(); + ArrayList groupByKeys = new ArrayList(); + ArrayList aggregations = new ArrayList(); ArrayList outputColumnNames = new ArrayList(); - Map colExprMap = new HashMap(); + Map colExprMap = new HashMap(); List grpByExprs = getGroupByForClause(parseInfo, dest); for (int i = 0; i < grpByExprs.size(); ++i) { ASTNode grpbyExpr = grpByExprs.get(i); @@ -1986,7 +1986,7 @@ throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(grpbyExpr)); } - groupByKeys.add(new exprNodeColumnDesc(exprInfo.getType(), exprInfo + groupByKeys.add(new ExprNodeColumnDesc(exprInfo.getType(), exprInfo .getInternalName(), "", false)); String field = getColumnInternalName(i); outputColumnNames.add(field); @@ -2005,7 +2005,7 @@ String aggName = value.getChild(0).getText(); // Convert children to aggParameters - ArrayList aggParameters = new ArrayList(); + ArrayList aggParameters = new ArrayList(); // 0 is the function name for (int i = 1; i < value.getChildCount(); i++) { String text = value.getChild(i).toStringTree(); @@ -2017,19 +2017,19 @@ String paraExpression = paraExprInfo.getInternalName(); assert (paraExpression != null); - aggParameters.add(new exprNodeColumnDesc(paraExprInfo.getType(), + aggParameters.add(new ExprNodeColumnDesc(paraExprInfo.getType(), paraExprInfo.getInternalName(), paraExprInfo.getTabAlias(), paraExprInfo.getIsPartitionCol())); } boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; - Mode amode = groupByDescModeToUDAFMode(mode, isDistinct); + Mode amode = GroupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = getGenericUDAFEvaluator( aggName, aggParameters, value); assert (genericUDAFEvaluator != null); GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); - aggregations.add(new aggregationDesc(aggName.toLowerCase(), + aggregations.add(new AggregationDesc(aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, isDistinct, amode)); String field = getColumnInternalName(groupByKeys.size() @@ -2045,7 +2045,7 @@ } Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( - new groupByDesc(mode, outputColumnNames, groupByKeys, aggregations, + new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, false), new RowSchema(groupByOutputRowResolver.getColumnInfos()), reduceSinkOperatorInfo), groupByOutputRowResolver); op.setColumnExprMap(colExprMap); @@ -2067,7 +2067,7 @@ */ @SuppressWarnings("nls") private Operator genGroupByPlanGroupByOperator1(QBParseInfo parseInfo, - String dest, Operator reduceSinkOperatorInfo, groupByDesc.Mode mode, + String dest, Operator reduceSinkOperatorInfo, GroupByDesc.Mode mode, Map genericUDAFEvaluators, boolean distPartAgg) throws 
SemanticException { ArrayList outputColumnNames = new ArrayList(); @@ -2075,10 +2075,10 @@ .get(reduceSinkOperatorInfo).getRR(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); - ArrayList groupByKeys = new ArrayList(); - ArrayList aggregations = new ArrayList(); + ArrayList groupByKeys = new ArrayList(); + ArrayList aggregations = new ArrayList(); List grpByExprs = getGroupByForClause(parseInfo, dest); - Map colExprMap = new HashMap(); + Map colExprMap = new HashMap(); for (int i = 0; i < grpByExprs.size(); ++i) { ASTNode grpbyExpr = grpByExprs.get(i); String text = grpbyExpr.toStringTree(); @@ -2088,7 +2088,7 @@ throw new SemanticException(ErrorMsg.INVALID_COLUMN.getMsg(grpbyExpr)); } - groupByKeys.add(new exprNodeColumnDesc(exprInfo.getType(), exprInfo + groupByKeys.add(new ExprNodeColumnDesc(exprInfo.getType(), exprInfo .getInternalName(), exprInfo.getTabAlias(), exprInfo .getIsPartitionCol())); String field = getColumnInternalName(i); @@ -2103,7 +2103,7 @@ for (Map.Entry entry : aggregationTrees.entrySet()) { ASTNode value = entry.getValue(); String aggName = value.getChild(0).getText(); - ArrayList aggParameters = new ArrayList(); + ArrayList aggParameters = new ArrayList(); // If the function is distinct, partial aggregartion has not been done on // the client side. @@ -2129,7 +2129,7 @@ String paraExpression = paraExprInfo.getInternalName(); assert (paraExpression != null); - aggParameters.add(new exprNodeColumnDesc(paraExprInfo.getType(), + aggParameters.add(new ExprNodeColumnDesc(paraExprInfo.getType(), paraExprInfo.getInternalName(), paraExprInfo.getTabAlias(), paraExprInfo.getIsPartitionCol())); } @@ -2141,12 +2141,12 @@ } String paraExpression = paraExprInfo.getInternalName(); assert (paraExpression != null); - aggParameters.add(new exprNodeColumnDesc(paraExprInfo.getType(), + aggParameters.add(new ExprNodeColumnDesc(paraExprInfo.getType(), paraExpression, paraExprInfo.getTabAlias(), paraExprInfo .getIsPartitionCol())); } boolean isDistinct = (value.getType() == HiveParser.TOK_FUNCTIONDI); - Mode amode = groupByDescModeToUDAFMode(mode, isDistinct); + Mode amode = GroupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = null; // For distincts, partial aggregations have not been done if (distPartAgg) { @@ -2161,9 +2161,9 @@ GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); - aggregations.add(new aggregationDesc(aggName.toLowerCase(), + aggregations.add(new AggregationDesc(aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, - (mode != groupByDesc.Mode.FINAL && isDistinct), amode)); + (mode != GroupByDesc.Mode.FINAL && isDistinct), amode)); String field = getColumnInternalName(groupByKeys.size() + aggregations.size() - 1); outputColumnNames.add(field); @@ -2172,7 +2172,7 @@ } Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( - new groupByDesc(mode, outputColumnNames, groupByKeys, aggregations, + new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, distPartAgg), new RowSchema(groupByOutputRowResolver .getColumnInfos()), reduceSinkOperatorInfo), groupByOutputRowResolver); @@ -2195,7 +2195,7 @@ */ @SuppressWarnings("nls") private Operator genGroupByPlanMapGroupByOperator(QB qb, String dest, - Operator inputOperatorInfo, groupByDesc.Mode mode, + Operator inputOperatorInfo, GroupByDesc.Mode mode, Map genericUDAFEvaluators) throws SemanticException { @@ -2204,14 +2204,14 @@ QBParseInfo parseInfo = 
qb.getParseInfo(); RowResolver groupByOutputRowResolver = new RowResolver(); groupByOutputRowResolver.setIsExprResolver(true); - ArrayList groupByKeys = new ArrayList(); + ArrayList groupByKeys = new ArrayList(); ArrayList outputColumnNames = new ArrayList(); - ArrayList aggregations = new ArrayList(); - Map colExprMap = new HashMap(); + ArrayList aggregations = new ArrayList(); + Map colExprMap = new HashMap(); List grpByExprs = getGroupByForClause(parseInfo, dest); for (int i = 0; i < grpByExprs.size(); ++i) { ASTNode grpbyExpr = grpByExprs.get(i); - exprNodeDesc grpByExprNode = genExprNodeDesc(grpbyExpr, + ExprNodeDesc grpByExprNode = genExprNodeDesc(grpbyExpr, groupByInputRowResolver); groupByKeys.add(grpByExprNode); @@ -2231,7 +2231,7 @@ ASTNode parameter = (ASTNode) value.getChild(i); String text = parameter.toStringTree(); if (groupByOutputRowResolver.get("", text) == null) { - exprNodeDesc distExprNode = genExprNodeDesc(parameter, + ExprNodeDesc distExprNode = genExprNodeDesc(parameter, groupByInputRowResolver); groupByKeys.add(distExprNode); numDistn++; @@ -2252,26 +2252,26 @@ for (Map.Entry entry : aggregationTrees.entrySet()) { ASTNode value = entry.getValue(); String aggName = unescapeIdentifier(value.getChild(0).getText()); - ArrayList aggParameters = new ArrayList(); + ArrayList aggParameters = new ArrayList(); new ArrayList>(); // 0 is the function name for (int i = 1; i < value.getChildCount(); i++) { ASTNode paraExpr = (ASTNode) value.getChild(i); - exprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, + ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, groupByInputRowResolver); aggParameters.add(paraExprNode); } boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; - Mode amode = groupByDescModeToUDAFMode(mode, isDistinct); + Mode amode = GroupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = getGenericUDAFEvaluator( aggName, aggParameters, value); assert (genericUDAFEvaluator != null); GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); - aggregations.add(new aggregationDesc(aggName.toLowerCase(), + aggregations.add(new AggregationDesc(aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, isDistinct, amode)); String field = getColumnInternalName(groupByKeys.size() @@ -2287,7 +2287,7 @@ } Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( - new groupByDesc(mode, outputColumnNames, groupByKeys, aggregations, + new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, false), new RowSchema(groupByOutputRowResolver.getColumnInfos()), inputOperatorInfo), groupByOutputRowResolver); op.setColumnExprMap(colExprMap); @@ -2318,15 +2318,15 @@ QBParseInfo parseInfo = qb.getParseInfo(); RowResolver reduceSinkOutputRowResolver = new RowResolver(); reduceSinkOutputRowResolver.setIsExprResolver(true); - Map colExprMap = new HashMap(); - ArrayList reduceKeys = new ArrayList(); + Map colExprMap = new HashMap(); + ArrayList reduceKeys = new ArrayList(); // Pre-compute group-by keys and store in reduceKeys List outputColumnNames = new ArrayList(); List grpByExprs = getGroupByForClause(parseInfo, dest); for (int i = 0; i < grpByExprs.size(); ++i) { ASTNode grpbyExpr = grpByExprs.get(i); - exprNodeDesc inputExpr = genExprNodeDesc(grpbyExpr, + ExprNodeDesc inputExpr = genExprNodeDesc(grpbyExpr, reduceSinkInputRowResolver); reduceKeys.add(inputExpr); String text = grpbyExpr.toStringTree(); @@ -2366,7 +2366,7 @@ } } - ArrayList reduceValues = new ArrayList(); + 
ArrayList reduceValues = new ArrayList(); HashMap aggregationTrees = parseInfo .getAggregationExprsForClause(dest); @@ -2399,7 +2399,7 @@ TypeInfo type = reduceSinkInputRowResolver.getColumnInfos().get( inputField).getType(); - reduceValues.add(new exprNodeColumnDesc(type, + reduceValues.add(new ExprNodeColumnDesc(type, getColumnInternalName(inputField), "", false)); inputField++; outputColumnNames.add(getColumnInternalName(reduceValues.size() - 1)); @@ -2443,8 +2443,8 @@ groupByOperatorInfo).getRR(); RowResolver reduceSinkOutputRowResolver2 = new RowResolver(); reduceSinkOutputRowResolver2.setIsExprResolver(true); - Map colExprMap = new HashMap(); - ArrayList reduceKeys = new ArrayList(); + Map colExprMap = new HashMap(); + ArrayList reduceKeys = new ArrayList(); ArrayList outputColumnNames = new ArrayList(); // Get group-by keys and store in reduceKeys List grpByExprs = getGroupByForClause(parseInfo, dest); @@ -2454,7 +2454,7 @@ outputColumnNames.add(field); TypeInfo typeInfo = reduceSinkInputRowResolver2.get("", grpbyExpr.toStringTree()).getType(); - exprNodeColumnDesc inputExpr = new exprNodeColumnDesc(typeInfo, field, + ExprNodeColumnDesc inputExpr = new ExprNodeColumnDesc(typeInfo, field, "", false); reduceKeys.add(inputExpr); ColumnInfo colInfo = new ColumnInfo(Utilities.ReduceField.KEY.toString() @@ -2463,7 +2463,7 @@ colExprMap.put(colInfo.getInternalName(), inputExpr); } // Get partial aggregation results and store in reduceValues - ArrayList reduceValues = new ArrayList(); + ArrayList reduceValues = new ArrayList(); int inputField = reduceKeys.size(); HashMap aggregationTrees = parseInfo .getAggregationExprsForClause(dest); @@ -2472,7 +2472,7 @@ ASTNode t = entry.getValue(); TypeInfo typeInfo = reduceSinkInputRowResolver2.get("", t.toStringTree()) .getType(); - reduceValues.add(new exprNodeColumnDesc(typeInfo, field, "", false)); + reduceValues.add(new ExprNodeColumnDesc(typeInfo, field, "", false)); inputField++; String col = getColumnInternalName(reduceValues.size() - 1); outputColumnNames.add(col); @@ -2507,16 +2507,16 @@ */ @SuppressWarnings("nls") private Operator genGroupByPlanGroupByOperator2MR(QBParseInfo parseInfo, - String dest, Operator reduceSinkOperatorInfo2, groupByDesc.Mode mode, + String dest, Operator reduceSinkOperatorInfo2, GroupByDesc.Mode mode, Map genericUDAFEvaluators) throws SemanticException { RowResolver groupByInputRowResolver2 = opParseCtx.get( reduceSinkOperatorInfo2).getRR(); RowResolver groupByOutputRowResolver2 = new RowResolver(); groupByOutputRowResolver2.setIsExprResolver(true); - ArrayList groupByKeys = new ArrayList(); - ArrayList aggregations = new ArrayList(); - Map colExprMap = new HashMap(); + ArrayList groupByKeys = new ArrayList(); + ArrayList aggregations = new ArrayList(); + Map colExprMap = new HashMap(); List grpByExprs = getGroupByForClause(parseInfo, dest); ArrayList outputColumnNames = new ArrayList(); for (int i = 0; i < grpByExprs.size(); ++i) { @@ -2528,7 +2528,7 @@ } String expression = exprInfo.getInternalName(); - groupByKeys.add(new exprNodeColumnDesc(exprInfo.getType(), expression, + groupByKeys.add(new ExprNodeColumnDesc(exprInfo.getType(), expression, exprInfo.getTabAlias(), exprInfo.getIsPartitionCol())); String field = getColumnInternalName(i); outputColumnNames.add(field); @@ -2539,7 +2539,7 @@ HashMap aggregationTrees = parseInfo .getAggregationExprsForClause(dest); for (Map.Entry entry : aggregationTrees.entrySet()) { - ArrayList aggParameters = new ArrayList(); + ArrayList aggParameters = new ArrayList(); ASTNode 
value = entry.getValue(); String text = entry.getKey(); ColumnInfo paraExprInfo = groupByInputRowResolver2.get("", text); @@ -2548,25 +2548,25 @@ } String paraExpression = paraExprInfo.getInternalName(); assert (paraExpression != null); - aggParameters.add(new exprNodeColumnDesc(paraExprInfo.getType(), + aggParameters.add(new ExprNodeColumnDesc(paraExprInfo.getType(), paraExpression, paraExprInfo.getTabAlias(), paraExprInfo .getIsPartitionCol())); String aggName = value.getChild(0).getText(); boolean isDistinct = value.getType() == HiveParser.TOK_FUNCTIONDI; - Mode amode = groupByDescModeToUDAFMode(mode, isDistinct); + Mode amode = GroupByDescModeToUDAFMode(mode, isDistinct); GenericUDAFEvaluator genericUDAFEvaluator = genericUDAFEvaluators .get(entry.getKey()); assert (genericUDAFEvaluator != null); GenericUDAFInfo udaf = getGenericUDAFInfo(genericUDAFEvaluator, amode, aggParameters); aggregations - .add(new aggregationDesc( + .add(new AggregationDesc( aggName.toLowerCase(), udaf.genericUDAFEvaluator, udaf.convertedParameters, - (mode != groupByDesc.Mode.FINAL && value.getToken().getType() == HiveParser.TOK_FUNCTIONDI), + (mode != GroupByDesc.Mode.FINAL && value.getToken().getType() == HiveParser.TOK_FUNCTIONDI), amode)); String field = getColumnInternalName(groupByKeys.size() + aggregations.size() - 1); @@ -2576,7 +2576,7 @@ } Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( - new groupByDesc(mode, outputColumnNames, groupByKeys, aggregations, + new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, false), new RowSchema(groupByOutputRowResolver2.getColumnInfos()), reduceSinkOperatorInfo2), groupByOutputRowResolver2); op.setColumnExprMap(colExprMap); @@ -2626,13 +2626,13 @@ // ////// 2. Generate GroupbyOperator Operator groupByOperatorInfo = genGroupByPlanGroupByOperator(parseInfo, - dest, reduceSinkOperatorInfo, groupByDesc.Mode.COMPLETE, null); + dest, reduceSinkOperatorInfo, GroupByDesc.Mode.COMPLETE, null); return groupByOperatorInfo; } static ArrayList getUDAFEvaluators( - ArrayList aggs) { + ArrayList aggs) { ArrayList result = new ArrayList(); for (int i = 0; i < aggs.size(); i++) { result.add(aggs.get(i).getGenericUDAFEvaluator()); @@ -2679,7 +2679,7 @@ // ////// 2. Generate GroupbyOperator Operator groupByOperatorInfo = genGroupByPlanGroupByOperator1(parseInfo, - dest, input, groupByDesc.Mode.HASH, genericUDAFEvaluators, true); + dest, input, GroupByDesc.Mode.HASH, genericUDAFEvaluators, true); int numReducers = -1; List grpByExprs = getGroupByForClause(parseInfo, dest); @@ -2690,7 +2690,7 @@ // ////// 4. Generate GroupbyOperator2 Operator groupByOperatorInfo2 = genGroupByPlanGroupByOperator2MR(parseInfo, - dest, reduceSinkOperatorInfo2, groupByDesc.Mode.FINAL, + dest, reduceSinkOperatorInfo2, GroupByDesc.Mode.FINAL, genericUDAFEvaluators); return groupByOperatorInfo2; @@ -2757,7 +2757,7 @@ // ////// 2. Generate GroupbyOperator Map genericUDAFEvaluators = new LinkedHashMap(); GroupByOperator groupByOperatorInfo = (GroupByOperator) genGroupByPlanGroupByOperator( - parseInfo, dest, reduceSinkOperatorInfo, groupByDesc.Mode.PARTIAL1, + parseInfo, dest, reduceSinkOperatorInfo, GroupByDesc.Mode.PARTIAL1, genericUDAFEvaluators); int numReducers = -1; @@ -2772,7 +2772,7 @@ // ////// 4. 
Generate GroupbyOperator2 Operator groupByOperatorInfo2 = genGroupByPlanGroupByOperator2MR(parseInfo, - dest, reduceSinkOperatorInfo2, groupByDesc.Mode.FINAL, + dest, reduceSinkOperatorInfo2, GroupByDesc.Mode.FINAL, genericUDAFEvaluators); return groupByOperatorInfo2; @@ -2818,7 +2818,7 @@ // ////// Generate GroupbyOperator for a map-side partial aggregation Map genericUDAFEvaluators = new LinkedHashMap(); GroupByOperator groupByOperatorInfo = (GroupByOperator) genGroupByPlanMapGroupByOperator( - qb, dest, inputOperatorInfo, groupByDesc.Mode.HASH, + qb, dest, inputOperatorInfo, GroupByDesc.Mode.HASH, genericUDAFEvaluators); groupOpToInputTables.put(groupByOperatorInfo, opParseCtx.get( @@ -2844,7 +2844,7 @@ // used, and merge is invoked // on the reducer. return genGroupByPlanGroupByOperator1(parseInfo, dest, - reduceSinkOperatorInfo, groupByDesc.Mode.MERGEPARTIAL, + reduceSinkOperatorInfo, GroupByDesc.Mode.MERGEPARTIAL, genericUDAFEvaluators, false); } @@ -2888,7 +2888,7 @@ // ////// Generate GroupbyOperator for a map-side partial aggregation Map genericUDAFEvaluators = new LinkedHashMap(); GroupByOperator groupByOperatorInfo = (GroupByOperator) genGroupByPlanMapGroupByOperator( - qb, dest, inputOperatorInfo, groupByDesc.Mode.HASH, + qb, dest, inputOperatorInfo, GroupByDesc.Mode.HASH, genericUDAFEvaluators); groupOpToInputTables.put(groupByOperatorInfo, opParseCtx.get( @@ -2906,7 +2906,7 @@ // ////// Generate GroupbyOperator for a partial aggregation Operator groupByOperatorInfo2 = genGroupByPlanGroupByOperator1(parseInfo, - dest, reduceSinkOperatorInfo, groupByDesc.Mode.PARTIALS, + dest, reduceSinkOperatorInfo, GroupByDesc.Mode.PARTIALS, genericUDAFEvaluators, false); int numReducers = -1; @@ -2921,7 +2921,7 @@ // ////// Generate GroupbyOperator3 return genGroupByPlanGroupByOperator2MR(parseInfo, dest, - reduceSinkOperatorInfo2, groupByDesc.Mode.FINAL, + reduceSinkOperatorInfo2, GroupByDesc.Mode.FINAL, genericUDAFEvaluators); } else { // ////// Generate ReduceSink Operator @@ -2930,7 +2930,7 @@ .size(), 1, true); return genGroupByPlanGroupByOperator2MR(parseInfo, dest, - reduceSinkOperatorInfo, groupByDesc.Mode.FINAL, genericUDAFEvaluators); + reduceSinkOperatorInfo, GroupByDesc.Mode.FINAL, genericUDAFEvaluators); } } @@ -2967,7 +2967,7 @@ Table dest_tab; // destination table if any String queryTmpdir; // the intermediate destination directory Path dest_path; // the final destination directory - tableDesc table_desc = null; + TableDesc table_desc = null; int currentTableId = 0; boolean isLocal = false; @@ -2989,7 +2989,7 @@ destTableId++; // Create the work for moving the table - loadTableWork.add(new loadTableDesc(queryTmpdir, ctx + loadTableWork.add(new LoadTableDesc(queryTmpdir, ctx .getExternalTmpFileURI(dest_path.toUri()), table_desc, new HashMap())); if (!outputs.add(new WriteEntity(dest_tab))) { @@ -3010,7 +3010,7 @@ currentTableId = destTableId; destTableId++; - loadTableWork.add(new loadTableDesc(queryTmpdir, ctx + loadTableWork.add(new LoadTableDesc(queryTmpdir, ctx .getExternalTmpFileURI(dest_path.toUri()), table_desc, dest_part .getSpec())); if (!outputs.add(new WriteEntity(dest_part))) { @@ -3050,7 +3050,7 @@ // table command // rather than taking the default value List field_schemas = null; - createTableDesc tblDesc = qb.getTableDesc(); + CreateTableDesc tblDesc = qb.getTableDesc(); if (tblDesc != null) { field_schemas = new ArrayList(); } @@ -3113,7 +3113,7 @@ } boolean isDfsDir = (dest_type.intValue() == QBMetaData.DEST_DFS_FILE); - loadFileWork.add(new 
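The group-by hunks above rename groupByDesc.Mode to GroupByDesc.Mode across the staged aggregation planners: a map-side hash aggregation (HASH), a partial merge on the reducer (MERGEPARTIAL, or PARTIALS in the two-job plan), and a final merge (FINAL). The minimal sketch below is not part of the patch; it only illustrates how those constants read at a call site after the rename, assuming the Hive plan classes are on the classpath.

import org.apache.hadoop.hive.ql.plan.GroupByDesc;

class GroupByStageModes {
  // Map side: hash-based partial aggregation.
  static final GroupByDesc.Mode MAP_SIDE = GroupByDesc.Mode.HASH;
  // First reducer of the two-job plan: merge partial aggregates further.
  static final GroupByDesc.Mode FIRST_REDUCE = GroupByDesc.Mode.PARTIALS;
  // Second reducer: produce the final aggregates.
  static final GroupByDesc.Mode SECOND_REDUCE = GroupByDesc.Mode.FINAL;
}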
loadFileDesc(queryTmpdir, destStr, isDfsDir, cols, + loadFileWork.add(new LoadFileDesc(queryTmpdir, destStr, isDfsDir, cols, colTypes)); if (tblDesc == null) { @@ -3155,7 +3155,7 @@ RowSchema fsRS = new RowSchema(vecCol); Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new fileSinkDesc(queryTmpdir, table_desc, conf + new FileSinkDesc(queryTmpdir, table_desc, conf .getBoolVar(HiveConf.ConfVars.COMPRESSRESULT), currentTableId), fsRS, input), inputRR); @@ -3170,7 +3170,7 @@ * types that are expected by the table_desc. */ Operator genConversionSelectOperator(String dest, QB qb, Operator input, - tableDesc table_desc) throws SemanticException { + TableDesc table_desc) throws SemanticException { StructObjectInspector oi = null; try { Deserializer deserializer = table_desc.getDeserializerClass() @@ -3195,7 +3195,7 @@ // Check column types boolean converted = false; int columnNumber = tableFields.size(); - ArrayList expressions = new ArrayList( + ArrayList expressions = new ArrayList( columnNumber); // MetadataTypedColumnsetSerDe does not need type conversions because it // does @@ -3211,7 +3211,7 @@ TypeInfo tableFieldTypeInfo = TypeInfoUtils .getTypeInfoFromObjectInspector(tableFieldOI); TypeInfo rowFieldTypeInfo = rowFields.get(i).getType(); - exprNodeDesc column = new exprNodeColumnDesc(rowFieldTypeInfo, + ExprNodeDesc column = new ExprNodeColumnDesc(rowFieldTypeInfo, rowFields.get(i).getInternalName(), "", false); // LazySimpleSerDe can convert any types to String type using // JSON-format. @@ -3250,7 +3250,7 @@ colName.add(name); } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new selectDesc(expressions, colName), new RowSchema(rowResolver + new SelectDesc(expressions, colName), new RowSchema(rowResolver .getColumnInfos()), input), rowResolver); return output; @@ -3272,7 +3272,7 @@ RowResolver inputRR = opParseCtx.get(input).getRR(); Operator limitMap = putOpInsertMap(OperatorFactory.getAndMakeChild( - new limitDesc(limit), new RowSchema(inputRR.getColumnInfos()), input), + new LimitDesc(limit), new RowSchema(inputRR.getColumnInfos()), input), inputRR); LOG.debug("Created LimitOperator Plan for clause: " + dest @@ -3361,7 +3361,7 @@ // Add the UDTFOperator to the operator DAG Operator udtf = putOpInsertMap(OperatorFactory.getAndMakeChild( - new udtfDesc(genericUDTF), new RowSchema(out_rwsch.getColumnInfos()), + new UDTFDesc(genericUDTF), new RowSchema(out_rwsch.getColumnInfos()), input), out_rwsch); return udtf; } @@ -3400,7 +3400,7 @@ if (partitionExprs == null) { partitionExprs = qb.getParseInfo().getDistributeByForClause(dest); } - ArrayList partitionCols = new ArrayList(); + ArrayList partitionCols = new ArrayList(); if (partitionExprs != null) { int ccount = partitionExprs.getChildCount(); for (int i = 0; i < ccount; ++i) { @@ -3429,7 +3429,7 @@ } } - ArrayList sortCols = new ArrayList(); + ArrayList sortCols = new ArrayList(); StringBuilder order = new StringBuilder(); if (sortExprs != null) { int ccount = sortExprs.getChildCount(); @@ -3448,17 +3448,17 @@ // ClusterBy order.append("+"); } - exprNodeDesc exprNode = genExprNodeDesc(cl, inputRR); + ExprNodeDesc exprNode = genExprNodeDesc(cl, inputRR); sortCols.add(exprNode); } } // For the generation of the values expression just get the inputs // signature and generate field expressions for those - Map colExprMap = new HashMap(); - ArrayList valueCols = new ArrayList(); + Map colExprMap = new HashMap(); + ArrayList valueCols = new ArrayList(); for (ColumnInfo colInfo : inputRR.getColumnInfos()) 
{ - valueCols.add(new exprNodeColumnDesc(colInfo.getType(), colInfo + valueCols.add(new ExprNodeColumnDesc(colInfo.getType(), colInfo .getInternalName(), colInfo.getTabAlias(), colInfo .getIsPartitionCol())); colExprMap.put(colInfo.getInternalName(), valueCols @@ -3487,7 +3487,7 @@ } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new extractDesc(new exprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, + new ExtractDesc(new ExprNodeColumnDesc(TypeInfoFactory.stringTypeInfo, Utilities.ReduceField.VALUE.toString(), "", false)), new RowSchema( out_rwsch.getColumnInfos()), interim), out_rwsch); @@ -3506,8 +3506,8 @@ int outputPos = 0; Map reversedExprs = new HashMap(); - HashMap> exprMap = new HashMap>(); - Map colExprMap = new HashMap(); + HashMap> exprMap = new HashMap>(); + Map colExprMap = new HashMap(); HashMap> posToAliasMap = new HashMap>(); for (int pos = 0; pos < right.length; ++pos) { @@ -3517,8 +3517,8 @@ input = left; } - ArrayList keyDesc = new ArrayList(); - Byte tag = Byte.valueOf((byte) (((reduceSinkDesc) (input.getConf())) + ArrayList keyDesc = new ArrayList(); + Byte tag = Byte.valueOf((byte) (((ReduceSinkDesc) (input.getConf())) .getTag())); // check whether this input operator produces output @@ -3539,7 +3539,7 @@ while (fNamesIter.hasNext()) { String field = fNamesIter.next(); ColumnInfo valueInfo = inputRS.get(key, field); - keyDesc.add(new exprNodeColumnDesc(valueInfo.getType(), valueInfo + keyDesc.add(new ExprNodeColumnDesc(valueInfo.getType(), valueInfo .getInternalName(), valueInfo.getTabAlias(), valueInfo .getIsPartitionCol())); @@ -3559,14 +3559,14 @@ rightOps[pos] = input; } - org.apache.hadoop.hive.ql.plan.joinCond[] joinCondns = new org.apache.hadoop.hive.ql.plan.joinCond[join + org.apache.hadoop.hive.ql.plan.JoinCondDesc[] joinCondns = new org.apache.hadoop.hive.ql.plan.JoinCondDesc[join .getJoinCond().length]; for (int i = 0; i < join.getJoinCond().length; i++) { - joinCond condn = join.getJoinCond()[i]; - joinCondns[i] = new org.apache.hadoop.hive.ql.plan.joinCond(condn); + JoinCond condn = join.getJoinCond()[i]; + joinCondns[i] = new org.apache.hadoop.hive.ql.plan.JoinCondDesc(condn); } - joinDesc desc = new joinDesc(exprMap, outputColumnNames, joinCondns); + JoinDesc desc = new JoinDesc(exprMap, outputColumnNames, joinCondns); desc.setReversedExprs(reversedExprs); JoinOperator joinOp = (JoinOperator) OperatorFactory.getAndMakeChild(desc, new RowSchema(outputRS.getColumnInfos()), rightOps); @@ -3581,7 +3581,7 @@ RowResolver inputRS = opParseCtx.get(child).getRR(); RowResolver outputRS = new RowResolver(); ArrayList outputColumns = new ArrayList(); - ArrayList reduceKeys = new ArrayList(); + ArrayList reduceKeys = new ArrayList(); // Compute join keys and store in reduceKeys Vector exprs = joinTree.getExpressions().get(pos); @@ -3591,16 +3591,16 @@ } // Walk over the input row resolver and copy in the output - ArrayList reduceValues = new ArrayList(); + ArrayList reduceValues = new ArrayList(); Iterator tblNamesIter = inputRS.getTableNames().iterator(); - Map colExprMap = new HashMap(); + Map colExprMap = new HashMap(); while (tblNamesIter.hasNext()) { String src = tblNamesIter.next(); HashMap fMap = inputRS.getFieldMap(src); for (Map.Entry entry : fMap.entrySet()) { String field = entry.getKey(); ColumnInfo valueInfo = entry.getValue(); - exprNodeColumnDesc inputExpr = new exprNodeColumnDesc(valueInfo + ExprNodeColumnDesc inputExpr = new ExprNodeColumnDesc(valueInfo .getType(), valueInfo.getInternalName(), valueInfo.getTabAlias(), 
valueInfo.getIsPartitionCol()); reduceValues.add(inputExpr); @@ -3677,7 +3677,7 @@ // generate a groupby operator (HASH mode) for a map-side partial // aggregation for semijoin srcOp = genMapGroupByForSemijoin(qb, fields, srcOp, - groupByDesc.Mode.HASH); + GroupByDesc.Mode.HASH); } // generate a ReduceSink operator for the join @@ -3713,12 +3713,12 @@ Operator input) throws SemanticException { RowResolver inputRR = opParseCtx.get(input).getRR(); - ArrayList colList = new ArrayList(); + ArrayList colList = new ArrayList(); ArrayList columnNames = new ArrayList(); // construct the list of columns that need to be projected for (ASTNode field : fields) { - exprNodeColumnDesc exprNode = (exprNodeColumnDesc) genExprNodeDesc(field, + ExprNodeColumnDesc exprNode = (ExprNodeColumnDesc) genExprNodeDesc(field, inputRR); colList.add(exprNode); columnNames.add(exprNode.getColumn()); @@ -3726,7 +3726,7 @@ // create selection operator Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new selectDesc(colList, columnNames, false), new RowSchema(inputRR + new SelectDesc(colList, columnNames, false), new RowSchema(inputRR .getColumnInfos()), input), inputRR); output.setColumnExprMap(input.getColumnExprMap()); @@ -3740,16 +3740,16 @@ // join // key // "tab.col" - Operator inputOperatorInfo, groupByDesc.Mode mode) + Operator inputOperatorInfo, GroupByDesc.Mode mode) throws SemanticException { RowResolver groupByInputRowResolver = opParseCtx.get(inputOperatorInfo) .getRR(); RowResolver groupByOutputRowResolver = new RowResolver(); - ArrayList groupByKeys = new ArrayList(); + ArrayList groupByKeys = new ArrayList(); ArrayList outputColumnNames = new ArrayList(); - ArrayList aggregations = new ArrayList(); - Map colExprMap = new HashMap(); + ArrayList aggregations = new ArrayList(); + Map colExprMap = new HashMap(); qb.getParseInfo(); groupByOutputRowResolver.setIsExprResolver(true); // join keys should only @@ -3759,7 +3759,7 @@ for (int i = 0; i < fields.size(); ++i) { // get the group by keys to ColumnInfo ASTNode colName = fields.get(i); - exprNodeDesc grpByExprNode = genExprNodeDesc(colName, + ExprNodeDesc grpByExprNode = genExprNodeDesc(colName, groupByInputRowResolver); groupByKeys.add(grpByExprNode); @@ -3776,7 +3776,7 @@ // Generate group-by operator Operator op = putOpInsertMap(OperatorFactory.getAndMakeChild( - new groupByDesc(mode, outputColumnNames, groupByKeys, aggregations, + new GroupByDesc(mode, outputColumnNames, groupByKeys, aggregations, false), new RowSchema(groupByOutputRowResolver.getColumnInfos()), inputOperatorInfo), groupByOutputRowResolver); @@ -3786,12 +3786,12 @@ private void genJoinOperatorTypeCheck(Operator left, Operator[] right) throws SemanticException { - // keys[i] -> ArrayList for the i-th join operator key list - ArrayList> keys = new ArrayList>(); + // keys[i] -> ArrayList for the i-th join operator key list + ArrayList> keys = new ArrayList>(); int keyLength = 0; for (int i = 0; i < right.length; i++) { Operator oi = (i == 0 && right[i] == null ? left : right[i]); - reduceSinkDesc now = ((ReduceSinkOperator) (oi)).getConf(); + ReduceSinkDesc now = ((ReduceSinkOperator) (oi)).getConf(); if (i == 0) { keyLength = now.getKeyCols().size(); } else { @@ -3827,7 +3827,7 @@ // output key types might have changed. for (int i = 0; i < right.length; i++) { Operator oi = (i == 0 && right[i] == null ? 
left : right[i]); - reduceSinkDesc now = ((ReduceSinkOperator) (oi)).getConf(); + ReduceSinkDesc now = ((ReduceSinkOperator) (oi)).getConf(); now.setKeySerializeInfo(PlanUtils.getReduceKeyTableDesc(PlanUtils .getFieldSchemasFromColumnList(now.getKeyCols(), "joinkey"), now @@ -3967,9 +3967,9 @@ joinTree.setLeftAliases(leftAliases.toArray(new String[0])); joinTree.setRightAliases(rightAliases.toArray(new String[0])); - joinCond[] condn = new joinCond[preserved.size()]; + JoinCond[] condn = new JoinCond[preserved.size()]; for (int i = 0; i < condn.length; i++) { - condn[i] = new joinCond(preserved.get(i)); + condn[i] = new JoinCond(preserved.get(i)); } joinTree.setJoinCond(condn); @@ -3983,27 +3983,27 @@ private QBJoinTree genJoinTree(QB qb, ASTNode joinParseTree) throws SemanticException { QBJoinTree joinTree = new QBJoinTree(); - joinCond[] condn = new joinCond[1]; + JoinCond[] condn = new JoinCond[1]; switch (joinParseTree.getToken().getType()) { case HiveParser.TOK_LEFTOUTERJOIN: joinTree.setNoOuterJoin(false); - condn[0] = new joinCond(0, 1, joinType.LEFTOUTER); + condn[0] = new JoinCond(0, 1, JoinType.LEFTOUTER); break; case HiveParser.TOK_RIGHTOUTERJOIN: joinTree.setNoOuterJoin(false); - condn[0] = new joinCond(0, 1, joinType.RIGHTOUTER); + condn[0] = new JoinCond(0, 1, JoinType.RIGHTOUTER); break; case HiveParser.TOK_FULLOUTERJOIN: joinTree.setNoOuterJoin(false); - condn[0] = new joinCond(0, 1, joinType.FULLOUTER); + condn[0] = new JoinCond(0, 1, JoinType.FULLOUTER); break; case HiveParser.TOK_LEFTSEMIJOIN: joinTree.setNoSemiJoin(false); - condn[0] = new joinCond(0, 1, joinType.LEFTSEMI); + condn[0] = new JoinCond(0, 1, JoinType.LEFTSEMI); break; default: - condn[0] = new joinCond(0, 1, joinType.INNER); + condn[0] = new JoinCond(0, 1, JoinType.INNER); joinTree.setNoOuterJoin(true); break; } @@ -4197,17 +4197,17 @@ target.mergeRHSSemijoin(node); - joinCond[] nodeCondns = node.getJoinCond(); + JoinCond[] nodeCondns = node.getJoinCond(); int nodeCondnsSize = nodeCondns.length; - joinCond[] targetCondns = target.getJoinCond(); + JoinCond[] targetCondns = target.getJoinCond(); int targetCondnsSize = targetCondns.length; - joinCond[] newCondns = new joinCond[nodeCondnsSize + targetCondnsSize]; + JoinCond[] newCondns = new JoinCond[nodeCondnsSize + targetCondnsSize]; for (int i = 0; i < targetCondnsSize; i++) { newCondns[i] = targetCondns[i]; } for (int i = 0; i < nodeCondnsSize; i++) { - joinCond nodeCondn = nodeCondns[i]; + JoinCond nodeCondn = nodeCondns[i]; if (nodeCondn.getLeft() == 0) { nodeCondn.setLeft(pos); } else { @@ -4307,16 +4307,16 @@ OpParseContext inputCtx = opParseCtx.get(input); RowResolver inputRR = inputCtx.getRR(); Vector columns = inputRR.getColumnInfos(); - ArrayList colList = new ArrayList(); + ArrayList colList = new ArrayList(); ArrayList columnNames = new ArrayList(); for (int i = 0; i < columns.size(); i++) { ColumnInfo col = columns.get(i); - colList.add(new exprNodeColumnDesc(col.getType(), col.getInternalName(), + colList.add(new ExprNodeColumnDesc(col.getType(), col.getInternalName(), col.getTabAlias(), col.getIsPartitionCol())); columnNames.add(col.getInternalName()); } Operator output = putOpInsertMap(OperatorFactory.getAndMakeChild( - new selectDesc(colList, columnNames, true), new RowSchema(inputRR + new SelectDesc(colList, columnNames, true), new RowSchema(inputRR .getColumnInfos()), input), inputRR); output.setColumnExprMap(input.getColumnExprMap()); return output; @@ -4336,7 +4336,7 @@ return null; } - List oldList = null; + List oldList = null; List 
oldASTList = null; for (String dest : ks) { @@ -4356,7 +4356,7 @@ return null; } - List currDestList = new ArrayList(); + List currDestList = new ArrayList(); List currASTList = new ArrayList(); try { // 0 is function name @@ -4400,15 +4400,15 @@ RowResolver inputRR = opParseCtx.get(input).getRR(); RowResolver reduceSinkOutputRowResolver = new RowResolver(); reduceSinkOutputRowResolver.setIsExprResolver(true); - ArrayList reduceKeys = new ArrayList(); - ArrayList reduceValues = new ArrayList(); - Map colExprMap = new HashMap(); + ArrayList reduceKeys = new ArrayList(); + ArrayList reduceValues = new ArrayList(); + Map colExprMap = new HashMap(); // Pre-compute distinct group-by keys and store in reduceKeys List outputColumnNames = new ArrayList(); for (ASTNode distn : distExprs) { - exprNodeDesc distExpr = genExprNodeDesc(distn, inputRR); + ExprNodeDesc distExpr = genExprNodeDesc(distn, inputRR); reduceKeys.add(distExpr); String text = distn.toStringTree(); if (reduceSinkOutputRowResolver.get("", text) == null) { @@ -4431,7 +4431,7 @@ String text = grpbyExpr.toStringTree(); if (reduceSinkOutputRowResolver.get("", text) == null) { - exprNodeDesc grpByExprNode = genExprNodeDesc(grpbyExpr, inputRR); + ExprNodeDesc grpByExprNode = genExprNodeDesc(grpbyExpr, inputRR); reduceValues.add(grpByExprNode); String field = Utilities.ReduceField.VALUE.toString() + "." + getColumnInternalName(reduceValues.size() - 1); @@ -4457,7 +4457,7 @@ String text = paraExpr.toStringTree(); if (reduceSinkOutputRowResolver.get("", text) == null) { - exprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, inputRR); + ExprNodeDesc paraExprNode = genExprNodeDesc(paraExpr, inputRR); reduceValues.add(paraExprNode); String field = Utilities.ReduceField.VALUE.toString() + "." + getColumnInternalName(reduceValues.size() - 1); @@ -4507,7 +4507,7 @@ RowResolver currRR = opParseCtx.get(curr).getRR(); // create a forward operator - input = putOpInsertMap(OperatorFactory.getAndMakeChild(new forwardDesc(), + input = putOpInsertMap(OperatorFactory.getAndMakeChild(new ForwardDesc(), new RowSchema(currRR.getColumnInfos()), curr), currRR); for (String dest : ks) { @@ -4679,7 +4679,7 @@ .getParentOperators(); parent.add(rightOp); - unionDesc uDesc = ((UnionOperator) leftOp).getConf(); + UnionDesc uDesc = ((UnionOperator) leftOp).getConf(); uDesc.setNumInputs(uDesc.getNumInputs() + 1); return putOpInsertMap(leftOp, unionoutRR); } else { @@ -4691,7 +4691,7 @@ List> parent = rightOp .getParentOperators(); parent.add(leftOp); - unionDesc uDesc = ((UnionOperator) rightOp).getConf(); + UnionDesc uDesc = ((UnionOperator) rightOp).getConf(); uDesc.setNumInputs(uDesc.getNumInputs() + 1); return putOpInsertMap(rightOp, unionoutRR); @@ -4700,7 +4700,7 @@ // Create a new union operator Operator unionforward = OperatorFactory - .getAndMakeChild(new unionDesc(), new RowSchema(unionoutRR + .getAndMakeChild(new UnionDesc(), new RowSchema(unionoutRR .getColumnInfos())); // set union operator as child of each of leftOp and rightOp @@ -4750,31 +4750,31 @@ * @param planExpr * The plan tree for the expression. 
If the user specified this, the * parse expressions are not used - * @return exprNodeDesc + * @return ExprNodeDesc * @exception SemanticException */ - private exprNodeDesc genSamplePredicate(TableSample ts, + private ExprNodeDesc genSamplePredicate(TableSample ts, List bucketCols, boolean useBucketCols, String alias, - RowResolver rwsch, QBMetaData qbm, exprNodeDesc planExpr) + RowResolver rwsch, QBMetaData qbm, ExprNodeDesc planExpr) throws SemanticException { - exprNodeDesc numeratorExpr = new exprNodeConstantDesc( + ExprNodeDesc numeratorExpr = new ExprNodeConstantDesc( TypeInfoFactory.intTypeInfo, Integer.valueOf(ts.getNumerator() - 1)); - exprNodeDesc denominatorExpr = new exprNodeConstantDesc( + ExprNodeDesc denominatorExpr = new ExprNodeConstantDesc( TypeInfoFactory.intTypeInfo, Integer.valueOf(ts.getDenominator())); - exprNodeDesc intMaxExpr = new exprNodeConstantDesc( + ExprNodeDesc intMaxExpr = new ExprNodeConstantDesc( TypeInfoFactory.intTypeInfo, Integer.valueOf(Integer.MAX_VALUE)); - ArrayList args = new ArrayList(); + ArrayList args = new ArrayList(); if (planExpr != null) { args.add(planExpr); } else if (useBucketCols) { for (String col : bucketCols) { ColumnInfo ci = rwsch.get(alias, col); // TODO: change type to the one in the table schema - args.add(new exprNodeColumnDesc(ci.getType(), ci.getInternalName(), ci + args.add(new ExprNodeColumnDesc(ci.getType(), ci.getInternalName(), ci .getTabAlias(), ci.getIsPartitionCol())); } } else { @@ -4783,17 +4783,17 @@ } } - exprNodeDesc equalsExpr = null; + ExprNodeDesc equalsExpr = null; { - exprNodeDesc hashfnExpr = new exprNodeGenericFuncDesc( + ExprNodeDesc hashfnExpr = new ExprNodeGenericFuncDesc( TypeInfoFactory.intTypeInfo, new GenericUDFHash(), args); assert (hashfnExpr != null); LOG.info("hashfnExpr = " + hashfnExpr); - exprNodeDesc andExpr = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc andExpr = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("&", hashfnExpr, intMaxExpr); assert (andExpr != null); LOG.info("andExpr = " + andExpr); - exprNodeDesc modExpr = TypeCheckProcFactory.DefaultExprProcessor + ExprNodeDesc modExpr = TypeCheckProcFactory.DefaultExprProcessor .getFuncExprNodeDesc("%", andExpr, denominatorExpr); assert (modExpr != null); LOG.info("modExpr = " + modExpr); @@ -4847,7 +4847,7 @@ } // Create the root of the operator tree - top = putOpInsertMap(OperatorFactory.get(new tableScanDesc(alias), + top = putOpInsertMap(OperatorFactory.get(new TableScanDesc(alias), new RowSchema(rwsch.getColumnInfos())), rwsch); // Add this to the list of top operators - we always start from a table @@ -4922,18 +4922,18 @@ // input pruning is enough; add the filter for the optimizer to use it // later LOG.info("No need for sample filter"); - exprNodeDesc samplePredicate = genSamplePredicate(ts, tabBucketCols, + ExprNodeDesc samplePredicate = genSamplePredicate(ts, tabBucketCols, colsEqual, alias, rwsch, qb.getMetaData(), null); - tableOp = OperatorFactory.getAndMakeChild(new filterDesc( + tableOp = OperatorFactory.getAndMakeChild(new FilterDesc( samplePredicate, true, new sampleDesc(ts.getNumerator(), ts .getDenominator(), tabBucketCols, true)), top); } else { // need to add filter - // create tableOp to be filterDesc and set as child to 'top' + // create tableOp to be FilterDesc and set as child to 'top' LOG.info("Need sample filter"); - exprNodeDesc samplePredicate = genSamplePredicate(ts, tabBucketCols, + ExprNodeDesc samplePredicate = genSamplePredicate(ts, tabBucketCols, colsEqual, alias, rwsch, 
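The genSamplePredicate hunk above assembles, out of ExprNodeGenericFuncDesc nodes, the row-level test used for TABLESAMPLE(BUCKET numerator OUT OF denominator): the hash of the bucket columns, masked with Integer.MAX_VALUE, taken modulo the denominator and compared against numerator - 1. The sketch below restates that arithmetic with plain ints; the helper name and signature are illustrative only, the real code builds the same expression tree for the filter operator.

static boolean inSampledBucket(int bucketColsHash, int numerator, int denominator) {
  // Mask with Integer.MAX_VALUE so the hash is non-negative, then test the bucket.
  return ((bucketColsHash & Integer.MAX_VALUE) % denominator) == (numerator - 1);
}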
qb.getMetaData(), null); - tableOp = OperatorFactory.getAndMakeChild(new filterDesc( + tableOp = OperatorFactory.getAndMakeChild(new FilterDesc( samplePredicate, true), top); } } else { @@ -4960,10 +4960,10 @@ TableSample tsSample = new TableSample(1, numBuckets); tsSample.setInputPruning(true); qb.getParseInfo().setTabSample(alias, tsSample); - exprNodeDesc samplePred = genSamplePredicate(tsSample, tab + ExprNodeDesc samplePred = genSamplePredicate(tsSample, tab .getBucketCols(), true, alias, rwsch, qb.getMetaData(), null); tableOp = OperatorFactory - .getAndMakeChild(new filterDesc(samplePred, true, + .getAndMakeChild(new FilterDesc(samplePred, true, new sampleDesc(tsSample.getNumerator(), tsSample .getDenominator(), tab.getBucketCols(), true)), top); LOG.info("No need for sample filter"); @@ -4975,12 +4975,12 @@ tsSample.setInputPruning(false); qb.getParseInfo().setTabSample(alias, tsSample); LOG.info("Need sample filter"); - exprNodeDesc randFunc = TypeCheckProcFactory.DefaultExprProcessor - .getFuncExprNodeDesc("rand", new exprNodeConstantDesc(Integer + ExprNodeDesc randFunc = TypeCheckProcFactory.DefaultExprProcessor + .getFuncExprNodeDesc("rand", new ExprNodeConstantDesc(Integer .valueOf(460476415))); - exprNodeDesc samplePred = genSamplePredicate(tsSample, null, false, + ExprNodeDesc samplePred = genSamplePredicate(tsSample, null, false, alias, rwsch, qb.getMetaData(), randFunc); - tableOp = OperatorFactory.getAndMakeChild(new filterDesc( + tableOp = OperatorFactory.getAndMakeChild(new FilterDesc( samplePred, true), top); } } @@ -5098,7 +5098,7 @@ // Get the all path by making a select(*) RowResolver allPathRR = opParseCtx.get(op).getRR(); Operator allPath = putOpInsertMap(OperatorFactory.getAndMakeChild( - new selectDesc(true), new RowSchema(allPathRR.getColumnInfos()), + new SelectDesc(true), new RowSchema(allPathRR.getColumnInfos()), op), allPathRR); // Get the UDTF Path @@ -5119,7 +5119,7 @@ LVmergeRowResolvers(udtfPathRR, lateralViewRR, outputInternalColNames); Operator lateralViewJoin = putOpInsertMap(OperatorFactory - .getAndMakeChild(new lateralViewJoinDesc(outputInternalColNames), + .getAndMakeChild(new LateralViewJoinDesc(outputInternalColNames), new RowSchema(lateralViewRR.getColumnInfos()), allPath, udtfPath), lateralViewRR); op = lateralViewJoin; @@ -5161,7 +5161,7 @@ @SuppressWarnings("nls") private void genMapRedTasks(QB qb) throws SemanticException { - fetchWork fetch = null; + FetchWork fetch = null; List> mvTask = new ArrayList>(); Task fetchTask = null; @@ -5179,7 +5179,7 @@ Table tab = (iter.next()).getValue(); if (!tab.isPartitioned()) { if (qbParseInfo.getDestToWhereExpr().isEmpty()) { - fetch = new fetchWork(tab.getPath().toString(), Utilities + fetch = new FetchWork(tab.getPath().toString(), Utilities .getTableDesc(tab), qb.getParseInfo().getOuterQueryLimit()); noMapRed = true; inputs.add(new ReadEntity(tab)); @@ -5210,7 +5210,7 @@ // the filter to prune correctly if (partsList.getUnknownPartns().size() == 0) { List listP = new ArrayList(); - List partP = new ArrayList(); + List partP = new ArrayList(); Set parts = partsList.getConfirmedPartns(); Iterator iterParts = parts.iterator(); @@ -5225,7 +5225,7 @@ inputs.add(new ReadEntity(part)); } - fetch = new fetchWork(listP, partP, qb.getParseInfo() + fetch = new FetchWork(listP, partP, qb.getParseInfo() .getOuterQueryLimit()); noMapRed = true; } @@ -5251,8 +5251,8 @@ String cols = loadFileWork.get(0).getColumns(); String colTypes = loadFileWork.get(0).getColumnTypes(); - fetch = new fetchWork(new 
Path(loadFileWork.get(0).getSourceDir()) - .toString(), new tableDesc(LazySimpleSerDe.class, + fetch = new FetchWork(new Path(loadFileWork.get(0).getSourceDir()) + .toString(), new TableDesc(LazySimpleSerDe.class, TextInputFormat.class, IgnoreKeyTextOutputFormat.class, Utilities .makeProperties( org.apache.hadoop.hive.serde.Constants.SERIALIZATION_FORMAT, @@ -5264,14 +5264,14 @@ fetchTask = TaskFactory.get(fetch, conf); setFetchTask(fetchTask); } else { - new ArrayList(); - for (loadTableDesc ltd : loadTableWork) { - mvTask.add(TaskFactory.get(new moveWork(null, null, ltd, null, false), + new ArrayList(); + for (LoadTableDesc ltd : loadTableWork) { + mvTask.add(TaskFactory.get(new MoveWork(null, null, ltd, null, false), conf)); } boolean oneLoadFile = true; - for (loadFileDesc lfd : loadFileWork) { + for (LoadFileDesc lfd : loadFileWork) { if (qb.isCTAS()) { assert (oneLoadFile); // should not have more than 1 load file for // CTAS @@ -5289,7 +5289,7 @@ lfd.setTargetDir(location); oneLoadFile = false; } - mvTask.add(TaskFactory.get(new moveWork(null, null, null, lfd, false), + mvTask.add(TaskFactory.get(new MoveWork(null, null, null, lfd, false), conf)); } } @@ -5367,12 +5367,12 @@ if (qb.isCTAS()) { // generate a DDL task and make it a dependent task of the leaf - createTableDesc crtTblDesc = qb.getTableDesc(); + CreateTableDesc crtTblDesc = qb.getTableDesc(); validateCreateTable(crtTblDesc); // Clear the output for CTAS since we don't need the output from the - // mapredWork, the + // MapredWork, the // DDLWork at the tail of the chain will have the output getOutputs().clear(); @@ -5415,7 +5415,7 @@ // loop over all the tasks recursviely private void generateCountersTask(Task task) { if ((task instanceof MapRedTask) || (task instanceof ExecDriver)) { - HashMap> opMap = ((mapredWork) task + HashMap> opMap = ((MapredWork) task .getWork()).getAliasToWork(); if (!opMap.isEmpty()) { for (Operator op : opMap.values()) { @@ -5423,7 +5423,7 @@ } } - Operator reducer = ((mapredWork) task.getWork()) + Operator reducer = ((MapredWork) task.getWork()) .getReducer(); if (reducer != null) { LOG.info("Generating counters for operator " + reducer); @@ -5465,7 +5465,7 @@ private void breakTaskTree(Task task) { if ((task instanceof MapRedTask) || (task instanceof ExecDriver)) { - HashMap> opMap = ((mapredWork) task + HashMap> opMap = ((MapredWork) task .getWork()).getAliasToWork(); if (!opMap.isEmpty()) { for (Operator op : opMap.values()) { @@ -5508,7 +5508,7 @@ private void setKeyDescTaskTree(Task task) { if ((task instanceof MapRedTask) || (task instanceof ExecDriver)) { - mapredWork work = (mapredWork) task.getWork(); + MapredWork work = (MapredWork) task.getWork(); work.deriveExplainAttributes(); HashMap> opMap = work .getAliasToWork(); @@ -5697,23 +5697,23 @@ * The expression * @param input * The row resolver - * @return exprNodeDesc + * @return ExprNodeDesc * @throws SemanticException */ @SuppressWarnings("nls") - public exprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input) + public ExprNodeDesc genExprNodeDesc(ASTNode expr, RowResolver input) throws SemanticException { - // We recursively create the exprNodeDesc. Base cases: when we encounter - // a column ref, we convert that into an exprNodeColumnDesc; when we + // We recursively create the ExprNodeDesc. Base cases: when we encounter + // a column ref, we convert that into an ExprNodeColumnDesc; when we // encounter - // a constant, we convert that into an exprNodeConstantDesc. 
For others we + // a constant, we convert that into an ExprNodeConstantDesc. For others we // just // build the exprNodeFuncDesc with recursively built children. // If the current subExpression is pre-calculated, as in Group-By etc. ColumnInfo colInfo = input.get("", expr.toStringTree()); if (colInfo != null) { - return new exprNodeColumnDesc(colInfo.getType(), colInfo + return new ExprNodeColumnDesc(colInfo.getType(), colInfo .getInternalName(), colInfo.getTabAlias(), colInfo .getIsPartitionCol()); } @@ -5754,7 +5754,7 @@ topNodes.add(expr); HashMap nodeOutputs = new HashMap(); ogw.startWalking(topNodes, nodeOutputs); - exprNodeDesc desc = (exprNodeDesc) nodeOutputs.get(expr); + ExprNodeDesc desc = (ExprNodeDesc) nodeOutputs.get(expr); if (desc == null) { throw new SemanticException(tcCtx.getError()); } @@ -5768,11 +5768,11 @@ if (!(entry.getKey() instanceof ASTNode)) { continue; } - if (!(entry.getValue() instanceof exprNodeColumnDesc)) { + if (!(entry.getValue() instanceof ExprNodeColumnDesc)) { continue; } ASTNode node = (ASTNode) entry.getKey(); - exprNodeColumnDesc columnDesc = (exprNodeColumnDesc) entry.getValue(); + ExprNodeColumnDesc columnDesc = (ExprNodeColumnDesc) entry.getValue(); if ((columnDesc.getTabAlias() == null) || (columnDesc.getTabAlias().length() == 0)) { // These aren't real column refs; instead, they are special @@ -6065,11 +6065,11 @@ } // Handle different types of CREATE TABLE command - createTableDesc crtTblDesc = null; + CreateTableDesc crtTblDesc = null; switch (command_type) { case CREATE_TABLE: // REGULAR CREATE TABLE DDL - crtTblDesc = new createTableDesc(tableName, isExt, cols, partCols, + crtTblDesc = new CreateTableDesc(tableName, isExt, cols, partCols, bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serde, mapProp, ifNotExists); @@ -6080,7 +6080,7 @@ break; case CTLT: // create table like - createTableLikeDesc crtTblLikeDesc = new createTableLikeDesc(tableName, + CreateTableLikeDesc crtTblLikeDesc = new CreateTableLikeDesc(tableName, isExt, location, ifNotExists, likeTableName); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), crtTblLikeDesc), conf)); @@ -6101,7 +6101,7 @@ throw new SemanticException(e); } - crtTblDesc = new createTableDesc(tableName, isExt, cols, partCols, + crtTblDesc = new CreateTableDesc(tableName, isExt, cols, partCols, bucketCols, sortCols, numBuckets, fieldDelim, fieldEscape, collItemDelim, mapKeyDelim, lineDelim, comment, inputFormat, outputFormat, location, serde, mapProp, ifNotExists); @@ -6145,7 +6145,7 @@ } } - createVwDesc = new createViewDesc(tableName, cols, comment, ifNotExists); + createVwDesc = new CreateViewDesc(tableName, cols, comment, ifNotExists); unparseTranslator.enable(); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), createVwDesc), conf)); @@ -6177,7 +6177,7 @@ return colNames; } - private void validateCreateTable(createTableDesc crtTblDesc) + private void validateCreateTable(CreateTableDesc crtTblDesc) throws SemanticException { if ((crtTblDesc.getCols() == null) || (crtTblDesc.getCols().size() == 0)) { Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ParseContext.java (working copy) @@ -34,10 +34,10 @@ import 
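A side note on the genExprNodeDesc hunk shown just above, before the ParseContext.java changes continue: the renamed comment describes a recursive translation of the query AST into ExprNodeDesc trees. The sketch below spells out only the documented base case; the method-name suffix and the trailing exception are placeholders, and the remaining cases are handed to TypeCheckProcFactory through a graph walker exactly as in the hunk.

ExprNodeDesc genExprNodeDescSketch(ASTNode expr, RowResolver input) throws SemanticException {
  ColumnInfo colInfo = input.get("", expr.toStringTree());
  if (colInfo != null) {
    // Pre-calculated sub-expression (e.g. a group-by key): reuse it as a column reference.
    return new ExprNodeColumnDesc(colInfo.getType(), colInfo.getInternalName(),
        colInfo.getTabAlias(), colInfo.getIsPartitionCol());
  }
  // Column refs become ExprNodeColumnDesc, constants ExprNodeConstantDesc, and
  // function nodes ExprNodeGenericFuncDesc over recursively generated children.
  throw new SemanticException("sketch only: remaining cases omitted");
}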
org.apache.hadoop.hive.ql.exec.TableScanOperator; import org.apache.hadoop.hive.ql.metadata.Table; import org.apache.hadoop.hive.ql.optimizer.unionproc.UnionProcContext; -import org.apache.hadoop.hive.ql.plan.exprNodeDesc; -import org.apache.hadoop.hive.ql.plan.loadFileDesc; -import org.apache.hadoop.hive.ql.plan.loadTableDesc; -import org.apache.hadoop.hive.ql.plan.filterDesc.sampleDesc; +import org.apache.hadoop.hive.ql.plan.ExprNodeDesc; +import org.apache.hadoop.hive.ql.plan.LoadFileDesc; +import org.apache.hadoop.hive.ql.plan.LoadTableDesc; +import org.apache.hadoop.hive.ql.plan.FilterDesc.sampleDesc; /** * Parse Context: The current parse context. This is passed to the optimizer @@ -52,15 +52,15 @@ public class ParseContext { private QB qb; private ASTNode ast; - private HashMap opToPartPruner; + private HashMap opToPartPruner; private HashMap opToSamplePruner; private HashMap> topOps; private HashMap> topSelOps; private LinkedHashMap, OpParseContext> opParseCtx; private Map joinContext; private HashMap topToTable; - private List loadTableWork; - private List loadFileWork; + private List loadTableWork; + private List loadFileWork; private Context ctx; private HiveConf conf; private HashMap idToTableNameMap; @@ -120,13 +120,13 @@ HiveConf conf, QB qb, ASTNode ast, - HashMap opToPartPruner, + HashMap opToPartPruner, HashMap> topOps, HashMap> topSelOps, LinkedHashMap, OpParseContext> opParseCtx, Map joinContext, HashMap topToTable, - List loadTableWork, List loadFileWork, + List loadTableWork, List loadFileWork, Context ctx, HashMap idToTableNameMap, int destTableId, UnionProcContext uCtx, List listMapJoinOpsNoReducer, Map> groupOpToInputTables, @@ -218,7 +218,7 @@ /** * @return the opToPartPruner */ - public HashMap getOpToPartPruner() { + public HashMap getOpToPartPruner() { return opToPartPruner; } @@ -227,7 +227,7 @@ * the opToPartPruner to set */ public void setOpToPartPruner( - HashMap opToPartPruner) { + HashMap opToPartPruner) { this.opToPartPruner = opToPartPruner; } @@ -296,7 +296,7 @@ /** * @return the loadTableWork */ - public List getLoadTableWork() { + public List getLoadTableWork() { return loadTableWork; } @@ -304,14 +304,14 @@ * @param loadTableWork * the loadTableWork to set */ - public void setLoadTableWork(List loadTableWork) { + public void setLoadTableWork(List loadTableWork) { this.loadTableWork = loadTableWork; } /** * @return the loadFileWork */ - public List getLoadFileWork() { + public List getLoadFileWork() { return loadFileWork; } @@ -319,7 +319,7 @@ * @param loadFileWork * the loadFileWork to set */ - public void setLoadFileWork(List loadFileWork) { + public void setLoadFileWork(List loadFileWork) { this.loadFileWork = loadFileWork; } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/ExplainSemanticAnalyzer.java (working copy) @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.exec.Task; import org.apache.hadoop.hive.ql.exec.TaskFactory; -import org.apache.hadoop.hive.ql.plan.explainWork; +import org.apache.hadoop.hive.ql.plan.ExplainWork; public class ExplainSemanticAnalyzer extends BaseSemanticAnalyzer { @@ -60,7 +60,7 @@ tasks.add(fetchTask); } - rootTasks.add(TaskFactory.get(new explainWork(ctx.getResFile(), tasks, + rootTasks.add(TaskFactory.get(new 
ExplainWork(ctx.getResFile(), tasks, ((ASTNode) ast.getChild(0)).toStringTree(), extended), conf)); } } Index: ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java (working copy) @@ -42,17 +42,17 @@ import org.apache.hadoop.hive.ql.plan.AddPartitionDesc; import org.apache.hadoop.hive.ql.plan.DDLWork; import org.apache.hadoop.hive.ql.plan.MsckDesc; -import org.apache.hadoop.hive.ql.plan.alterTableDesc; -import org.apache.hadoop.hive.ql.plan.descFunctionDesc; -import org.apache.hadoop.hive.ql.plan.descTableDesc; -import org.apache.hadoop.hive.ql.plan.dropTableDesc; -import org.apache.hadoop.hive.ql.plan.fetchWork; -import org.apache.hadoop.hive.ql.plan.showFunctionsDesc; -import org.apache.hadoop.hive.ql.plan.showPartitionsDesc; -import org.apache.hadoop.hive.ql.plan.showTableStatusDesc; -import org.apache.hadoop.hive.ql.plan.showTablesDesc; -import org.apache.hadoop.hive.ql.plan.tableDesc; -import org.apache.hadoop.hive.ql.plan.alterTableDesc.alterTableTypes; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc; +import org.apache.hadoop.hive.ql.plan.DescFunctionDesc; +import org.apache.hadoop.hive.ql.plan.DescTableDesc; +import org.apache.hadoop.hive.ql.plan.DropTableDesc; +import org.apache.hadoop.hive.ql.plan.FetchWork; +import org.apache.hadoop.hive.ql.plan.ShowFunctionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowPartitionsDesc; +import org.apache.hadoop.hive.ql.plan.ShowTableStatusDesc; +import org.apache.hadoop.hive.ql.plan.ShowTablesDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; +import org.apache.hadoop.hive.ql.plan.AlterTableDesc.alterTableTypes; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe; import org.apache.hadoop.mapred.TextInputFormat; @@ -141,7 +141,7 @@ private void analyzeDropTable(ASTNode ast, boolean expectView) throws SemanticException { String tableName = unescapeIdentifier(ast.getChild(0).getText()); - dropTableDesc dropTblDesc = new dropTableDesc(tableName, expectView); + DropTableDesc dropTblDesc = new DropTableDesc(tableName, expectView); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); } @@ -150,7 +150,7 @@ String tableName = unescapeIdentifier(ast.getChild(0).getText()); HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) .getChild(0)); - alterTableDesc alterTblDesc = new alterTableDesc(alterTableTypes.ADDPROPS); + AlterTableDesc alterTblDesc = new AlterTableDesc(alterTableTypes.ADDPROPS); alterTblDesc.setProps(mapProp); alterTblDesc.setOldName(tableName); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), @@ -162,7 +162,7 @@ String tableName = unescapeIdentifier(ast.getChild(0).getText()); HashMap mapProp = getProps((ASTNode) (ast.getChild(1)) .getChild(0)); - alterTableDesc alterTblDesc = new alterTableDesc( + AlterTableDesc alterTblDesc = new AlterTableDesc( alterTableTypes.ADDSERDEPROPS); alterTblDesc.setProps(mapProp); alterTblDesc.setOldName(tableName); @@ -173,7 +173,7 @@ private void analyzeAlterTableSerde(ASTNode ast) throws SemanticException { String tableName = unescapeIdentifier(ast.getChild(0).getText()); String serdeName = unescapeSQLString(ast.getChild(1).getText()); - alterTableDesc alterTblDesc = new alterTableDesc(alterTableTypes.ADDSERDE); + AlterTableDesc 
alterTblDesc = new AlterTableDesc(alterTableTypes.ADDSERDE); if (ast.getChildCount() > 2) { HashMap mapProp = getProps((ASTNode) (ast.getChild(2)) .getChild(0)); @@ -220,7 +220,7 @@ serde = COLUMNAR_SERDE; break; } - alterTableDesc alterTblDesc = new alterTableDesc(tableName, inputFormat, + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, inputFormat, outputFormat, serde); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -242,7 +242,7 @@ if (numBuckets <= 0) { throw new SemanticException(ErrorMsg.INVALID_BUCKET_NUMBER.getMsg()); } - alterTableDesc alterTblDesc = new alterTableDesc(tableName, numBuckets, + AlterTableDesc alterTblDesc = new AlterTableDesc(tableName, numBuckets, bucketCols, sortCols); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -294,7 +294,7 @@ prop.setProperty("columns", colTypes[0]); prop.setProperty("columns.types", colTypes[1]); - fetchWork fetch = new fetchWork(ctx.getResFile().toString(), new tableDesc( + FetchWork fetch = new FetchWork(ctx.getResFile().toString(), new TableDesc( LazySimpleSerDe.class, TextInputFormat.class, IgnoreKeyTextOutputFormat.class, prop), -1); fetch.setSerializationNullFormat(" "); @@ -314,7 +314,7 @@ } boolean isExt = ast.getChildCount() > 1; - descTableDesc descTblDesc = new descTableDesc(ctx.getResFile(), tableName, + DescTableDesc descTblDesc = new DescTableDesc(ctx.getResFile(), tableName, partSpec, isExt); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descTblDesc), conf)); @@ -334,21 +334,21 @@ } private void analyzeShowPartitions(ASTNode ast) throws SemanticException { - showPartitionsDesc showPartsDesc; + ShowPartitionsDesc showPartsDesc; String tableName = unescapeIdentifier(ast.getChild(0).getText()); - showPartsDesc = new showPartitionsDesc(tableName, ctx.getResFile()); + showPartsDesc = new ShowPartitionsDesc(tableName, ctx.getResFile()); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showPartsDesc), conf)); setFetchTask(createFetchTask(showPartsDesc.getSchema())); } private void analyzeShowTables(ASTNode ast) throws SemanticException { - showTablesDesc showTblsDesc; + ShowTablesDesc showTblsDesc; if (ast.getChildCount() == 1) { String tableNames = unescapeSQLString(ast.getChild(0).getText()); - showTblsDesc = new showTablesDesc(ctx.getResFile(), tableNames); + showTblsDesc = new ShowTablesDesc(ctx.getResFile(), tableNames); } else { - showTblsDesc = new showTablesDesc(ctx.getResFile()); + showTblsDesc = new ShowTablesDesc(ctx.getResFile()); } rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblsDesc), conf)); @@ -356,7 +356,7 @@ } private void analyzeShowTableStatus(ASTNode ast) throws SemanticException { - showTableStatusDesc showTblStatusDesc; + ShowTableStatusDesc showTblStatusDesc; String tableNames = unescapeIdentifier(ast.getChild(0).getText()); String dbName = MetaStoreUtils.DEFAULT_DATABASE_NAME; int children = ast.getChildCount(); @@ -376,7 +376,7 @@ } } } - showTblStatusDesc = new showTableStatusDesc(ctx.getResFile(), dbName, + showTblStatusDesc = new ShowTableStatusDesc(ctx.getResFile(), dbName, tableNames, partSpec); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showTblStatusDesc), conf)); @@ -393,12 +393,12 @@ * Parsin failed */ private void analyzeShowFunctions(ASTNode ast) throws SemanticException { - showFunctionsDesc showFuncsDesc; + ShowFunctionsDesc showFuncsDesc; if (ast.getChildCount() == 1) { String funcNames = 
stripQuotes(ast.getChild(0).getText()); - showFuncsDesc = new showFunctionsDesc(ctx.getResFile(), funcNames); + showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile(), funcNames); } else { - showFuncsDesc = new showFunctionsDesc(ctx.getResFile()); + showFuncsDesc = new ShowFunctionsDesc(ctx.getResFile()); } rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), showFuncsDesc), conf)); @@ -428,7 +428,7 @@ throw new SemanticException("Unexpected Tokens at DESCRIBE FUNCTION"); } - descFunctionDesc descFuncDesc = new descFunctionDesc(ctx.getResFile(), + DescFunctionDesc descFuncDesc = new DescFunctionDesc(ctx.getResFile(), funcName, isExtended); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), descFuncDesc), conf)); @@ -436,7 +436,7 @@ } private void analyzeAlterTableRename(ASTNode ast) throws SemanticException { - alterTableDesc alterTblDesc = new alterTableDesc(unescapeIdentifier(ast + AlterTableDesc alterTblDesc = new AlterTableDesc(unescapeIdentifier(ast .getChild(0).getText()), unescapeIdentifier(ast.getChild(1).getText())); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -469,7 +469,7 @@ } } - alterTableDesc alterTblDesc = new alterTableDesc(tblName, + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, unescapeIdentifier(ast.getChild(1).getText()), unescapeIdentifier(ast .getChild(2).getText()), newType, newComment, first, flagCol); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), @@ -480,7 +480,7 @@ alterTableTypes alterType) throws SemanticException { String tblName = unescapeIdentifier(ast.getChild(0).getText()); List newCols = getColumns((ASTNode) ast.getChild(1)); - alterTableDesc alterTblDesc = new alterTableDesc(tblName, newCols, + AlterTableDesc alterTblDesc = new AlterTableDesc(tblName, newCols, alterType); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), alterTblDesc), conf)); @@ -490,7 +490,7 @@ String tblName = unescapeIdentifier(ast.getChild(0).getText()); // get table metadata List> partSpecs = getPartitionSpecs(ast); - dropTableDesc dropTblDesc = new dropTableDesc(tblName, partSpecs); + DropTableDesc dropTblDesc = new DropTableDesc(tblName, partSpecs); rootTasks.add(TaskFactory.get(new DDLWork(getInputs(), getOutputs(), dropTblDesc), conf)); } Index: ql/src/java/org/apache/hadoop/hive/ql/Driver.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/Driver.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/Driver.java (working copy) @@ -59,7 +59,7 @@ import org.apache.hadoop.hive.ql.parse.ParseUtils; import org.apache.hadoop.hive.ql.parse.SemanticAnalyzerFactory; import org.apache.hadoop.hive.ql.parse.SemanticException; -import org.apache.hadoop.hive.ql.plan.tableDesc; +import org.apache.hadoop.hive.ql.plan.TableDesc; import org.apache.hadoop.hive.ql.processors.CommandProcessor; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; @@ -152,8 +152,8 @@ } FetchTask ft = (FetchTask) sem.getFetchTask(); - tableDesc td = ft.getTblDesc(); - // partitioned tables don't have tableDesc set on the FetchTask. Instead + TableDesc td = ft.getTblDesc(); + // partitioned tables don't have TableDesc set on the FetchTask. Instead // they have a list of PartitionDesc objects, each with a table desc. 
// Let's // try to fetch the desc for the first partition and use it's Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFYear.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "year", value = "_FUNC_(date) - Returns the year of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " +@Description(name = "year", value = "_FUNC_(date) - Returns the year of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " + "'yyyy-MM-dd'.\n" + "Example:\n " + " > SELECT _FUNC_('2009-30-07', 1) FROM src LIMIT 1;\n" + " 2009") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFWeekOfYear.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFWeekOfYear.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFWeekOfYear.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "yearweek", value = "_FUNC_(date) - Returns the week of the year of the given date. A week " +@Description(name = "yearweek", value = "_FUNC_(date) - Returns the week of the year of the given date. 
A week " + "is considered to start on a Monday and week 1 is the first week with >3 days.", extended = "Examples:\n" + " > SELECT _FUNC_('2008-02-20') FROM src LIMIT 1;\n" + " 8\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPPositive.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPPositive.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPPositive.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -28,7 +28,7 @@ import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "positive", value = "_FUNC_ a - Returns a") +@Description(name = "positive", value = "_FUNC_ a - Returns a") public class UDFOPPositive extends UDFBaseNumericUnaryOp { private static Log LOG = LogFactory.getLog(UDFOPPositive.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog2.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog2.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog2.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "log2", value = "_FUNC_(x) - Returns the logarithm of x with base 2", extended = "Example:\n" +@Description(name = "log2", value = "_FUNC_(x) - Returns the logarithm of x with base 2", extended = "Example:\n" + " > SELECT _FUNC_(2) FROM src LIMIT 1;\n" + " 1") public class UDFLog2 extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPGreaterThan.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPGreaterThan.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPGreaterThan.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -31,7 +31,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = ">", value = "a _FUNC_ b - Returns TRUE if a is greater than b") +@Description(name = ">", value = "a _FUNC_ b - Returns TRUE if a is greater than b") public class UDFOPGreaterThan extends UDFBaseCompare { private static Log LOG = LogFactory.getLog(UDFOPGreaterThan.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAbs.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAbs.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAbs.java (working copy) @@ -19,12 +19,12 @@ package org.apache.hadoop.hive.ql.udf; import 
org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "abs", value = "_FUNC_(x) - returns the absolute value of x", extended = "Example:\n" +@Description(name = "abs", value = "_FUNC_(x) - returns the absolute value of x", extended = "Example:\n" + " > SELECT _FUNC_(0) FROM src LIMIT 1;\n" + " 0\n" + " > SELECT _FUNC_(-5) FROM src LIMIT 1;\n" + " 5") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConv.java (working copy) @@ -20,11 +20,11 @@ import java.util.Arrays; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "conv", value = "_FUNC_(num, from_base, to_base) - convert num from from_base to" +@Description(name = "conv", value = "_FUNC_(num, from_base, to_base) - convert num from from_base to" + " to_base", extended = "If to_base is negative, treat num as a signed integer," + "otherwise, treat it as an unsigned integer.\n" + "Example:\n" @@ -190,4 +190,4 @@ result.set(value, first, value.length - first); return result; } -} \ No newline at end of file +} Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFBin.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFBin.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFBin.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "bin", value = "_FUNC_(n) - returns n in binary", extended = "n is a BIGINT. Returns NULL if n is NULL.\n" +@Description(name = "bin", value = "_FUNC_(n) - returns n in binary", extended = "n is a BIGINT. 
Returns NULL if n is NULL.\n" + "Example:\n" + " > SELECT _FUNC_(13) FROM src LIMIT 1\n" + " '1101'") public class UDFBin extends UDF { private final Text result = new Text(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqualOrLessThan.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqualOrLessThan.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqualOrLessThan.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -31,7 +31,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "<=", value = "a _FUNC_ b - Returns TRUE if b is not greater than a") +@Description(name = "<=", value = "a _FUNC_ b - Returns TRUE if b is not greater than a") public class UDFOPEqualOrLessThan extends UDFBaseCompare { private static Log LOG = LogFactory.getLog(UDFOPEqualOrLessThan.class Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDate.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDate.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDate.java (working copy) @@ -25,10 +25,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "to_date", value = "_FUNC_(expr) - Extracts the date part of the date or datetime " +@Description(name = "to_date", value = "_FUNC_(expr) - Extracts the date part of the date or datetime " + "expression expr", extended = "Example:\n " + " > SELECT _FUNC_('2009-30-07 04:17:52') FROM src LIMIT 1;\n" + " '2009-30-07'") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitOr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitOr.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitOr.java (working copy) @@ -20,13 +20,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "|", value = "a _FUNC_ b - Bitwise or", extended = "Example:\n" +@Description(name = "|", value = "a _FUNC_ b - Bitwise or", extended = "Example:\n" + " > SELECT 3 _FUNC_ 5 FROM src LIMIT 1;\n" + " 7") public class UDFOPBitOr extends UDFBaseBitOP { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMultiply.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMultiply.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMultiply.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import 
org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -28,7 +28,7 @@ import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "*", value = "a _FUNC_ b - Multiplies a by b") +@Description(name = "*", value = "a _FUNC_ b - Multiplies a by b") public class UDFOPMultiply extends UDFBaseNumericOp { private static Log LOG = LogFactory Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSubstr.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "substr,substring", value = "_FUNC_(str, pos[, len]) - returns the substring of str that" +@Description(name = "substr,substring", value = "_FUNC_(str, pos[, len]) - returns the substring of str that" + " starts at pos and is of length len", extended = "pos is a 1-based index. If pos<0 the starting position is" + " determined by counting backwards from the end of str.\n" + "Example:\n " Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFCos.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFCos.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFCos.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "cos", value = "_FUNC_(x) - returns the cosine of x (x is in radians)", extended = "Example:\n " +@Description(name = "cos", value = "_FUNC_(x) - returns the cosine of x (x is in radians)", extended = "Example:\n " + " > SELECT _FUNC_(0) FROM src LIMIT 1;\n" + " 1") public class UDFCos extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHex.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHex.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHex.java (working copy) @@ -19,12 +19,12 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "hex", value = "_FUNC_(n or str) - Convert the argument to hexadecimal ", extended = "If the argument is a string, returns two hex digits for each " +@Description(name = "hex", value = "_FUNC_(n or str) - Convert the argument to hexadecimal ", extended = "If the argument is a string, returns two hex digits for each " + "character in the string.\n" + "If the argument is a number, returns the hexadecimal representation.\n" + "Example:\n" Index: 
ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRound.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRound.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRound.java (working copy) @@ -22,12 +22,12 @@ import java.math.RoundingMode; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "round", value = "_FUNC_(x[, d]) - round x to d decimal places", extended = "Example:\n" +@Description(name = "round", value = "_FUNC_(x[, d]) - round x to d decimal places", extended = "Example:\n" + " > SELECT _FUNC_(12.3456, 1) FROM src LIMIT 1;\n" + " 12.3'") public class UDFRound extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLower.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLower.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLower.java (working copy) @@ -19,10 +19,10 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "lower,lcase", value = "_FUNC_(str) - Returns str with all characters changed to lowercase", extended = "Example:\n" +@Description(name = "lower,lcase", value = "_FUNC_(str) - Returns str with all characters changed to lowercase", extended = "Example:\n" + " > SELECT _FUNC_('Facebook') FROM src LIMIT 1;\n" + " 'facebook'") public class UDFLower extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSqrt.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSqrt.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSqrt.java (working copy) @@ -21,13 +21,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; /** * Implementation of the SQRT UDF found in many databases. 
*/ -@description(name = "sqrt", value = "_FUNC_(x) - returns the square root of x", extended = "Example:\n " +@Description(name = "sqrt", value = "_FUNC_(x) - returns the square root of x", extended = "Example:\n " + " > SELECT _FUNC_(4) FROM src LIMIT 1;\n" + " 2") public class UDFSqrt extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExp.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExp.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExp.java (working copy) @@ -24,11 +24,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.Text; -@description(name = "rlike,regexp", value = "str _FUNC_ regexp - Returns true if str matches regexp and " +@Description(name = "rlike,regexp", value = "str _FUNC_ regexp - Returns true if str matches regexp and " + "false otherwise", extended = "Example:\n" + " > SELECT 'fb' _FUNC_ '.*' FROM src LIMIT 1;\n" + " true") public class UDFRegExp extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUpper.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUpper.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUpper.java (working copy) @@ -19,10 +19,10 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "upper,ucase", value = "_FUNC_(str) - Returns str with all characters changed to uppercase", extended = "Example:\n" +@Description(name = "upper,ucase", value = "_FUNC_(str) - Returns str with all characters changed to uppercase", extended = "Example:\n" + " > SELECT _FUNC_('Facebook') FROM src LIMIT 1;\n" + " 'FACEBOOK'") public class UDFUpper extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFPower.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFPower.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFPower.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "power,pow", value = "_FUNC_(x1, x2) - raise x1 to the power of x2", extended = "Example:\n" +@Description(name = "power,pow", value = "_FUNC_(x1, x2) - raise x1 to the power of x2", extended = "Example:\n" + " > SELECT _FUNC_(2, 3) FROM src LIMIT 1;\n" + " 8") public class UDFPower extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqualOrGreaterThan.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqualOrGreaterThan.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqualOrGreaterThan.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import 
org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -31,7 +31,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = ">=", value = "a _FUNC_ b - Returns TRUE if b is not smaller than a") +@Description(name = ">=", value = "a _FUNC_ b - Returns TRUE if b is not smaller than a") public class UDFOPEqualOrGreaterThan extends UDFBaseCompare { private static Log LOG = LogFactory.getLog(UDFOPEqualOrGreaterThan.class Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitNot.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitNot.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitNot.java (working copy) @@ -20,13 +20,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "~", value = "_FUNC_ n - Bitwise not", extended = "Example:\n" +@Description(name = "~", value = "_FUNC_ n - Bitwise not", extended = "Example:\n" + " > SELECT _FUNC_ 0 FROM src LIMIT 1;\n" + " -1") public class UDFOPBitNot extends UDFBaseBitOP { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNot.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNot.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNot.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.BooleanWritable; -@description(name = "not,!", value = "_FUNC_ a - Logical not") +@Description(name = "not,!", value = "_FUNC_ a - Logical not") public class UDFOPNot extends UDF { private static Log LOG = LogFactory Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFPosMod.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFPosMod.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFPosMod.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -33,7 +33,7 @@ * {org.apache.hadoop.hive.ql.udf.UDFOPMod} See * {org.apache.hadoop.hive.ql.exec.FunctionRegistry} */ -@description(name = "pmod", value = "a _FUNC_ b - Compute the positive modulo") +@Description(name = "pmod", value = "a _FUNC_ b - Compute the positive modulo") public class UDFPosMod extends UDFBaseNumericOp { private static Log LOG = LogFactory Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMinute.java 
=================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMinute.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMinute.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "minute", value = "_FUNC_(date) - Returns the minute of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " +@Description(name = "minute", value = "_FUNC_(date) - Returns the minute of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " + "'HH:mm:ss'.\n" + "Example:\n " + " > SELECT _FUNC_('2009-07-30 12:58:59') FROM src LIMIT 1;\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNegative.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNegative.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNegative.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -28,7 +28,7 @@ import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "-", value = "_FUNC_ a - Returns -a") +@Description(name = "-", value = "_FUNC_ a - Returns -a") public class UDFOPNegative extends UDFBaseNumericUnaryOp { private static Log LOG = LogFactory.getLog(UDFOPNegative.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPDivide.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPDivide.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPDivide.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "/", value = "a _FUNC_ b - Divide a by b", extended = "Example:\n" +@Description(name = "/", value = "a _FUNC_ b - Divide a by b", extended = "Example:\n" + " > SELECT 3 _FUNC_ 2 FROM src LIMIT 1;\n" + " 1.5") /** * Note that in SQL, the return type of divide is not necessarily the same Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateDiff.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateDiff.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateDiff.java (working copy) @@ -25,11 +25,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "datediff", value 
= "_FUNC_(date1, date2) - Returns the number of days between date1 " +@Description(name = "datediff", value = "_FUNC_(date1, date2) - Returns the number of days between date1 " + "and date2", extended = "date1 and date2 are strings in the format " + "'yyyy-MM-dd HH:mm:ss' or 'yyyy-MM-dd'. The time parts are ignored." + "If date1 is earlier than date2, the result is negative.\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitXor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitXor.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitXor.java (working copy) @@ -20,13 +20,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "^", value = "a _FUNC_ b - Bitwise exclusive or", extended = "Example:\n" +@Description(name = "^", value = "a _FUNC_ b - Bitwise exclusive or", extended = "Example:\n" + " > SELECT 3 _FUNC_ 5 FROM src LIMIT 1;\n" + " 2") public class UDFOPBitXor extends UDFBaseBitOP { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqual.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqual.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPEqual.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -35,7 +35,7 @@ * performance; otherwise a single method that takes (Number a, Number b) and * use a.doubleValue() == b.doubleValue() is enough. */ -@description(name = "=,==", value = "a _FUNC_ b - Returns TRUE if a equals b and false otherwise") +@Description(name = "=,==", value = "a _FUNC_ b - Returns TRUE if a equals b and false otherwise") public class UDFOPEqual extends UDFBaseCompare { private static Log LOG = LogFactory.getLog(UDFOPEqual.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConcat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConcat.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFConcat.java (working copy) @@ -19,10 +19,10 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "concat", value = "_FUNC_(str1, str2, ... strN) - returns the concatenation of str1, str2, ... strN", extended = "Returns NULL if any argument is NULL.\n" +@Description(name = "concat", value = "_FUNC_(str1, str2, ... strN) - returns the concatenation of str1, str2, ... 
strN", extended = "Returns NULL if any argument is NULL.\n" + "Example:\n" + " > SELECT _FUNC_('abc', 'def') FROM src LIMIT 1;\n" + " 'abcdef'") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSecond.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSecond.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSecond.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "second", value = "_FUNC_(date) - Returns the second of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " +@Description(name = "second", value = "_FUNC_(date) - Returns the second of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " + "'HH:mm:ss'.\n" + "Example:\n " + " > SELECT _FUNC_('2009-07-30 12:58:59') FROM src LIMIT 1;\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFCeil.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFCeil.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFCeil.java (working copy) @@ -21,11 +21,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "ceil,ceiling", value = "_FUNC_(x) - Find the smallest integer not smaller than x", extended = "Example:\n" +@Description(name = "ceil,ceiling", value = "_FUNC_(x) - Find the smallest integer not smaller than x", extended = "Example:\n" + " > SELECT _FUNC_(-0.1) FROM src LIMIT 1;\n" + " 0\n" + " > SELECT _FUNC_(5) FROM src LIMIT 1;\n" + " 5") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMod.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMod.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMod.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -28,7 +28,7 @@ import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "%", value = "a _FUNC_ b - Returns the remainder when dividing a by b") +@Description(name = "%", value = "a _FUNC_ b - Returns the remainder when dividing a by b") public class UDFOPMod extends UDFBaseNumericOp { private static Log LOG = LogFactory Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPLongDivide.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPLongDivide.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPLongDivide.java (working copy) @@ -21,10 +21,10 @@ import 
org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.LongWritable; -@description(name = "div", value = "a _FUNC_ b - Divide a by b rounded to the long integer", extended = "Example:\n" +@Description(name = "div", value = "a _FUNC_ b - Divide a by b rounded to the long integer", extended = "Example:\n" + " > SELECT 3 _FUNC_ 2 FROM src LIMIT 1;\n" + " 1") public class UDFOPLongDivide extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFMin.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFMin.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFMin.java (working copy) @@ -20,7 +20,7 @@ import org.apache.hadoop.hive.ql.exec.UDAF; import org.apache.hadoop.hive.ql.exec.UDAFEvaluator; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.hive.shims.ShimLoader; @@ -29,7 +29,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "min", value = "_FUNC_(expr) - Returns the minimum value of expr") +@Description(name = "min", value = "_FUNC_(expr) - Returns the minimum value of expr") public class UDAFMin extends UDAF { static public class MinShortEvaluator implements UDAFEvaluator { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExpExtract.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExpExtract.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExpExtract.java (working copy) @@ -25,14 +25,14 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; /** * UDF to extract a specific group identified by a java regex. 
Note that if a * regexp has a backslash ('\'), then need to specify '\\' For example, * regexp_extract('100-200', '(\\d+)-(\\d+)', 1) will return '100' */ -@description(name = "regexp_extract", value = "_FUNC_(str, regexp[, idx]) - extracts a group that matches regexp", extended = "Example:\n" +@Description(name = "regexp_extract", value = "_FUNC_(str, regexp[, idx]) - extracts a group that matches regexp", extended = "Example:\n" + " > SELECT _FUNC_('100-200', '(\\d+)-(\\d+)', 1) FROM src LIMIT 1;\n" + " '100'") public class UDFRegExpExtract extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFromUnixTime.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFromUnixTime.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFromUnixTime.java (working copy) @@ -24,12 +24,12 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "from_unixtime", value = "_FUNC_(unix_time, format) - returns unix_time in the specified " +@Description(name = "from_unixtime", value = "_FUNC_(unix_time, format) - returns unix_time in the specified " + "format", extended = "Example:\n" + " > SELECT _FUNC_(0, 'yyyy-MM-dd HH:mm:ss') FROM src LIMIT 1;\n" + " '1970-01-01 00:00:00'") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateSub.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateSub.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateSub.java (working copy) @@ -27,11 +27,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "date_sub", value = "_FUNC_(start_date, num_days) - Returns the date that is num_days" +@Description(name = "date_sub", value = "_FUNC_(start_date, num_days) - Returns the date that is num_days" + " before start_date.", extended = "start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or" + " 'yyyy-MM-dd'. num_days is a number. 
The time part of start_date is " + "ignored.\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNotEqual.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNotEqual.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPNotEqual.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -30,7 +30,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "<>", value = "a _FUNC_ b - Returns TRUE if a is not equal to b") +@Description(name = "<>", value = "a _FUNC_ b - Returns TRUE if a is not equal to b") public class UDFOPNotEqual extends UDFBaseCompare { private static Log LOG = LogFactory.getLog(UDFOPNotEqual.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAsin.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAsin.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAsin.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "asin", value = "_FUNC_(x) - returns the arc sine of x if -1<=x<=1 or NULL otherwise", extended = "Example:\n" +@Description(name = "asin", value = "_FUNC_(x) - returns the arc sine of x if -1<=x<=1 or NULL otherwise", extended = "Example:\n" + " > SELECT _FUNC_(0) FROM src LIMIT 1;\n" + " 0\n" + " > SELECT _FUNC_(2) FROM src LIMIT 1;\n" + " NULL") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFExp.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFExp.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFExp.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "exp", value = "_FUNC_(x) - Returns e to the power of x", extended = "Example:\n " +@Description(name = "exp", value = "_FUNC_(x) - Returns e to the power of x", extended = "Example:\n " + " > SELECT _FUNC_(0) FROM src LIMIT 1;\n" + " 1") public class UDFExp extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFSum.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; 
import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -33,7 +33,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.util.StringUtils; -@description(name = "sum", value = "_FUNC_(x) - Returns the sum of a set of numbers") +@Description(name = "sum", value = "_FUNC_(x) - Returns the sum of a set of numbers") public class GenericUDAFSum implements GenericUDAFResolver { static final Log LOG = LogFactory.getLog(GenericUDAFSum.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIndex.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIndex.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFIndex.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.MapObjectInspector; @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorUtils; import org.apache.hadoop.io.IntWritable; -@description(name = "index", value = "_FUNC_(a, n) - Returns the n-th element of a ") +@Description(name = "index", value = "_FUNC_(a, n) - Returns the n-th element of a ") public class GenericUDFIndex extends GenericUDF { private MapObjectInspector mapOI; private boolean mapKeyPreferWritable; Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNull.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNull.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNull.java (working copy) @@ -20,13 +20,13 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.BooleanWritable; -@description(name = "isnull", value = "_FUNC_ a - Returns true if a is NULL and false otherwise") +@Description(name = "isnull", value = "_FUNC_ a - Returns true if a is NULL and false otherwise") public class GenericUDFOPNull extends GenericUDF { BooleanWritable result = new BooleanWritable(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSize.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSize.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSize.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import 
org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.IntWritable; -@description(name = "size", value = "_FUNC_(a) - Returns the size of a") +@Description(name = "size", value = "_FUNC_(a) - Returns the size of a") public class GenericUDFSize extends GenericUDF { private ObjectInspector returnOI; private final IntWritable result = new IntWritable(-1); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFAverage.java (working copy) @@ -22,7 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -40,7 +40,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.util.StringUtils; -@description(name = "avg", value = "_FUNC_(x) - Returns the mean of a set of numbers") +@Description(name = "avg", value = "_FUNC_(x) - Returns the mean of a set of numbers") public class GenericUDAFAverage implements GenericUDAFResolver { static final Log LOG = LogFactory.getLog(GenericUDAFAverage.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFConcatWS.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde.Constants; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -37,7 +37,7 @@ * * @see org.apache.hadoop.hive.ql.udf.generic.GenericUDF */ -@description(name = "concat_ws", value = "_FUNC_(separator, str1, str2, ...) - " +@Description(name = "concat_ws", value = "_FUNC_(separator, str1, str2, ...) 
- " + "returns the concatenation of the strings separated by the separator.", extended = "Example:\n" + " > SELECT _FUNC_('ce', 'fa', 'book') FROM src LIMIT 1;\n" + " 'facebook'") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFHash.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFHash.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFHash.java (working copy) @@ -22,7 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorUtils; @@ -32,7 +32,7 @@ /** * GenericUDF Class for computing hash values. */ -@description(name = "hash", value = "_FUNC_(a1, a2, ...) - Returns a hash value of the arguments") +@Description(name = "hash", value = "_FUNC_(a1, a2, ...) - Returns a hash value of the arguments") public class GenericUDFHash extends GenericUDF { private static Log LOG = LogFactory.getLog(GenericUDFHash.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFCount.java (working copy) @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -30,7 +30,7 @@ /** * This class implements the COUNT aggregation function as in SQL. 
*/ -@description(name = "count", value = "_FUNC_(x) - Returns the count") +@Description(name = "count", value = "_FUNC_(x) - Returns the count") public class GenericUDAFCount implements GenericUDAFResolver { @Override Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotNull.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotNull.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFOPNotNull.java (working copy) @@ -20,13 +20,13 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.BooleanWritable; -@description(name = "isnotnull", value = "_FUNC_ a - Returns true if a is not NULL and false otherwise") +@Description(name = "isnotnull", value = "_FUNC_ a - Returns true if a is not NULL and false otherwise") public class GenericUDFOPNotNull extends GenericUDF { BooleanWritable result = new BooleanWritable(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFSplit.java (working copy) @@ -22,7 +22,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -30,7 +30,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.Text; -@description(name = "split", value = "_FUNC_(str, regex) - Splits str around occurances that match " +@Description(name = "split", value = "_FUNC_(str, regex) - Splits str around occurances that match " + "regex", extended = "Example:\n" + " > SELECT _FUNC_('oneAtwoBthreeC', '[ABC]') FROM src LIMIT 1;\n" + " [\"one\", \"two\", \"three\"]") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStd.java (working copy) @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -30,7 +30,7 @@ * overriding the terminate() method of the evaluator. 
* */ -@description(name = "std,stddev,stddev_pop", value = "_FUNC_(x) - Returns the standard deviation of a set of numbers") +@Description(name = "std,stddev,stddev_pop", value = "_FUNC_(x) - Returns the standard deviation of a set of numbers") public class GenericUDAFStd extends GenericUDAFVariance { @Override Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVarianceSample.java (working copy) @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -30,7 +30,7 @@ * the terminate() method of the evaluator. * */ -@description(name = "var_samp", value = "_FUNC_(x) - Returns the sample variance of a set of numbers") +@Description(name = "var_samp", value = "_FUNC_(x) - Returns the sample variance of a set of numbers") public class GenericUDAFVarianceSample extends GenericUDAFVariance { @Override Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLocate.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLocate.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFLocate.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -42,7 +42,7 @@ * *
*/ -@description(name = "locate", value = "_FUNC_(substr, str[, pos]) - Returns the position of the first " +@Description(name = "locate", value = "_FUNC_(substr, str[, pos]) - Returns the position of the first " + "occurance of substr in str after position pos", extended = "Example:\n" + " > SELECT _FUNC_('bar', 'foobarbar', 5) FROM src LIMIT 1;\n" + " 7") public class GenericUDFLocate extends GenericUDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFMap.java (working copy) @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -32,7 +32,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -@description(name = "map", value = "_FUNC_(key0, value0, key1, value1...) - Creates a map with the given key/value pairs ") +@Description(name = "map", value = "_FUNC_(key0, value0, key1, value1...) - Creates a map with the given key/value pairs ") public class GenericUDFMap extends GenericUDF { Converter[] converters; HashMap ret = new HashMap(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCoalesce.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCoalesce.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFCoalesce.java (working copy) @@ -21,7 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -31,7 +31,7 @@ * NOTES: 1. a, b and c should have the same TypeInfo, or an exception will be * thrown. */ -@description(name = "coalesce", value = "_FUNC_(a1, a2, ...) - Returns the first non-null argument", extended = "Example:\n" +@Description(name = "coalesce", value = "_FUNC_(a1, a2, ...) 
- Returns the first non-null argument", extended = "Example:\n" + " > SELECT _FUNC_(NULL, 1, NULL) FROM src LIMIT 1;\n" + " 1") public class GenericUDFCoalesce extends GenericUDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArray.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArray.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFArray.java (working copy) @@ -22,7 +22,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -31,7 +31,7 @@ import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters.Converter; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; -@description(name = "array", value = "_FUNC_(n0, n1...) - Creates an array with the given elements ") +@Description(name = "array", value = "_FUNC_(n0, n1...) - Creates an array with the given elements ") public class GenericUDFArray extends GenericUDF { Converter[] converters; Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFField.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFField.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFField.java (working copy) @@ -21,14 +21,14 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector.Category; import org.apache.hadoop.hive.serde2.objectinspector.primitive.PrimitiveObjectInspectorFactory; import org.apache.hadoop.io.IntWritable; -@description(name = "field", value = "_FUNC_(str, str1, str2, ...) - returns the index of str in the str1,str2,... list or 0 if not found", extended = "All primitive types are supported, arguments are compared using str.equals(x)." +@Description(name = "field", value = "_FUNC_(str, str1, str2, ...) - returns the index of str in the str1,str2,... list or 0 if not found", extended = "All primitive types are supported, arguments are compared using str.equals(x)." 
+ " If str is NULL, the return value is 0.") public class GenericUDFField extends GenericUDF { @Override Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFExplode.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFExplode.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFExplode.java (working copy) @@ -22,14 +22,14 @@ import java.util.List; import org.apache.hadoop.hive.ql.exec.UDFArgumentException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ListObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorFactory; import org.apache.hadoop.hive.serde2.objectinspector.StructObjectInspector; -@description(name = "explode", value = "_FUNC_(a) - separates the elements of array a into multiple rows ") +@Description(name = "explode", value = "_FUNC_(a) - separates the elements of array a into multiple rows ") public class GenericUDTFExplode extends GenericUDTF { ListObjectInspector listOI = null; Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFElt.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFElt.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFElt.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -36,7 +36,7 @@ * * @see org.apache.hadoop.hive.ql.udf.generic.GenericUDF */ -@description(name = "elt", value = "_FUNC_(n, str1, str2, ...) - returns the n-th string", extended = "Example:\n" +@Description(name = "elt", value = "_FUNC_(n, str1, str2, ...) 
- returns the n-th string", extended = "Example:\n" + " > SELECT _FUNC_(1, 'face', 'book') FROM src LIMIT 1;\n" + " 'face'") public class GenericUDFElt extends GenericUDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFVariance.java (working copy) @@ -22,7 +22,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.io.DoubleWritable; @@ -45,7 +45,7 @@ * GenericUDAFStd GenericUDAFStdSample * */ -@description(name = "variance,var_pop", value = "_FUNC_(x) - Returns the variance of a set of numbers") +@Description(name = "variance,var_pop", value = "_FUNC_(x) - Returns the variance of a set of numbers") public class GenericUDAFVariance implements GenericUDAFResolver { static final Log LOG = LogFactory.getLog(GenericUDAFVariance.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDAFStdSample.java (working copy) @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.ql.udf.generic; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.parse.SemanticException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; @@ -30,7 +30,7 @@ * overriding the terminate() method of the evaluator. * */ -@description(name = "stddev_samp", value = "_FUNC_(x) - Returns the sample standard deviation of a set of " +@Description(name = "stddev_samp", value = "_FUNC_(x) - Returns the sample standard deviation of a set of " + "numbers") public class GenericUDAFStdSample extends GenericUDAFVariance { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInstr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInstr.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFInstr.java (working copy) @@ -21,7 +21,7 @@ import org.apache.hadoop.hive.ql.exec.UDFArgumentException; import org.apache.hadoop.hive.ql.exec.UDFArgumentLengthException; import org.apache.hadoop.hive.ql.exec.UDFArgumentTypeException; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspector; import org.apache.hadoop.hive.serde2.objectinspector.ObjectInspectorConverters; @@ -41,7 +41,7 @@ * *

*/ -@description(name = "instr", value = "_FUNC_(str, substr) - Returns the index of the first occurance " +@Description(name = "instr", value = "_FUNC_(str, substr) - Returns the index of the first occurance " + "of substr in str", extended = "Example:\n" + " > SELECT _FUNC_('Facebook', 'boo') FROM src LIMIT 1;\n" + " 5") public class GenericUDFInstr extends GenericUDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRTrim.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRTrim.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRTrim.java (working copy) @@ -20,10 +20,10 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "rtrim", value = "_FUNC_(str) - Removes the trailing space characters from str ", extended = "Example:\n" +@Description(name = "rtrim", value = "_FUNC_(str) - Removes the trailing space characters from str ", extended = "Example:\n" + " > SELECT _FUNC_('facebook ') FROM src LIMIT 1;\n" + " 'facebook'") public class UDFRTrim extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnixTimeStamp.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnixTimeStamp.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnixTimeStamp.java (working copy) @@ -25,12 +25,12 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; @UDFType(deterministic = false) -@description(name = "unix_timestamp", value = "_FUNC_([date[, pattern]]) - Returns the UNIX timestamp", extended = "Converts the current or specified time to number of seconds " +@Description(name = "unix_timestamp", value = "_FUNC_([date[, pattern]]) - Returns the UNIX timestamp", extended = "Converts the current or specified time to number of seconds " + "since 1970-01-01.") public class UDFUnixTimeStamp extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDayOfMonth.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "day,dayofmonth", value = "_FUNC_(date) - Returns the date of the month of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " +@Description(name = "day,dayofmonth", value = "_FUNC_(date) - Returns the date of the month of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " + "'yyyy-MM-dd'.\n" + "Example:\n " + " > SELECT _FUNC_('2009-30-07', 1) FROM src LIMIT 1;\n" + " 30") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLn.java 
=================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLn.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLn.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "ln", value = "_FUNC_(x) - Returns the natural logarithm of x", extended = "Example:\n" +@Description(name = "ln", value = "_FUNC_(x) - Returns the natural logarithm of x", extended = "Example:\n" + " > SELECT _FUNC_(1) FROM src LIMIT 1;\n" + " 0") public class UDFLn extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFJson.java (working copy) @@ -28,13 +28,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; import org.json.JSONArray; import org.json.JSONException; import org.json.JSONObject; -@description(name = "get_json_object", value = "_FUNC_(json_txt, path) - Extract a json object from path ", extended = "Extract json object from a json string based on json path " +@Description(name = "get_json_object", value = "_FUNC_(json_txt, path) - Extract a json object from path ", extended = "Extract json object from a json string based on json path " + "specified, and return json string of the extracted json object. 
It " + "will return null if the input json string is invalid.\n" + "A limited version of JSONPath supported:\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFTrim.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFTrim.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFTrim.java (working copy) @@ -20,10 +20,10 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "trim", value = "_FUNC_(str) - Removes the leading and trailing space characters " +@Description(name = "trim", value = "_FUNC_(str) - Removes the leading and trailing space characters " + "from str ", extended = "Example:\n" + " > SELECT _FUNC_(' facebook ') FROM src LIMIT 1;\n" + " 'facebook'") public class UDFTrim extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFHour.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "hour", value = "_FUNC_(date) - Returns the hour of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " +@Description(name = "hour", value = "_FUNC_(date) - Returns the hour of date", extended = "date is a string in the format of 'yyyy-MM-dd HH:mm:ss' or " + "'HH:mm:ss'.\n" + "Example:\n " + " > SELECT _FUNC_('2009-07-30 12:58:59') FROM src LIMIT 1;\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLpad.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLpad.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLpad.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "lpad", value = "_FUNC_(str, len, pad) - Returns str, left-padded with pad to a " +@Description(name = "lpad", value = "_FUNC_(str, len, pad) - Returns str, left-padded with pad to a " + "length of len", extended = "If str is longer than len, the return value is shortened to " + "len characters.\n" + "Example:\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPLessThan.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPLessThan.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPLessThan.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import 
org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -31,7 +31,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "<", value = "a _FUNC_ b - Returns TRUE if a is less than b") +@Description(name = "<", value = "a _FUNC_ b - Returns TRUE if a is less than b") public class UDFOPLessThan extends UDFBaseCompare { private static Log LOG = LogFactory.getLog(UDFOPLessThan.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFloor.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFloor.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFloor.java (working copy) @@ -21,11 +21,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "floor", value = "_FUNC_(x) - Find the largest integer not greater than x", extended = "Example:\n" +@Description(name = "floor", value = "_FUNC_(x) - Find the largest integer not greater than x", extended = "Example:\n" + " > SELECT _FUNC_(-0.1) FROM src LIMIT 1;\n" + " -1\n" + " > SELECT _FUNC_(5) FROM src LIMIT 1;\n" + " 5") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog10.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog10.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog10.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "log10", value = "_FUNC_(x) - Returns the logarithm of x with base 10", extended = "Example:\n" +@Description(name = "log10", value = "_FUNC_(x) - Returns the logarithm of x with base 10", extended = "Example:\n" + " > SELECT _FUNC_(10) FROM src LIMIT 1;\n" + " 1") public class UDFLog10 extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFindInSet.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFindInSet.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFFindInSet.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "find_in_set", value = "_FUNC_(str,str_array) - Returns the first occurrence " +@Description(name = "find_in_set", value = "_FUNC_(str,str_array) - Returns the first occurrence " + " of str in str_array where str_array is a comma-delimited string." + " Returns null if either argument is null." 
+ " Returns 0 if the first argument has any commas.", extended = "Example:\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLike.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLike.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLike.java (working copy) @@ -24,11 +24,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.BooleanWritable; import org.apache.hadoop.io.Text; -@description(name = "like", value = "_FUNC_(str, pattern) - Checks if str matches pattern", extended = "Example:\n" +@Description(name = "like", value = "_FUNC_(str, pattern) - Checks if str matches pattern", extended = "Example:\n" + " > SELECT a.* FROM srcpart a WHERE a.hr _FUNC_ '%2' LIMIT 1;\n" + " 27 val_27 2008-04-08 12") public class UDFLike extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFMonth.java (working copy) @@ -26,11 +26,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "month", value = "_FUNC_(date) - Returns the month of date", extended = "Example:\n" +@Description(name = "month", value = "_FUNC_(date) - Returns the month of date", extended = "Example:\n" + " > SELECT _FUNC_('2009-30-07') FROM src LIMIT 1;\n" + " 7") public class UDFMonth extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAcos.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAcos.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAcos.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "acos", value = "_FUNC_(x) - returns the arc cosine of x if -1<=x<=1 or " +@Description(name = "acos", value = "_FUNC_(x) - returns the arc cosine of x if -1<=x<=1 or " + "NULL otherwise", extended = "Example:\n" + " > SELECT _FUNC_(1) FROM src LIMIT 1;\n" + " 0\n" + " > SELECT _FUNC_(2) FROM src LIMIT 1;\n" + " NULL") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateAdd.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateAdd.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFDateAdd.java (working copy) @@ -27,11 +27,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = 
"date_add", value = "_FUNC_(start_date, num_days) - Returns the date that is num_days" +@Description(name = "date_add", value = "_FUNC_(start_date, num_days) - Returns the date that is num_days" + " after start_date.", extended = "start_date is a string in the format 'yyyy-MM-dd HH:mm:ss' or" + " 'yyyy-MM-dd'. num_days is a number. The time part of start_date is " + "ignored.\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExpReplace.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExpReplace.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRegExpReplace.java (working copy) @@ -22,10 +22,10 @@ import java.util.regex.Pattern; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "regexp_replace", value = "_FUNC_(str, regexp, rep) - replace all substrings of str that " +@Description(name = "regexp_replace", value = "_FUNC_(str, regexp, rep) - replace all substrings of str that " + "match regexp with rep", extended = "Example:\n" + " > SELECT _FUNC_('100-200', '(\\d+)', 'num') FROM src LIMIT 1;\n" + " 'num-num'") Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRpad.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRpad.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRpad.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "rpad", value = "_FUNC_(str, len, pad) - Returns str, right-padded with pad to a " +@Description(name = "rpad", value = "_FUNC_(str, len, pad) - Returns str, right-padded with pad to a " + "length of len", extended = "If str is longer than len, the return value is shortened to " + "len characters.\n" + "Example:\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnhex.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnhex.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFUnhex.java (working copy) @@ -19,10 +19,10 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "unhex", value = "_FUNC_(str) - Converts hexadecimal argument to string", extended = "Performs the inverse operation of HEX(str). That is, it interprets\n" +@Description(name = "unhex", value = "_FUNC_(str) - Converts hexadecimal argument to string", extended = "Performs the inverse operation of HEX(str). That is, it interprets\n" + "each pair of hexadecimal digits in the argument as a number and\n" + "converts it to the character represented by the number. 
The\n" + "resulting characters are returned as a binary string.\n\n" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFParseUrl.java (working copy) @@ -25,7 +25,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; /** * UDF to extract specfic parts from URL For example, @@ -40,7 +40,7 @@ * HOST,PATH,QUERY,REF,PROTOCOL,AUTHORITY,FILE,USERINFO Also you can get a value * of particular key in QUERY, using syntax QUERY: eg: QUERY:k1. */ -@description(name = "parse_url", value = "_FUNC_(url, partToExtract[, key]) - extracts a part from a URL", extended = "Parts: HOST, PATH, QUERY, REF, PROTOCOL, AUTHORITY, FILE, " +@Description(name = "parse_url", value = "_FUNC_(url, partToExtract[, key]) - extracts a part from a URL", extended = "Parts: HOST, PATH, QUERY, REF, PROTOCOL, AUTHORITY, FILE, " + "USERINFO\nkey specifies which query to extract\n" + "Example:\n" + " > SELECT _FUNC_('http://facebook.com/path/p1.php?query=1', " Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitAnd.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitAnd.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPBitAnd.java (working copy) @@ -20,13 +20,13 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "&", value = "a _FUNC_ b - Bitwise and", extended = "Example:\n" +@Description(name = "&", value = "a _FUNC_ b - Bitwise and", extended = "Example:\n" + " > SELECT 3 _FUNC_ 5 FROM src LIMIT 1;\n" + " 1") public class UDFOPBitAnd extends UDFBaseBitOP { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPAnd.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPAnd.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPAnd.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.BooleanWritable; -@description(name = "and", value = "a _FUNC_ b - Logical and", extended = "Example:\n" +@Description(name = "and", value = "a _FUNC_ b - Logical and", extended = "Example:\n" + " > SELECT * FROM srcpart WHERE src.hr=12 _FUNC_ " + "src.hr='2008-04-08' LIMIT 1;\n" + " 27 val_27 2008-04-08 12") public class UDFOPAnd extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFReverse.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFReverse.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFReverse.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; 
import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUtils; import org.apache.hadoop.io.Text; -@description(name = "reverse", value = "_FUNC_(str) - reverse str", extended = "Example:\n" +@Description(name = "reverse", value = "_FUNC_(str) - reverse str", extended = "Example:\n" + " > SELECT _FUNC_('Facebook') FROM src LIMIT 1;\n" + " 'koobecaF'") public class UDFReverse extends UDF { private final Text result = new Text(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPOr.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPOr.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPOr.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.BooleanWritable; -@description(name = "or", value = "a _FUNC_ b - Logical or") +@Description(name = "or", value = "a _FUNC_ b - Logical or") public class UDFOPOr extends UDF { private static Log LOG = LogFactory.getLog(UDFOPOr.class.getName()); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRepeat.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRepeat.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRepeat.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "repeat", value = "_FUNC_(str, n) - repeat str n times ", extended = "Example:\n" +@Description(name = "repeat", value = "_FUNC_(str, n) - repeat str n times ", extended = "Example:\n" + " > SELECT _FUNC_('123', 2) FROM src LIMIT 1;\n" + " '123123'") public class UDFRepeat extends UDF { private final Text result = new Text(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLog.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "log", value = "_FUNC_([b], x) - Returns the logarithm of x with base b", extended = "Example:\n" +@Description(name = "log", value = "_FUNC_([b], x) - Returns the logarithm of x with base b", extended = "Example:\n" + " > SELECT _FUNC_(13, 13) FROM src LIMIT 1;\n" + " 1") public class UDFLog extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLength.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLength.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLength.java (working copy) @@ -18,12 +18,12 @@ package 
org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.ql.udf.generic.GenericUDFUtils; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "length", value = "_FUNC_(str) - Returns the length of str ", extended = "Example:\n" +@Description(name = "length", value = "_FUNC_(str) - Returns the length of str ", extended = "Example:\n" + " > SELECT _FUNC_('Facebook') FROM src LIMIT 1;\n" + " 8") public class UDFLength extends UDF { private final IntWritable result = new IntWritable(); Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPPlus.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPPlus.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPPlus.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -37,7 +37,7 @@ * The case of int + double will be handled by implicit type casting using * UDFRegistry.implicitConvertable method. */ -@description(name = "+", value = "a _FUNC_ b - Returns a+b") +@Description(name = "+", value = "a _FUNC_ b - Returns a+b") public class UDFOPPlus extends UDFBaseNumericOp { private static Log LOG = LogFactory Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRand.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRand.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFRand.java (working copy) @@ -23,11 +23,11 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.io.LongWritable; -@description(name = "rand", value = "_FUNC_([seed]) - Returns a pseudorandom number between 0 and 1") +@Description(name = "rand", value = "_FUNC_([seed]) - Returns a pseudorandom number between 0 and 1") @UDFType(deterministic = false) public class UDFRand extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFMax.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFMax.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDAFMax.java (working copy) @@ -20,7 +20,7 @@ import org.apache.hadoop.hive.ql.exec.UDAF; import org.apache.hadoop.hive.ql.exec.UDAFEvaluator; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; import org.apache.hadoop.hive.shims.ShimLoader; @@ -29,7 +29,7 @@ import org.apache.hadoop.io.LongWritable; import org.apache.hadoop.io.Text; -@description(name = "max", value = "_FUNC_(expr) - Returns the maximum value of expr") +@Description(name = "max", value = "_FUNC_(expr) - Returns the maximum value of expr") public class UDAFMax extends UDAF 
{ static public class MaxShortEvaluator implements UDAFEvaluator { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLTrim.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLTrim.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFLTrim.java (working copy) @@ -20,10 +20,10 @@ import org.apache.commons.lang.StringUtils; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.Text; -@description(name = "ltrim", value = "_FUNC_(str) - Removes the leading space characters from str ", extended = "Example:\n" +@Description(name = "ltrim", value = "_FUNC_(str) - Removes the leading space characters from str ", extended = "Example:\n" + " > SELECT _FUNC_(' facebook') FROM src LIMIT 1;\n" + " 'facebook'") public class UDFLTrim extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAscii.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAscii.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFAscii.java (working copy) @@ -19,11 +19,11 @@ package org.apache.hadoop.hive.ql.udf; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "ascii", value = "_FUNC_(str) - returns the numeric value of the first character" +@Description(name = "ascii", value = "_FUNC_(str) - returns the numeric value of the first character" + " of str", extended = "Returns 0 if str is empty or NULL if str is NULL\n" + "Example:\n" + " > SELECT _FUNC_('222') FROM src LIMIT 1;" Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSin.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSin.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSin.java (working copy) @@ -21,10 +21,10 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.DoubleWritable; -@description(name = "sin", value = "_FUNC_(x) - returns the sine of x (x is in radians)", extended = "Example:\n " +@Description(name = "sin", value = "_FUNC_(x) - returns the sine of x (x is in radians)", extended = "Example:\n " + " > SELECT _FUNC_(0) FROM src LIMIT 1;\n" + " 0") public class UDFSin extends UDF { Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMinus.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMinus.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFOPMinus.java (working copy) @@ -20,7 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.hive.serde2.io.ByteWritable; import org.apache.hadoop.hive.serde2.io.DoubleWritable; import org.apache.hadoop.hive.serde2.io.ShortWritable; @@ -28,7 +28,7 @@ import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.LongWritable; -@description(name = 
"-", value = "a _FUNC_ b - Returns the difference a-b") +@Description(name = "-", value = "a _FUNC_ b - Returns the difference a-b") public class UDFOPMinus extends UDFBaseNumericOp { private static Log LOG = LogFactory Index: ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSpace.java =================================================================== --- ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSpace.java (revision 901960) +++ ql/src/java/org/apache/hadoop/hive/ql/udf/UDFSpace.java (working copy) @@ -21,11 +21,11 @@ import java.util.Arrays; import org.apache.hadoop.hive.ql.exec.UDF; -import org.apache.hadoop.hive.ql.exec.description; +import org.apache.hadoop.hive.ql.exec.Description; import org.apache.hadoop.io.IntWritable; import org.apache.hadoop.io.Text; -@description(name = "space", value = "_FUNC_(n) - returns n spaces", extended = "Example:\n " +@Description(name = "space", value = "_FUNC_(n) - returns n spaces", extended = "Example:\n " + " > SELECT _FUNC_(2) FROM src LIMIT 1;\n" + " ' '") public class UDFSpace extends UDF { private final Text result = new Text();