diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 696227be48..0c7f055a28 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hive.ql.metadata;
 
+import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.HashMap;
@@ -64,9 +65,11 @@
 import org.apache.hadoop.hive.ql.parse.ASTNode;
 import org.apache.hadoop.hive.ql.parse.CalcitePlanner;
 import org.apache.hadoop.hive.ql.parse.ColumnStatsList;
+import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.ql.parse.ParseUtils;
 import org.apache.hadoop.hive.ql.parse.PrunedPartitionList;
 import org.apache.hadoop.hive.ql.parse.RowResolver;
+import org.apache.hadoop.hive.ql.parse.SemanticException;
 import org.apache.hadoop.hive.ql.session.SessionState;
 import org.apache.hadoop.hive.serde2.SerDeException;
 import org.apache.hadoop.hive.serde2.objectinspector.StructField;
@@ -224,10 +227,12 @@ private RelOptMaterialization addMaterializedView(HiveConf conf, Table materiali
           " ignored; error creating view replacement");
       return null;
     }
-    final RelNode queryRel = parseQuery(conf, viewQuery);
-    if (queryRel == null) {
+    final RelNode queryRel;
+    try {
+      queryRel = parseQuery(conf, viewQuery);
+    } catch (Exception e) {
       LOG.warn("Materialized view " + materializedViewTable.getCompleteName() +
-          " ignored; error parsing original query");
+          " ignored; error parsing original query; " + e);
       return null;
     }
 
@@ -400,22 +405,27 @@ private static RelNode createMaterializedViewScan(HiveConf conf, Table viewTable
     return tableRel;
   }
 
-  private static RelNode parseQuery(HiveConf conf, String viewQuery) {
-    try {
-      final ASTNode node = ParseUtils.parse(viewQuery);
-      final QueryState qs =
-          new QueryState.Builder().withHiveConf(conf).build();
-      CalcitePlanner analyzer = new CalcitePlanner(qs);
-      Context ctx = new Context(conf);
-      ctx.setIsLoadingMaterializedView(true);
-      analyzer.initCtx(ctx);
-      analyzer.init(false);
-      return analyzer.genLogicalPlan(node);
-    } catch (Exception e) {
-      // We could not parse the view
-      LOG.error("Error parsing original query for materialized view", e);
-      return null;
-    }
+  private static RelNode parseQuery(HiveConf conf, String viewQuery)
+      throws SemanticException, IOException, ParseException {
+    return getAnalyzer(conf).genLogicalPlan(ParseUtils.parse(viewQuery));
+  }
+
+  public static List<FieldSchema> parseQueryAndGetSchema(HiveConf conf, String viewQuery)
+      throws SemanticException, IOException, ParseException {
+    final CalcitePlanner analyzer = getAnalyzer(conf);
+    analyzer.genLogicalPlan(ParseUtils.parse(viewQuery));
+    return analyzer.getResultSchema();
+  }
+
+  private static CalcitePlanner getAnalyzer(HiveConf conf) throws SemanticException, IOException {
+    final QueryState qs =
+        new QueryState.Builder().withHiveConf(conf).build();
+    CalcitePlanner analyzer = new CalcitePlanner(qs);
+    Context ctx = new Context(conf);
+    ctx.setIsLoadingMaterializedView(true);
+    analyzer.initCtx(ctx);
+    analyzer.init(false);
+    return analyzer;
   }
 
   private static TableType obtainTableType(Table tabMetaData) {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
index a29b560453..c076e9d08b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDTFGetSplits.java
@@ -74,6 +74,8 @@
 import org.apache.hadoop.hive.ql.lockmgr.HiveTxnManager;
 import org.apache.hadoop.hive.ql.lockmgr.TxnManagerFactory;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
+import org.apache.hadoop.hive.ql.parse.ParseException;
 import org.apache.hadoop.hive.ql.plan.MapWork;
 import org.apache.hadoop.hive.ql.plan.PlanUtils;
 import org.apache.hadoop.hive.ql.plan.TezWork;
@@ -246,6 +248,18 @@ public PlanFragment createPlanFragment(String query, int num, ApplicationId spli
     // hive compiler is going to remove inner order by. disable that optimization until then.
     HiveConf.setBoolVar(conf, ConfVars.HIVE_REMOVE_ORDERBY_IN_SUBQUERY, false);
 
+    if(num == 0) {
+      //Schema only
+      try {
+        List<FieldSchema> fieldSchemas =
+            HiveMaterializedViewsRegistry.parseQueryAndGetSchema(conf, query);
+        Schema schema = new Schema(convertSchema(fieldSchemas));
+        return new PlanFragment(null, schema, null);
+      } catch (IOException | ParseException e) {
+        throw new HiveException(e);
+      }
+    }
+
     try {
       jc = DagUtils.getInstance().createConfiguration(conf);
     } catch (IOException e) {
@@ -274,10 +288,6 @@ public PlanFragment createPlanFragment(String query, int num, ApplicationId spli
         HiveConf.getBoolVar(conf, ConfVars.LLAP_EXTERNAL_SPLITS_ORDER_BY_FORCE_SINGLE_SPLIT);
     List<Task<?>> roots = plan.getRootTasks();
     Schema schema = convertSchema(plan.getResultSchema());
-    if(num == 0) {
-      //Schema only
-      return new PlanFragment(null, schema, null);
-    }
     boolean fetchTask = plan.getFetchTask() != null;
     TezWork tezWork;
     if (roots == null || roots.size() != 1 || !(roots.get(0) instanceof TezTask)) {
@@ -665,16 +675,18 @@ private String getSha(Path localFile, Configuration conf) throws IOException,
     }
   }
 
-  private Schema convertSchema(Object obj) throws HiveException {
-    org.apache.hadoop.hive.metastore.api.Schema schema = (org.apache.hadoop.hive.metastore.api.Schema) obj;
+  private List<FieldDesc> convertSchema(List<FieldSchema> fieldSchemas) {
     List<FieldDesc> colDescs = new ArrayList<FieldDesc>();
-    for (FieldSchema fs : schema.getFieldSchemas()) {
+    for (FieldSchema fs : fieldSchemas) {
       String colName = fs.getName();
       String typeString = fs.getType();
       colDescs.add(new FieldDesc(colName, TypeInfoUtils.getTypeInfoFromTypeString(typeString)));
     }
-    Schema Schema = new Schema(colDescs);
-    return Schema;
+    return colDescs;
+  }
+
+  private Schema convertSchema(org.apache.hadoop.hive.metastore.api.Schema schema) {
+    return new Schema(convertSchema(schema.getFieldSchemas()));
   }
 
   private String getTempTableStorageFormatString(HiveConf conf) {
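Reviewer note: a minimal sketch of how the new schema-only fast path might be exercised from caller code. Only parseQueryAndGetSchema, the FieldSchema-to-FieldDesc conversion, and the FieldDesc/Schema constructors are taken from this patch; the HiveConf/SessionState setup, the sample query, the table name, and the class name are illustrative assumptions, not part of the change.

// SchemaOnlyPathExample.java -- sketch only, assumes a working Hive client
// classpath and metastore; some_table is a hypothetical table.
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.llap.FieldDesc;
import org.apache.hadoop.hive.llap.Schema;
import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.HiveMaterializedViewsRegistry;
import org.apache.hadoop.hive.ql.session.SessionState;
import org.apache.hadoop.hive.serde2.typeinfo.TypeInfoUtils;

public class SchemaOnlyPathExample {
  public static void main(String[] args) throws Exception {
    HiveConf conf = new HiveConf();
    SessionState.start(conf); // CalcitePlanner requires an active session

    // Compile the query just far enough to learn its result schema; no Tez
    // DAG is built or submitted. This is what the num == 0 branch added to
    // GenericUDTFGetSplits.createPlanFragment now does.
    String query = "SELECT id, name FROM some_table";
    List<FieldSchema> fieldSchemas =
        HiveMaterializedViewsRegistry.parseQueryAndGetSchema(conf, query);

    // Same conversion the patch performs in convertSchema(List<FieldSchema>):
    // metastore FieldSchema entries become LLAP FieldDesc entries.
    List<FieldDesc> colDescs = new ArrayList<FieldDesc>();
    for (FieldSchema fs : fieldSchemas) {
      colDescs.add(new FieldDesc(fs.getName(),
          TypeInfoUtils.getTypeInfoFromTypeString(fs.getType())));
    }
    Schema schema = new Schema(colDescs); // what PlanFragment would carry

    for (FieldSchema fs : fieldSchemas) {
      System.out.println(fs.getName() + " : " + fs.getType());
    }
  }
}

Design-wise, routing the schema-only case through the lightweight parse-and-analyze entry point means callers asking only for metadata (num == 0) skip DagUtils configuration and task-plan generation entirely, which is why the old in-line num == 0 check further down createPlanFragment could be deleted.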