diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index a268d80..516ce19 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -291,7 +291,7 @@ public RelNode genLogicalPlan(ASTNode ast) throws SemanticException {
       return null;
     }
     ASTNode queryForCbo = ast;
-    if (cboCtx.type == PreCboCtx.Type.CTAS_OR_MV) {
+    if (cboCtx.type == PreCboCtx.Type.CTAS || cboCtx.type == PreCboCtx.Type.VIEW) {
       queryForCbo = cboCtx.nodeOfInterest; // nodeOfInterest is the query
     }
     runCBO = canCBOHandleAst(queryForCbo, getQB(), cboCtx);
@@ -322,7 +322,7 @@ Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticExcept
       // table, destination), so if the query is otherwise ok, it is as if we
       // did remove those and gave CBO the proper AST. That is kinda hacky.
       ASTNode queryForCbo = ast;
-      if (cboCtx.type == PreCboCtx.Type.CTAS_OR_MV) {
+      if (cboCtx.type == PreCboCtx.Type.CTAS || cboCtx.type == PreCboCtx.Type.VIEW) {
        queryForCbo = cboCtx.nodeOfInterest; // nodeOfInterest is the query
       }
       runCBO = canCBOHandleAst(queryForCbo, getQB(), cboCtx);
@@ -355,26 +355,31 @@ Operator genOPTree(ASTNode ast, PlannerContext plannerCtx) throws SemanticExcept
           newAST = fixUpAfterCbo(ast, newAST, cboCtx);
 
           // 2. Regen OP plan from optimized AST
+          if (cboCtx.type == PreCboCtx.Type.VIEW && !materializedView) {
+            LOG.info("CBO Succeeded; optimized logical plan.");
+            this.ctx.setCboInfo("Plan optimized by CBO.");
+            this.ctx.setCboSucceeded(true);
+            return null;
+          }
           init(false);
-          if (cboCtx.type == PreCboCtx.Type.CTAS_OR_MV) {
-            // Redo create-table/view analysis, because it's not part of doPhase1.
-            if (materializedView) {
-              // Use the REWRITTEN AST
-              setAST(newAST);
-              newAST = reAnalyzeMaterializedViewAfterCbo(newAST);
-              // Store text of the ORIGINAL QUERY
-              String originalText = ctx.getTokenRewriteStream().toString(
-                  cboCtx.nodeOfInterest.getTokenStartIndex(),
-                  cboCtx.nodeOfInterest.getTokenStopIndex());
-              createVwDesc.setViewOriginalText(originalText);
-              viewSelect = newAST;
-              viewsExpanded = new ArrayList<>();
-              viewsExpanded.add(createVwDesc.getViewName());
-            } else {
-              // CTAS
-              setAST(newAST);
-              newAST = reAnalyzeCTASAfterCbo(newAST);
-            }
+          if (cboCtx.type == PreCboCtx.Type.VIEW && materializedView) {
+            // Redo create-table/view analysis, because it's not part of
+            // doPhase1.
+            // Use the REWRITTEN AST
+            setAST(newAST);
+            newAST = reAnalyzeMaterializedViewAfterCbo(newAST);
+            // Store text of the ORIGINAL QUERY
+            String originalText = ctx.getTokenRewriteStream().toString(
+                cboCtx.nodeOfInterest.getTokenStartIndex(),
+                cboCtx.nodeOfInterest.getTokenStopIndex());
+            createVwDesc.setViewOriginalText(originalText);
+            viewSelect = newAST;
+            viewsExpanded = new ArrayList<>();
+            viewsExpanded.add(createVwDesc.getViewName());
+          } else if (cboCtx.type == PreCboCtx.Type.CTAS) {
+            // CTAS
+            setAST(newAST);
+            newAST = reAnalyzeCTASAfterCbo(newAST);
           }
           Phase1Ctx ctx_1 = initPhase1Ctx();
           if (!doPhase1(newAST, getQB(), ctx_1, null)) {
@@ -648,9 +653,7 @@ boolean canCBOHandleAst(ASTNode ast, QB qb, PreCboCtx cboCtx) {
         || qb.isCTAS() || qb.isMaterializedView()
         || cboCtx.type == PreCboCtx.Type.INSERT || cboCtx.type == PreCboCtx.Type.MULTI_INSERT;
     boolean noBadTokens = HiveCalciteUtil.validateASTForUnsupportedTokens(ast);
-    boolean result = isSupportedRoot && isSupportedType
-        && (getCreateViewDesc() == null || getCreateViewDesc().isMaterialized())
-        && noBadTokens;
+    boolean result = isSupportedRoot && isSupportedType && noBadTokens;
 
     if (!result) {
       if (needToLogMessage) {
@@ -662,9 +665,6 @@ boolean canCBOHandleAst(ASTNode ast, QB qb, PreCboCtx cboCtx) {
           msg += "is not a query with at least one source table "
              + " or there is a subquery without a source table, or CTAS, or insert; ";
         }
-        if (getCreateViewDesc() != null && !getCreateViewDesc().isMaterialized()) {
-          msg += "has create view; ";
-        }
         if (!noBadTokens) {
           msg += "has unsupported tokens; ";
         }
@@ -832,7 +832,7 @@ String fixCtasColumnName(String colName) {
    */
   static class PreCboCtx extends PlannerContext {
     enum Type {
-      NONE, INSERT, MULTI_INSERT, CTAS_OR_MV, UNEXPECTED
+      NONE, INSERT, MULTI_INSERT, CTAS, VIEW, UNEXPECTED
     }
 
     private ASTNode nodeOfInterest;
@@ -850,8 +850,13 @@ private void set(Type type, ASTNode ast) {
     }
 
     @Override
-    void setCTASOrMVToken(ASTNode child) {
-      set(PreCboCtx.Type.CTAS_OR_MV, child);
+    void setCTASToken(ASTNode child) {
+      set(PreCboCtx.Type.CTAS, child);
+    }
+
+    @Override
+    void setViewToken(ASTNode child) {
+      set(PreCboCtx.Type.VIEW, child);
     }
 
     @Override
@@ -881,7 +886,8 @@ ASTNode fixUpAfterCbo(ASTNode originalAst, ASTNode newAst, PreCboCtx cboCtx)
       // nothing to do
       return newAst;
 
-    case CTAS_OR_MV: {
+    case CTAS:
+    case VIEW: {
       // Patch the optimized query back into original CTAS AST, replacing the
       // original query.
       replaceASTChild(cboCtx.nodeOfInterest, newAst);
@@ -1244,7 +1250,11 @@ public RelNode apply(RelOptCluster cluster, RelOptSchema relOptSchema, SchemaPlu
         calciteGenPlan = genLogicalPlan(getQB(), true, null, null);
         resultSchema = SemanticAnalyzer.convertRowSchemaToResultSetSchema(
             relToHiveRR.get(calciteGenPlan),
-            HiveConf.getBoolVar(conf, HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
+            getQB().isView() ? false : HiveConf.getBoolVar(conf,
+                HiveConf.ConfVars.HIVE_RESULTSET_USE_UNIQUE_COLUMN_NAMES));
+        if (getQB().isView()) {
+          return calciteGenPlan;
+        }
       } catch (SemanticException e) {
         semanticException = e;
         throw new RuntimeException(e);
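Note on the CalcitePlanner hunks above: the patch splits the old CTAS_OR_MV bucket into separate CTAS and VIEW cases, and lets a plain (non-materialized) CREATE VIEW stop as soon as CBO has produced and spliced back the optimized AST, instead of regenerating an operator tree it does not need. The following self-contained sketch mirrors only the branch structure of genOPTree; the class and method names are hypothetical stand-ins, not Hive APIs:

    // Illustrative model of the post-CBO dispatch in genOPTree; not Hive code.
    public class PostCboDispatch {
      enum Type { NONE, INSERT, MULTI_INSERT, CTAS, VIEW, UNEXPECTED }

      // Returns what genOPTree does next once CBO has rewritten the AST.
      static String nextStep(Type type, boolean materializedView) {
        if (type == Type.VIEW && !materializedView) {
          // Plain CREATE VIEW: record CBO success and return early; the
          // optimized AST is only needed for the stored view definition.
          return "mark CBO succeeded and return null";
        } else if (type == Type.VIEW && materializedView) {
          return "re-analyze the materialized view from the rewritten AST";
        } else if (type == Type.CTAS) {
          return "re-analyze the CTAS from the rewritten AST";
        }
        return "run doPhase1 and regenerate the operator plan";
      }

      public static void main(String[] args) {
        System.out.println(nextStep(Type.VIEW, false)); // the new short-circuit
        System.out.println(nextStep(Type.VIEW, true));
        System.out.println(nextStep(Type.CTAS, false));
      }
    }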
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
index 59d537f..bfe2ab5 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/QB.java
@@ -420,6 +420,10 @@ public boolean isMaterializedView() {
     return viewDesc != null && viewDesc.isMaterialized();
   }
 
+  public boolean isView() {
+    return viewDesc != null && !viewDesc.isMaterialized();
+  }
+
   void addEncryptedTargetTablePath(Path p) {
     if(encryptedTargetTablePaths == null) {
       encryptedTargetTablePaths = new ArrayList<>();
diff --git a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index 663a148..0517c15 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -10708,7 +10708,10 @@ void setParseTreeAttr(ASTNode child, Phase1Ctx ctx_1) {
       this.ctx_1 = ctx_1;
     }
 
-    void setCTASOrMVToken(ASTNode child) {
+    void setCTASToken(ASTNode child) {
+    }
+
+    void setViewToken(ASTNode child) {
     }
 
     void setInsertToken(ASTNode ast, boolean isTmpFileDest) {
@@ -11021,7 +11024,7 @@ void analyzeInternal(ASTNode ast, PlannerContext plannerCtx) throws SemanticExce
     }
 
     // 3. Deduce Resultset Schema
-    if (createVwDesc != null) {
+    if (createVwDesc != null && !this.ctx.isCboSucceeded()) {
       resultSchema = convertRowSchemaToViewSchema(opParseCtx.get(sinkOp).getRowResolver());
     } else {
       // resultSchema will be null if
@@ -11795,7 +11798,7 @@ ASTNode analyzeCreateTable(
         }
         command_type = CTAS;
         if (plannerCtx != null) {
-          plannerCtx.setCTASOrMVToken(child);
+          plannerCtx.setCTASToken(child);
         }
         selectStmt = child;
         break;
@@ -12072,7 +12075,7 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt
       case HiveParser.TOK_QUERY:
         // For CBO
         if (plannerCtx != null) {
-          plannerCtx.setCTASOrMVToken(child);
+          plannerCtx.setViewToken(child);
         }
         selectStmt = child;
         break;
@@ -12132,7 +12135,6 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt
           storageFormat.getSerdeProps());
       addDbAndTabToOutputs(qualTabName, TableType.MATERIALIZED_VIEW);
       queryState.setCommandType(HiveOperation.CREATE_MATERIALIZED_VIEW);
-      qb.setViewDesc(createVwDesc);
     } else {
       createVwDesc = new CreateViewDesc(
           dbDotTable, cols, comment, tblProps, partColNames,
@@ -12143,6 +12145,7 @@ protected ASTNode analyzeCreateView(ASTNode ast, QB qb, PlannerContext plannerCt
       addDbAndTabToOutputs(qualTabName, TableType.VIRTUAL_VIEW);
       queryState.setCommandType(HiveOperation.CREATEVIEW);
     }
+    qb.setViewDesc(createVwDesc);
 
     return selectStmt;
   }
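Note on the QB and SemanticAnalyzer hunks above: QB.isView() is the non-materialized complement of isMaterializedView(); both read the same viewDesc field, so at most one can return true, and both are false for ordinary queries. The hunk that moves qb.setViewDesc(createVwDesc) out of the materialized-only branch is what makes isView() observable from CalcitePlanner for plain views. A minimal stand-alone model of the two predicates (hypothetical classes, not the real QB or CreateViewDesc):

    // Illustrative model only: shows the mutual exclusion of the predicates.
    public class QbViewPredicates {
      static class ViewDesc {
        final boolean materialized;
        ViewDesc(boolean materialized) { this.materialized = materialized; }
      }

      private ViewDesc viewDesc; // stays null unless a view is being created

      void setViewDesc(ViewDesc d) { viewDesc = d; } // now set for both view kinds
      boolean isMaterializedView() { return viewDesc != null && viewDesc.materialized; }
      boolean isView() { return viewDesc != null && !viewDesc.materialized; }

      public static void main(String[] args) {
        QbViewPredicates qb = new QbViewPredicates();
        System.out.println(qb.isView() + " " + qb.isMaterializedView()); // false false
        qb.setViewDesc(new ViewDesc(false)); // plain CREATE VIEW
        System.out.println(qb.isView() + " " + qb.isMaterializedView()); // true false
      }
    }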
diff --git a/ql/src/test/queries/clientpositive/view_cbo.q b/ql/src/test/queries/clientpositive/view_cbo.q
new file mode 100644
index 0000000..e9a4b7a
--- /dev/null
+++ b/ql/src/test/queries/clientpositive/view_cbo.q
@@ -0,0 +1,19 @@
+set hive.mapred.mode=nonstrict;
+-- SORT_QUERY_RESULTS
+
+create view v as select key from src;
+desc formatted v;
+
+drop view v;
+create view v as select * from src;
+desc formatted v;
+
+drop view v;
+create view v as select * from src intersect select * from src;
+desc formatted v;
+
+drop view v;
+create view v as select * from src except select * from src;
+desc formatted v;
+
+explain select * from v;
diff --git a/ql/src/test/results/clientpositive/view_cbo.q.out b/ql/src/test/results/clientpositive/view_cbo.q.out
new file mode 100644
index 0000000..f9af4a6
--- /dev/null
+++ b/ql/src/test/results/clientpositive/view_cbo.q.out
@@ -0,0 +1,355 @@
+PREHOOK: query: create view v as select key from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v
+POSTHOOK: query: create view v as select key from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v
+PREHOOK: query: desc formatted v
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@v
+POSTHOOK: query: desc formatted v
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@v
+# col_name            data_type            comment
+
+key                   string
+
+# Detailed Table Information
+Database:             default
+#### A masked pattern was here ####
+Retention:            0
+Table Type:           VIRTUAL_VIEW
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:        null
+InputFormat:          org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:         org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:           No
+Num Buckets:          -1
+Bucket Columns:       []
+Sort Columns:         []
+
+# View Information
+View Original Text:   select key from src
+View Expanded Text:   select `src`.`key` from `default`.`src`
+View Rewrite Enabled: No
+PREHOOK: query: drop view v
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v
+PREHOOK: Output: default@v
+POSTHOOK: query: drop view v
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v
+POSTHOOK: Output: default@v
+PREHOOK: query: create view v as select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v
+POSTHOOK: query: create view v as select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v
+PREHOOK: query: desc formatted v
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@v
+POSTHOOK: query: desc formatted v
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@v
+# col_name            data_type            comment
+
+key                   string
+value                 string
+
+# Detailed Table Information
+Database:             default
+#### A masked pattern was here ####
+Retention:            0
+Table Type:           VIRTUAL_VIEW
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:        null
+InputFormat:          org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:         org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:           No
+Num Buckets:          -1
+Bucket Columns:       []
+Sort Columns:         []
+
+# View Information
+View Original Text:   select * from src
+View Expanded Text:   select `src`.`key`, `src`.`value` from `default`.`src`
+View Rewrite Enabled: No
+PREHOOK: query: drop view v
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v
+PREHOOK: Output: default@v
+POSTHOOK: query: drop view v
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v
+POSTHOOK: Output: default@v
+PREHOOK: query: create view v as select * from src intersect select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v
+POSTHOOK: query: create view v as select * from src intersect select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v
+PREHOOK: query: desc formatted v
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@v
+POSTHOOK: query: desc formatted v
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@v
+# col_name            data_type            comment
+
+key                   string
+value                 string
+
+# Detailed Table Information
+Database:             default
+#### A masked pattern was here ####
+Retention:            0
+Table Type:           VIRTUAL_VIEW
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:        null
+InputFormat:          org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:         org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:           No
+Num Buckets:          -1
+Bucket Columns:       []
+Sort Columns:         []
+
+# View Information
+View Original Text:   select * from src intersect select * from src
+View Expanded Text:   select `src`.`key`, `src`.`value` from `default`.`src` intersect select `src`.`key`, `src`.`value` from `default`.`src`
+View Rewrite Enabled: No
+PREHOOK: query: drop view v
+PREHOOK: type: DROPVIEW
+PREHOOK: Input: default@v
+PREHOOK: Output: default@v
+POSTHOOK: query: drop view v
+POSTHOOK: type: DROPVIEW
+POSTHOOK: Input: default@v
+POSTHOOK: Output: default@v
+PREHOOK: query: create view v as select * from src except select * from src
+PREHOOK: type: CREATEVIEW
+PREHOOK: Input: default@src
+PREHOOK: Output: database:default
+PREHOOK: Output: default@v
+POSTHOOK: query: create view v as select * from src except select * from src
+POSTHOOK: type: CREATEVIEW
+POSTHOOK: Input: default@src
+POSTHOOK: Output: database:default
+POSTHOOK: Output: default@v
+PREHOOK: query: desc formatted v
+PREHOOK: type: DESCTABLE
+PREHOOK: Input: default@v
+POSTHOOK: query: desc formatted v
+POSTHOOK: type: DESCTABLE
+POSTHOOK: Input: default@v
+# col_name            data_type            comment
+
+key                   string
+value                 string
+
+# Detailed Table Information
+Database:             default
+#### A masked pattern was here ####
+Retention:            0
+Table Type:           VIRTUAL_VIEW
+Table Parameters:
+#### A masked pattern was here ####
+
+# Storage Information
+SerDe Library:        null
+InputFormat:          org.apache.hadoop.mapred.TextInputFormat
+OutputFormat:         org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat
+Compressed:           No
+Num Buckets:          -1
+Bucket Columns:       []
+Sort Columns:         []
+
+# View Information
+View Original Text:   select * from src except select * from src
+View Expanded Text:   select `src`.`key`, `src`.`value` from `default`.`src` except select `src`.`key`, `src`.`value` from `default`.`src`
+View Rewrite Enabled: No
+PREHOOK: query: explain select * from v
+PREHOOK: type: QUERY
+POSTHOOK: query: explain select * from v
+POSTHOOK: type: QUERY
+STAGE DEPENDENCIES:
+  Stage-1 is a root stage
+  Stage-2 depends on stages: Stage-1, Stage-3
+  Stage-3 is a root stage
+  Stage-0 depends on stages: Stage-2
+
+STAGE PLANS:
+  Stage: Stage-1
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            properties:
+              insideView TRUE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(2)
+                keys: _col0 (type: string), _col1 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: string), 2 (type: bigint), _col2 (type: bigint)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-2
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            Union
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col2), sum(_col3)
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col2 (type: bigint), _col3 (type: bigint)
+          TableScan
+            Union
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Select Operator
+                expressions: _col0 (type: string), _col1 (type: string), _col3 (type: bigint), (_col2 * _col3) (type: bigint)
+                outputColumnNames: _col0, _col1, _col2, _col3
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Group By Operator
+                  aggregations: sum(_col2), sum(_col3)
+                  keys: _col0 (type: string), _col1 (type: string)
+                  mode: hash
+                  outputColumnNames: _col0, _col1, _col2, _col3
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  Reduce Output Operator
+                    key expressions: _col0 (type: string), _col1 (type: string)
+                    sort order: ++
+                    Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                    Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                    value expressions: _col2 (type: bigint), _col3 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: sum(VALUE._col0), sum(VALUE._col1)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2, _col3
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Filter Operator
+            predicate: ((_col2 > 0) and ((_col2 * 2) = _col3)) (type: boolean)
+            Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: _col0 (type: string), _col1 (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+              File Output Operator
+                compressed: false
+                Statistics: Num rows: 41 Data size: 435 Basic stats: COMPLETE Column stats: NONE
+                table:
+                    input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                    output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                    serde: org.apache.hadoop.hive.serde2.lazy.LazySimpleSerDe
+
+  Stage: Stage-3
+    Map Reduce
+      Map Operator Tree:
+          TableScan
+            alias: src
+            properties:
+              insideView TRUE
+            Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+            Select Operator
+              expressions: key (type: string), value (type: string)
+              outputColumnNames: _col0, _col1
+              Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+              Group By Operator
+                aggregations: count(1)
+                keys: _col0 (type: string), _col1 (type: string)
+                mode: hash
+                outputColumnNames: _col0, _col1, _col2
+                Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                Reduce Output Operator
+                  key expressions: _col0 (type: string), _col1 (type: string)
+                  sort order: ++
+                  Map-reduce partition columns: _col0 (type: string), _col1 (type: string)
+                  Statistics: Num rows: 500 Data size: 5312 Basic stats: COMPLETE Column stats: NONE
+                  value expressions: _col2 (type: bigint)
+      Reduce Operator Tree:
+        Group By Operator
+          aggregations: count(VALUE._col0)
+          keys: KEY._col0 (type: string), KEY._col1 (type: string)
+          mode: mergepartial
+          outputColumnNames: _col0, _col1, _col2
+          Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+          Select Operator
+            expressions: _col0 (type: string), _col1 (type: string), 1 (type: bigint), _col2 (type: bigint)
+            outputColumnNames: _col0, _col1, _col2, _col3
+            Statistics: Num rows: 250 Data size: 2656 Basic stats: COMPLETE Column stats: NONE
+            File Output Operator
+              compressed: false
+              table:
+                  input format: org.apache.hadoop.mapred.SequenceFileInputFormat
+                  output format: org.apache.hadoop.hive.ql.io.HiveSequenceFileOutputFormat
+                  serde: org.apache.hadoop.hive.serde2.lazybinary.LazyBinarySerDe
+
+  Stage: Stage-0
+    Fetch Operator
+      limit: -1
+      Processor Tree:
+        ListSink
+
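Note on the expected plan above: Calcite rewrites EXCEPT over the view into a tagged union. The branch in Stage-1 counts each distinct (key, value) under the constant 2, the Stage-3 branch under the constant 1; Stage-2 then sums the plain counts (_col2) and the tag-weighted counts (_col3) and keeps a row only when ((_col2 > 0) and ((_col2 * 2) = _col3)), which holds exactly when the row occurs in the tag-2 branch (the EXCEPT's left input) and never in the tag-1 branch. The arithmetic can be checked with a few lines of plain Java (no Hive dependencies; variable names are illustrative, not the plan's):

    // Verifies the EXCEPT filter from the plan: with a = left-branch count
    // (tag 2) and b = right-branch count (tag 1), sumCnt = a + b and
    // sumTagCnt = 2a + b, so the predicate holds iff a > 0 and b == 0.
    public class ExceptFilterCheck {
      static boolean keep(long a, long b) {
        long sumCnt = a + b;          // sum(_col2) in Stage-2
        long sumTagCnt = 2 * a + b;   // sum(_col2 * _col3) in Stage-2
        return sumCnt > 0 && 2 * sumCnt == sumTagCnt;
      }

      public static void main(String[] args) {
        System.out.println(keep(3, 0)); // true: only in the left branch
        System.out.println(keep(3, 1)); // false: present in both branches
        System.out.println(keep(0, 2)); // false: only in the right branch
      }
    }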