diff --git pom.xml pom.xml
index 52e5301..8f49d06 100644
--- pom.xml
+++ pom.xml
@@ -119,7 +119,7 @@
<avatica.version>1.10.0</avatica.version>
<avro.version>1.7.7</avro.version>
<bonecp.version>0.8.0.RELEASE</bonecp.version>
- <calcite.version>1.13.0</calcite.version>
+ <calcite.version>1.14.0</calcite.version>
<datanucleus-api-jdo.version>4.2.4</datanucleus-api-jdo.version>
<datanucleus-core.version>4.1.17</datanucleus-core.version>
<datanucleus-rdbms.version>4.1.19</datanucleus-rdbms.version>
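
Everything below follows from this one property bump: moving calcite.version to 1.14.0 (a property presumably shared by the calcite-core and calcite-druid dependencies) pulls in the widened RelOptMaterialization and DruidTable constructors that the Java hunks adapt to. As a quick sanity check that 1.14.0 actually wins on the runtime classpath, a minimal sketch; it reads calcite-core's jar manifest and may print null if that manifest lacks an Implementation-Version entry:

    // Prints the Calcite version resolved at runtime, via the jar manifest.
    import org.apache.calcite.plan.RelOptMaterialization;

    public final class CalciteVersionCheck {
      public static void main(String[] args) {
        System.out.println("calcite-core: "
            + RelOptMaterialization.class.getPackage().getImplementationVersion());
      }
    }
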
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index b0e68b1..6bfd828 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -1503,7 +1503,7 @@ public Table apply(org.apache.hadoop.hive.metastore.api.Table table) {
Map<String, RelOptMaterialization> qualifiedNameToView =
new HashMap<String, RelOptMaterialization>();
for (RelOptMaterialization materialization : cachedViews) {
- qualifiedNameToView.put(materialization.table.getQualifiedName().get(0), materialization);
+ qualifiedNameToView.put(materialization.qualifiedTableName.get(0), materialization);
}
for (String table : tables) {
// Compose qualified name
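
This rewrite tracks a Calcite 1.14 API change: the materialization now carries its qualified name directly as the public List<String> field qualifiedTableName, so callers no longer reach through materialization.table. A minimal sketch of the index the loop above builds; class and method names are illustrative, and for Hive's registry the list appears to hold a single db.table entry, hence get(0):

    import java.util.HashMap;
    import java.util.Map;

    import org.apache.calcite.plan.RelOptMaterialization;

    final class MaterializationIndex {
      private final Map<String, RelOptMaterialization> byName = new HashMap<>();

      void add(RelOptMaterialization m) {
        // Element 0 of the qualified name keys the map, as in the loop above.
        byName.put(m.qualifiedTableName.get(0), m);
      }

      RelOptMaterialization lookup(String fullyQualifiedName) {
        return byName.get(fullyQualifiedName); // null when the view is not cached
      }
    }
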
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
index 154ea68..423fcf0 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/HiveMaterializedViewsRegistry.java
@@ -188,7 +188,8 @@ public RelOptMaterialization addMaterializedView(Table materializedViewTable) {
" ignored; error parsing original query");
return null;
}
- RelOptMaterialization materialization = new RelOptMaterialization(tableRel, queryRel, null);
+ RelOptMaterialization materialization = new RelOptMaterialization(tableRel, queryRel,
+ null, tableRel.getTable().getQualifiedName());
cq.put(vk, materialization);
if (LOG.isDebugEnabled()) {
LOG.debug("Cached materialized view for rewriting: " + tableRel.getTable().getQualifiedName());
@@ -317,7 +318,8 @@ private static RelNode createTableScan(Table viewTable) {
List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
- dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
+ dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+ intervals, null, null);
final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
optTable, viewTable.getTableName(), null, false, false);
tableRel = DruidQuery.create(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
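
The Druid side of the upgrade: Calcite 1.14's DruidTable constructor takes two more trailing arguments than 1.13's, and this patch passes null for both; an identical call-site change appears in CalcitePlanner below. A minimal sketch of the widened call, with the parameter wiring assumed from the hunk above:

    import java.util.Arrays;
    import java.util.List;
    import java.util.Set;

    import org.apache.calcite.adapter.druid.DruidSchema;
    import org.apache.calcite.adapter.druid.DruidTable;
    import org.apache.calcite.rel.type.RelProtoDataType;
    import org.joda.time.Interval;

    final class DruidTables {
      // First six arguments are the 1.13 ones; the two trailing nulls are
      // the arguments new in 1.14.
      static DruidTable of(String address, String dataSource,
          RelProtoDataType protoRowType, Set<String> metrics) {
        List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
        return new DruidTable(new DruidSchema(address, address, false),
            dataSource, protoRowType, metrics,
            DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals, null, null);
      }
    }
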
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index 6555269..80aa963 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -231,8 +231,6 @@
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionExpression;
import org.apache.hadoop.hive.ql.parse.PTFInvocationSpec.PartitionSpec;
import org.apache.hadoop.hive.ql.parse.QBExpr.Opcode;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.PlannerContext;
-import org.apache.hadoop.hive.ql.parse.SemanticAnalyzer.PlannerContextFactory;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.BoundarySpec;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowExpressionSpec;
import org.apache.hadoop.hive.ql.parse.WindowingSpec.WindowFunctionSpec;
@@ -1500,7 +1498,8 @@ public RelOptMaterialization apply(RelOptMaterialization materialization) {
} else {
newViewScan = copyNodeScan(viewScan);
}
- return new RelOptMaterialization(newViewScan, materialization.queryRel, null);
+ return new RelOptMaterialization(newViewScan, materialization.queryRel, null,
+ materialization.qualifiedTableName);
}

private RelNode copyNodeScan(RelNode scan) {
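
RelOptMaterialization exposes its pieces as public fields with no setters, so swapping in the copied view scan means constructing a new instance; under 1.14 the qualified name is part of that, and the hunk simply carries the existing one over. A minimal sketch of the rebuild, with an illustrative helper name:

    import org.apache.calcite.plan.RelOptMaterialization;
    import org.apache.calcite.rel.RelNode;

    final class MaterializationCopies {
      // queryRel and qualifiedTableName are public fields on the original,
      // so they transfer directly to the rebuilt materialization.
      static RelOptMaterialization withViewScan(RelOptMaterialization m,
          RelNode newViewScan) {
        return new RelOptMaterialization(newViewScan, m.queryRel, null,
            m.qualifiedTableName);
      }
    }
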
@@ -2402,7 +2401,8 @@ private RelNode genTableLogicalPlan(String tableAlias, QB qb) throws SemanticExc
List<Interval> intervals = Arrays.asList(DruidTable.DEFAULT_INTERVAL);
DruidTable druidTable = new DruidTable(new DruidSchema(address, address, false),
- dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN, intervals);
+ dataSource, RelDataTypeImpl.proto(rowType), metrics, DruidTable.DEFAULT_TIMESTAMP_COLUMN,
+ intervals, null, null);
final TableScan scan = new HiveTableScan(cluster, cluster.traitSetOf(HiveRelNode.CONVENTION),
optTable, null == tableAlias ? tabMetaData.getTableName() : tableAlias,
getAliasId(tableAlias, qb), HiveConf.getBoolVar(conf,