diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
index a1cb310..e117156 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/optiq/RelOptHiveTable.java
@@ -10,10 +10,12 @@
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hive.common.StatsSetupConst;
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.ql.exec.ColumnInfo;
 import org.apache.hadoop.hive.ql.metadata.Hive;
 import org.apache.hadoop.hive.ql.metadata.HiveException;
+import org.apache.hadoop.hive.ql.metadata.Partition;
 import org.apache.hadoop.hive.ql.metadata.Table;
 import org.apache.hadoop.hive.ql.optimizer.optiq.translator.ExprNodeConverter;
 import org.apache.hadoop.hive.ql.optimizer.ppr.PartitionPruner;
@@ -92,8 +94,29 @@ public RelNode toRel(ToRelContext context) {
 
   @Override
   public double getRowCount() {
-    if (m_rowCount == -1)
-      m_rowCount = StatsUtils.getNumRows(m_hiveTblMetadata);
+    if (m_rowCount == -1) {
+
+      if (m_hiveTblMetadata.isPartitioned()) {
+        if (partitionList == null) {
+          try {
+            List<Partition> parts = Hive.get().getPartitions(m_hiveTblMetadata);
+            List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
+                m_hiveTblMetadata, parts, StatsSetupConst.ROW_COUNT);
+            m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
+
+          } catch (HiveException he) {
+            throw new RuntimeException(he);
+          }
+        } else {
+          List<Long> rowCounts = StatsUtils.getBasicStatForPartitions(
+              m_hiveTblMetadata, partitionList.getNotDeniedPartns(),
+              StatsSetupConst.ROW_COUNT);
+          m_rowCount = StatsUtils.getSumIgnoreNegatives(rowCounts);
+        }
+      } else {
+        m_rowCount = StatsUtils.getNumRows(m_hiveTblMetadata);
+      }
+    }
     return m_rowCount;
   }
 
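Not part of the patch: a minimal standalone sketch of the aggregation idea the new code relies on, assuming (as the method name suggests) that StatsUtils.getSumIgnoreNegatives sums per-partition row counts and skips negative entries, which Hive uses to mark a missing basic stat. The class and helper names below are illustrative only.

```java
import java.util.Arrays;
import java.util.List;

public class RowCountSketch {
  // Sum per-partition row counts, treating negative values as "stat unavailable".
  static long sumIgnoreNegatives(List<Long> counts) {
    long sum = 0;
    for (Long c : counts) {
      if (c != null && c >= 0) {
        sum += c;
      }
    }
    return sum;
  }

  public static void main(String[] args) {
    // -1 stands in for a partition whose rowCount stat is not populated.
    List<Long> perPartitionRowCounts = Arrays.asList(120L, -1L, 80L);
    System.out.println(sumIgnoreNegatives(perPartitionRowCounts)); // prints 200
  }
}
```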