From ce6933d3a40750ecf3d1cc4ffd70400457eebb01 Mon Sep 17 00:00:00 2001
From: kathy.sun <kathy.sun@cloudera.com>
Date: Mon, 25 Jul 2016 14:27:16 -0700
Subject: [PATCH] IMPALA-4024: Add "system" database and expose Impala metrics as a table

This is to expose metadata (the current status of Impala, e.g. Impala metrics)
into system table, so that users could query them via sql.
Currently, only metrics table is included.
We could add other tables later, e.g. a queries table

You can run impala-shell.sh and type query like:
select * from system.metrics;
select name, value, description from system.metrics;

Change-Id: I7adbeb45220c468e43b424d70c30b952f6cec2cd
---

diff --git a/be/src/exec/CMakeLists.txt b/be/src/exec/CMakeLists.txt
index afb1a59..1445e82 100644
--- a/be/src/exec/CMakeLists.txt
+++ b/be/src/exec/CMakeLists.txt
@@ -84,6 +84,8 @@
   sort-exec-exprs.cc
   sort-node.cc
   subplan-node.cc
+  system-table-scan-node.cc
+  system-table-scanner.cc
   text-converter.cc
   topn-node.cc
   topn-node-ir.cc
diff --git a/be/src/exec/exec-node.cc b/be/src/exec/exec-node.cc
index 937c6f2..999cce5 100644
--- a/be/src/exec/exec-node.cc
+++ b/be/src/exec/exec-node.cc
@@ -26,7 +26,6 @@
 #include "codegen/llvm-codegen.h"
 #include "common/object-pool.h"
 #include "common/status.h"
-#include "exprs/expr.h"
 #include "exec/aggregation-node.h"
 #include "exec/analytic-eval-node.h"
 #include "exec/data-source-scan-node.h"
@@ -44,12 +43,14 @@
 #include "exec/singular-row-src-node.h"
 #include "exec/sort-node.h"
 #include "exec/subplan-node.h"
+#include "exec/system-table-scan-node.h"
 #include "exec/topn-node.h"
 #include "exec/union-node.h"
 #include "exec/unnest-node.h"
+#include "exprs/expr.h"
 #include "runtime/descriptors.h"
-#include "runtime/mem-tracker.h"
 #include "runtime/mem-pool.h"
+#include "runtime/mem-tracker.h"
 #include "runtime/row-batch.h"
 #include "runtime/runtime-state.h"
 #include "util/debug-util.h"
@@ -299,6 +300,9 @@
       RETURN_IF_ERROR(CheckKuduAvailability());
       *node = pool->Add(new KuduScanNode(pool, tnode, descs));
       break;
+    case TPlanNodeType::SYSTEM_TABLE_SCAN_NODE:
+      *node = pool->Add(new SystemTableScanNode(pool, tnode, descs));
+      break;
     case TPlanNodeType::AGGREGATION_NODE:
       if (FLAGS_enable_partitioned_aggregation) {
         *node = pool->Add(new PartitionedAggregationNode(pool, tnode, descs));
diff --git a/be/src/exec/system-table-scan-node.cc b/be/src/exec/system-table-scan-node.cc
new file mode 100644
index 0000000..add1d30
--- /dev/null
+++ b/be/src/exec/system-table-scan-node.cc
@@ -0,0 +1,144 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "exec/system-table-scan-node.h"
+
+#include "common/names.h"
+#include "runtime/exec-env.h"
+#include "runtime/mem-pool.h"
+#include "runtime/mem-tracker.h"
+#include "runtime/row-batch.h"
+#include "runtime/runtime-state.h"
+#include "runtime/tuple-row.h"
+#include "util/periodic-counter-updater.h"
+
+#include "gen-cpp/PlanNodes_types.h"
+#include "util/metrics.h"
+
+#include <gutil/strings/substitute.h>
+#include <rapidjson/rapidjson.h>
+#include <rapidjson/stringbuffer.h>
+#include <rapidjson/writer.h>
+#include <algorithm>
+#include <boost/algorithm/string/replace.hpp>
+#include <vector>
+
+using boost::algorithm::replace_all_copy;
+using strings::Substitute;
+
+namespace impala {
+
+SystemTableScanNode::SystemTableScanNode(
+    ObjectPool* pool, const TPlanNode& tnode, const DescriptorTbl& descs)
+  : ScanNode(pool, tnode, descs),
+    scanner_(nullptr),
+    tuple_id_(tnode.system_table_scan_node.tuple_id),
+    tuple_desc_(NULL),
+    num_rows_(0) {
+  table_name_ = tnode.system_table_scan_node.table_name;
+  // Currently, the metrics table is the only table in the system db.
+  DCHECK_EQ(table_name_, TSystemTableName::type::METRICS);
+}
+
+SystemTableScanNode::~SystemTableScanNode() {}
+
+Status SystemTableScanNode::Prepare(RuntimeState* state) {
+  RETURN_IF_ERROR(ScanNode::Prepare(state));
+  tuple_desc_ = state->desc_tbl().GetTupleDescriptor(tuple_id_);
+  DCHECK(tuple_desc_ != NULL);
+  return Status::OK();
+}
+
+Status SystemTableScanNode::Open(RuntimeState* state) {
+  RETURN_IF_ERROR(ExecNode::Open(state));
+  RETURN_IF_CANCELLED(state);
+  RETURN_IF_ERROR(QueryMaintenance(state));
+  SCOPED_TIMER(runtime_profile_->total_time_counter());
+
+  switch (table_name_) {
+    case TSystemTableName::METRICS:
+      scanner_.reset(new MetricScanner());
+      break;
+    default:
+      return Status(Substitute("Unknown table type: $0", table_name_));
+  }
+  RETURN_IF_ERROR(scanner_->Open());
+  return Status::OK();
+}
+
+Status SystemTableScanNode::MaterializeNextTuple(MemPool* tuple_pool, Tuple* tuple) {
+  tuple->Init(tuple_desc_->byte_size());
+  RETURN_IF_ERROR(scanner_->MaterializeNextTuple(tuple_pool, tuple, tuple_desc_));
+  return Status::OK();
+}
+
+Status SystemTableScanNode::GetNext(RuntimeState* state, RowBatch* row_batch, bool* eos) {
+  RETURN_IF_ERROR(ExecDebugAction(TExecNodePhase::GETNEXT, state));
+  RETURN_IF_CANCELLED(state);
+  RETURN_IF_ERROR(QueryMaintenance(state));
+  SCOPED_TIMER(runtime_profile_->total_time_counter());
+  if (ReachedLimit()) {
+    *eos = true;
+    return Status::OK();
+  }
+  *eos = false;
+
+  int64_t tuple_buffer_size;
+  uint8_t* tuple_buffer;
+  RETURN_IF_ERROR(
+      row_batch->ResizeAndAllocateTupleBuffer(state, &tuple_buffer_size, &tuple_buffer));
+  Tuple* tuple = reinterpret_cast<Tuple*>(tuple_buffer);
+  ExprContext** ctxs = &conjunct_ctxs_[0];
+  int num_ctxs = conjunct_ctxs_.size();
+
+  SCOPED_TIMER(materialize_tuple_timer());
+
+  // copy rows until we hit the limit/capacity or until we exhaust input batch
+  while (!ReachedLimit() && !row_batch->AtCapacity() && !scanner_->isEos()) {
+    RETURN_IF_ERROR(MaterializeNextTuple(row_batch->tuple_data_pool(), tuple));
+    int row_idx = row_batch->AddRow();
+    TupleRow* tuple_row = row_batch->GetRow(row_idx);
+    tuple_row->SetTuple(0, tuple);
+
+    if (ExecNode::EvalConjuncts(ctxs, num_ctxs, tuple_row)) {
+      row_batch->CommitLastRow();
+      tuple = reinterpret_cast<Tuple*>(
+          reinterpret_cast<uint8_t*>(tuple) + tuple_desc_->byte_size());
+      ++num_rows_returned_;
+    }
+  }
+  COUNTER_SET(rows_returned_counter_, num_rows_returned_);
+  if (ReachedLimit() || row_batch->AtCapacity() || scanner_->isEos()) {
+    *eos = ReachedLimit() || scanner_->isEos();
+  }
+  return Status::OK();
+}
+
+Status SystemTableScanNode::Reset(RuntimeState* state) {
+  DCHECK(false) << "NYI";
+  return Status("NYI");
+}
+
+void SystemTableScanNode::Close(RuntimeState* state) {
+  if (is_closed()) return;
+  SCOPED_TIMER(runtime_profile_->total_time_counter());
+  PeriodicCounterUpdater::StopRateCounter(total_throughput_counter());
+  PeriodicCounterUpdater::StopTimeSeriesCounter(bytes_read_timeseries_counter_);
+  ExecNode::Close(state);
+}
+
+} // namespace impala
diff --git a/be/src/exec/system-table-scan-node.h b/be/src/exec/system-table-scan-node.h
new file mode 100644
index 0000000..ae5b9b3
--- /dev/null
+++ b/be/src/exec/system-table-scan-node.h
@@ -0,0 +1,79 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef IMPALA_EXEC_SYSTEM_TABLE_SCAN_NODE_H_
+#define IMPALA_EXEC_SYSTEM_TABLE_SCAN_NODE_H_
+
+#include <rapidjson/document.h>
+#include <boost/scoped_ptr.hpp>
+#include "exec/scan-node.h"
+#include "exec/system-table-scanner.h"
+#include "runtime/descriptors.h"
+#include "util/metrics.h"
+
+namespace impala {
+class Tuple;
+
+class SystemTableScanNode : public ScanNode {
+  /// A scan node that exposes Impala system state as a table.
+  ///
+  /// Different SystemTableScanner subclasses gather data from different Impala subsystems
+  /// and
+  /// materialize it into Tuples.
+  /// e.g. MetricScanner materializes Impala metrics into tuples.
+ public:
+  SystemTableScanNode(
+      ObjectPool* pool, const TPlanNode& tnode, const DescriptorTbl& descs);
+  ~SystemTableScanNode();
+
+  /// Create schema and columns to slots mapping.
+  virtual Status Prepare(RuntimeState* state);
+
+  /// Start scan.
+  virtual Status Open(RuntimeState* state);
+
+  /// Fill the next row batch by fetching more data from metrics.
+  virtual Status GetNext(RuntimeState* state, RowBatch* row_batch, bool* eos);
+
+  /// NYI
+  virtual Status Reset(RuntimeState* state);
+
+  /// Close after scan is finished
+  virtual void Close(RuntimeState* state);
+
+ private:
+  // Used to gather data of the system table from the ExecEnv.
+  boost::scoped_ptr<SystemTableScanner> scanner_;
+
+  /// Tuple id resolved in Prepare() to set tuple_desc_
+  const int tuple_id_;
+
+  /// Descriptor of tuples read from SystemTable
+  const TupleDescriptor* tuple_desc_;
+
+  // Enum identifying the system table type, e.g. metrics, queries.
+  TSystemTableName::type table_name_;
+
+  /// NOTE(review): this comment referenced input_batch_->rows (copied from the
+  /// data-source scan node?); num_rows_ appears unused here — confirm or remove.
+  int num_rows_;
+
+  /// Materializes the next row into tuple.
+  Status MaterializeNextTuple(MemPool* mem_pool, Tuple* tuple);
+};
+}
+#endif /* IMPALA_EXEC_SYSTEM_TABLE_SCAN_NODE_H_ */
diff --git a/be/src/exec/system-table-scanner.cc b/be/src/exec/system-table-scanner.cc
new file mode 100644
index 0000000..51acf4b
--- /dev/null
+++ b/be/src/exec/system-table-scanner.cc
@@ -0,0 +1,127 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#include "exec/system-table-scanner.h"
+
+#include "common/names.h"
+#include "gen-cpp/PlanNodes_types.h"
+#include "runtime/exec-env.h"
+#include "runtime/mem-pool.h"
+#include "runtime/mem-tracker.h"
+#include "runtime/row-batch.h"
+#include "runtime/runtime-state.h"
+#include "runtime/tuple-row.h"
+#include "util/metrics.h"
+#include "util/periodic-counter-updater.h"
+
+#include <gutil/strings/substitute.h>
+#include <algorithm>
+#include <boost/algorithm/string/replace.hpp>
+
+using strings::Substitute;
+namespace impala {
+
+const string ERROR_MEM_LIMIT_EXCEEDED =
+    "SystemTableScanNode::$0() failed to allocate $1 bytes for $2.";
+
+Status MetricScanner::Open() {
+  MetricGroup* all_metrics = ExecEnv::GetInstance()->metrics();
+  std::stack<MetricGroup*> groups;
+  groups.push(all_metrics);
+  typedef std::map<std::string, MetricGroup*> ChildGroupMap;
+
+  while (!groups.empty()) {
+    // Depth-first traversal of children to flatten all metrics
+    MetricGroup* group = groups.top();
+    groups.pop();
+    for (auto child : group->getChildren()) {
+      groups.push(child.second);
+    }
+    for (auto it : group->getMetricMap()) {
+      MetricPackage metric_info;
+      metric_info.parent_ = group;
+      metric_info.metric_ = it.second;
+      metric_pool_.push_back(metric_info);
+    }
+  }
+  return Status::OK();
+}
+
+enum MetricsColumn {
+  IMPALAD_ADDRESS,
+  METRIC_GROUP,
+  METRIC_NAME,
+  HUMAN_READABLE,
+  DESCRIPTION,
+};
+
+Status WriteStringSlot(const string& slot_value, MemPool* tuple_pool, void* slot) {
+  size_t value_size = slot_value.size();
+  char* buffer = reinterpret_cast<char*>(tuple_pool->TryAllocate(value_size));
+  if (UNLIKELY(buffer == NULL)) {
+    string details = Substitute(
+        ERROR_MEM_LIMIT_EXCEEDED, "MaterializeNextRow", value_size, "string slot");
+    return tuple_pool->mem_tracker()->MemLimitExceeded(NULL, details, value_size);
+  }
+  memcpy(buffer, slot_value.data(), value_size);
+  reinterpret_cast<StringValue*>(slot)->ptr = buffer;
+  reinterpret_cast<StringValue*>(slot)->len = value_size;
+  return Status::OK();
+}
+
+Status MetricScanner::MaterializeNextTuple(
+    MemPool* tuple_pool, Tuple* tuple, const TupleDescriptor* tuple_desc_) {
+  const TNetworkAddress& backend_address = ExecEnv::GetInstance()->backend_address();
+  const string& node_address =
+      backend_address.hostname + ":" + string(std::to_string(backend_address.port));
+  const string& group_name = metric_pool_[next_row_idx_].parent_->name();
+  const string& metric_name = metric_pool_[next_row_idx_].metric_->key();
+  const string& human_readable = metric_pool_[next_row_idx_].metric_->ToHumanReadable();
+  const string& description = metric_pool_[next_row_idx_].metric_->description();
+
+  for (int i = 0; i < tuple_desc_->slots().size(); ++i) {
+    const SlotDescriptor* slot_desc = tuple_desc_->slots()[i];
+    void* slot = tuple->GetSlot(slot_desc->tuple_offset());
+
+    switch (slot_desc->col_pos()) {
+      case IMPALAD_ADDRESS:
+        RETURN_IF_ERROR(WriteStringSlot(node_address, tuple_pool, slot));
+        break;
+      case METRIC_GROUP:
+        RETURN_IF_ERROR(WriteStringSlot(group_name, tuple_pool, slot));
+        break;
+      case METRIC_NAME:
+        RETURN_IF_ERROR(WriteStringSlot(metric_name, tuple_pool, slot));
+        break;
+      case HUMAN_READABLE:
+        RETURN_IF_ERROR(WriteStringSlot(human_readable, tuple_pool, slot));
+        break;
+      case DESCRIPTION:
+        RETURN_IF_ERROR(WriteStringSlot(description, tuple_pool, slot));
+        break;
+      default:
+        DCHECK(false) << "Unknown column position " << slot_desc->col_pos();
+    }
+  }
+  ++next_row_idx_;
+  if (next_row_idx_ >= metric_pool_.size()) {
+    setEos(true);
+  }
+  return Status::OK();
+}
+
+} /* namespace impala */
diff --git a/be/src/exec/system-table-scanner.h b/be/src/exec/system-table-scanner.h
new file mode 100644
index 0000000..9703522
--- /dev/null
+++ b/be/src/exec/system-table-scanner.h
@@ -0,0 +1,81 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef IMPALA_SYSTEM_TABLE_SCANNER_H_
+#define IMPALA_SYSTEM_TABLE_SCANNER_H_
+
+#include <gutil/strings/substitute.h>
+#include <boost/scoped_ptr.hpp>
+#include <vector>
+
+#include "exec/scan-node.h"
+#include "runtime/descriptors.h"
+#include "util/metrics.h"
+
+namespace impala {
+
+// SystemTableScanner encapsulates the scan logic of a specific system table.
+// For each table in the "system" database, we should implement a scanner that
+// can materialize the table's rows.
+class SystemTableScanner {
+ public:
+  SystemTableScanner() : eos_(false){};
+  virtual ~SystemTableScanner(){};
+
+  /// Start scan, load data needed
+  virtual Status Open() = 0;
+
+  /// Fill the next row batch by fetching more data from system table data source.
+  virtual Status MaterializeNextTuple(
+      MemPool* tuple_pool, Tuple* tuple, const TupleDescriptor* tuple_desc_) = 0;
+
+  bool isEos() const { return eos_; }
+
+  void setEos(bool eos) { eos_ = eos; }
+
+ private:
+  /// If true, nothing is left to return from GetNext() in SystemTableScanNode.
+  bool eos_;
+};
+
+struct MetricPackage {
+  Metric* metric_;
+  MetricGroup* parent_;
+};
+
+class MetricScanner : public SystemTableScanner {
+ public:
+  MetricScanner() : SystemTableScanner(), next_row_idx_(0){};
+
+  /// Start the scan; loads all metrics into metric_pool_.
+  virtual Status Open();
+
+  /// Fill the next row batch by fetching more data from metric_pool.
+  virtual Status MaterializeNextTuple(
+      MemPool* tuple_pool, Tuple* tuple, const TupleDescriptor* tuple_desc_);
+
+ private:
+  /// store all metrics, loaded in Open().
+  vector<MetricPackage> metric_pool_;
+
+  /// The index of the next row to materialize from metric_pool_.
+  int next_row_idx_;
+};
+
+} /* namespace impala */
+
+#endif /* SYSTEM_TABLE_SCANNER_H_ */
diff --git a/be/src/runtime/coordinator.cc b/be/src/runtime/coordinator.cc
index 66c792c..b34cc48 100644
--- a/be/src/runtime/coordinator.cc
+++ b/be/src/runtime/coordinator.cc
@@ -1869,6 +1869,9 @@
       case TPlanNodeType::DATA_SOURCE_NODE:
         tuple_ids.insert(plan_node.data_source_node.tuple_id);
         break;
+      case TPlanNodeType::SYSTEM_TABLE_SCAN_NODE:
+        tuple_ids.insert(plan_node.system_table_scan_node.tuple_id);
+        break;
       case TPlanNodeType::HASH_JOIN_NODE:
       case TPlanNodeType::AGGREGATION_NODE:
       case TPlanNodeType::SORT_NODE:
diff --git a/be/src/runtime/descriptors.cc b/be/src/runtime/descriptors.cc
index 548370a..de5ef18 100644
--- a/be/src/runtime/descriptors.cc
+++ b/be/src/runtime/descriptors.cc
@@ -308,6 +308,16 @@
   return out.str();
 }
 
+SystemTableDescriptor::SystemTableDescriptor(const TTableDescriptor& tdesc)
+  : TableDescriptor(tdesc), table_name_(tdesc.systemTable.table_name) {}
+
+string SystemTableDescriptor::DebugString() const {
+  stringstream out;
+  out << "SystemTable(" << TableDescriptor::DebugString() << " table=" << table_name_
+      << ")";
+  return out.str();
+}
+
 TupleDescriptor::TupleDescriptor(const TTupleDescriptor& tdesc)
   : id_(tdesc.id),
     table_desc_(NULL),
@@ -501,6 +511,9 @@
       case TTableType::KUDU_TABLE:
         desc = pool->Add(new KuduTableDescriptor(tdesc));
         break;
+      case TTableType::SYSTEM_TABLE:
+        desc = pool->Add(new SystemTableDescriptor(tdesc));
+        break;
       default:
         DCHECK(false) << "invalid table type: " << tdesc.tableType;
     }
diff --git a/be/src/runtime/descriptors.h b/be/src/runtime/descriptors.h
index 10c2ff3..0bd23b5 100644
--- a/be/src/runtime/descriptors.h
+++ b/be/src/runtime/descriptors.h
@@ -28,7 +28,8 @@
 #include "common/global-types.h"
 #include "runtime/types.h"
 
-#include "gen-cpp/Descriptors_types.h"  // for TTupleId
+#include "gen-cpp/Descriptors_types.h" // for TTupleId
+#include "gen-cpp/PlanNodes_types.h"
 #include "gen-cpp/Types_types.h"
 
 namespace llvm {
@@ -360,6 +361,16 @@
   std::vector<std::string> master_addresses_;
 };
 
+// Descriptor for a SystemTable
+class SystemTableDescriptor : public TableDescriptor {
+ public:
+  SystemTableDescriptor(const TTableDescriptor& tdesc);
+  virtual std::string DebugString() const;
+
+ private:
+  TSystemTableName::type table_name_;
+};
+
 class TupleDescriptor {
  public:
   int byte_size() const { return byte_size_; }
diff --git a/be/src/scheduling/simple-scheduler.cc b/be/src/scheduling/simple-scheduler.cc
index 5c05557..60e3338 100644
--- a/be/src/scheduling/simple-scheduler.cc
+++ b/be/src/scheduling/simple-scheduler.cc
@@ -558,6 +558,7 @@
   scan_node_types.push_back(TPlanNodeType::HBASE_SCAN_NODE);
   scan_node_types.push_back(TPlanNodeType::DATA_SOURCE_NODE);
   scan_node_types.push_back(TPlanNodeType::KUDU_SCAN_NODE);
+  scan_node_types.push_back(TPlanNodeType::SYSTEM_TABLE_SCAN_NODE);
 
   // compute hosts of producer fragment before those of consumer fragment(s),
   // the latter might inherit the set of hosts from the former
diff --git a/be/src/scheduling/simple-scheduler.h b/be/src/scheduling/simple-scheduler.h
index a6ab93e..a64328d 100644
--- a/be/src/scheduling/simple-scheduler.h
+++ b/be/src/scheduling/simple-scheduler.h
@@ -94,14 +94,16 @@
   virtual void HandlePreemptedResource(const TUniqueId& client_resource_id);
   virtual void HandleLostResource(const TUniqueId& client_resource_id);
 
+  typedef std::list<TBackendDescriptor> BackendList;
+  /// Return a list of all backends registered with the scheduler.
+  void GetAllKnownBackends(BackendList* backends);
+
  private:
   /// Type to store hostnames, which can be rfc1123 hostnames or IPv4 addresses.
   typedef std::string Hostname;
 
   /// Type to store IPv4 addresses.
   typedef std::string IpAddr;
-
-  typedef std::list<TBackendDescriptor> BackendList;
 
   /// Map from a host's IP address to a list of backends running on that node.
   typedef boost::unordered_map<IpAddr, BackendList> BackendMap;
@@ -381,9 +383,6 @@
   /// protecting the access with backend_config_lock_.
   BackendConfigPtr GetBackendConfig() const;
   void SetBackendConfig(const BackendConfigPtr& backend_config);
-
-  /// Return a list of all backends registered with the scheduler.
-  void GetAllKnownBackends(BackendList* backends);
 
   /// Add the granted reservation and resources to the active_reservations_ and
   /// active_client_resources_ maps, respectively.
diff --git a/be/src/service/fe-support.cc b/be/src/service/fe-support.cc
index f3fc445..ef1cb1a 100644
--- a/be/src/service/fe-support.cc
+++ b/be/src/service/fe-support.cc
@@ -335,6 +335,25 @@
   return result_bytes;
 }
 
+extern "C" JNIEXPORT jbyteArray JNICALL
+Java_com_cloudera_impala_service_FeSupport_NativeGetBackends(
+    JNIEnv* env, jclass caller_class) {
+  TBackendsList backends_container;
+  ExecEnv* exec_env = ExecEnv::GetInstance();
+  SimpleScheduler* simple_scheduler = (SimpleScheduler*)exec_env->scheduler();
+
+  typedef std::list<TBackendDescriptor> BackendList;
+  BackendList backendList;
+  simple_scheduler->GetAllKnownBackends(&backendList);
+
+  vector<TBackendDescriptor> vec1(backendList.begin(), backendList.end());
+  backends_container.__set_backend_descs(vec1);
+  jbyteArray result_bytes = NULL;
+  THROW_IF_ERROR_RET(SerializeThriftMsg(env, &backends_container, &result_bytes), env,
+      JniUtil::internal_exc_class(), result_bytes);
+  return result_bytes;
+}
+
 namespace impala {
 
 static JNINativeMethod native_methods[] = {
@@ -362,6 +381,10 @@
     (char*)"NativeGetStartupOptions", (char*)"()[B",
     (void*)::Java_com_cloudera_impala_service_FeSupport_NativeGetStartupOptions
   },
+  {
+    (char*)"NativeGetBackends", (char*)"()[B",
+    (void*)::Java_com_cloudera_impala_service_FeSupport_NativeGetBackends
+  },
 };
 
 void InitFeSupport() {
diff --git a/be/src/util/metrics.h b/be/src/util/metrics.h
index 8b3942d..f007487 100644
--- a/be/src/util/metrics.h
+++ b/be/src/util/metrics.h
@@ -309,6 +309,12 @@
 
   const std::string& name() const { return name_; }
 
+  typedef std::map<std::string, MetricGroup*> ChildGroupMap;
+  const ChildGroupMap& getChildren() const { return children_; }
+
+  typedef std::map<std::string, Metric*> MetricMap;
+  const MetricMap& getMetricMap() const { return metric_map_; }
+
  private:
   /// Pool containing all metric objects
   boost::scoped_ptr<ObjectPool> obj_pool_;
@@ -320,11 +326,9 @@
   SpinLock lock_;
 
   /// Contains all Metric objects, indexed by key
-  typedef std::map<std::string, Metric*> MetricMap;
   MetricMap metric_map_;
 
   /// All child metric groups
-  typedef std::map<std::string, MetricGroup*> ChildGroupMap;
   ChildGroupMap children_;
 
   /// Webserver callback for /metrics. Produces a tree of JSON values, each representing a
diff --git a/common/thrift/CatalogObjects.thrift b/common/thrift/CatalogObjects.thrift
index 46370fb..bb22f38 100644
--- a/common/thrift/CatalogObjects.thrift
+++ b/common/thrift/CatalogObjects.thrift
@@ -47,6 +47,7 @@
   VIEW,
   DATA_SOURCE_TABLE,
   KUDU_TABLE,
+  SYSTEM_TABLE
 }
 
 enum THdfsFileFormat {
@@ -348,6 +349,16 @@
   3: required list<string> key_columns
 }
 
+enum TSystemTableName {
+  METRICS,
+  QUERIES
+}
+
+// Represents a System Table
+struct TSystemTable {
+  1: required TSystemTableName table_name
+}
+
 // Represents a table or view.
 struct TTable {
   // Name of the parent database. Case insensitive, expected to be stored as lowercase.
@@ -395,6 +406,9 @@
 
   // Set iff this a kudu table
   14: optional TKuduTable kudu_table
+
+  // Set iff this is a system table
+  15: optional TSystemTable system_table
 }
 
 // Represents a database.
diff --git a/common/thrift/Descriptors.thrift b/common/thrift/Descriptors.thrift
index 40dcd16..0c4d73d 100644
--- a/common/thrift/Descriptors.thrift
+++ b/common/thrift/Descriptors.thrift
@@ -68,6 +68,7 @@
   6: optional CatalogObjects.THBaseTable hbaseTable
   9: optional CatalogObjects.TDataSourceTable dataSourceTable
   10: optional CatalogObjects.TKuduTable kuduTable
+  11: optional CatalogObjects.TSystemTable systemTable
 
   // Unqualified name of table
   7: required string tableName
diff --git a/common/thrift/PlanNodes.thrift b/common/thrift/PlanNodes.thrift
index 3e31120..42d0f4c 100644
--- a/common/thrift/PlanNodes.thrift
+++ b/common/thrift/PlanNodes.thrift
@@ -46,7 +46,8 @@
   SINGULAR_ROW_SRC_NODE,
   UNNEST_NODE,
   SUBPLAN_NODE,
-  KUDU_SCAN_NODE
+  KUDU_SCAN_NODE,
+  SYSTEM_TABLE_SCAN_NODE
 }
 
 // phases of an execution node
@@ -250,6 +251,11 @@
 
   // List of conjuncts that can be pushed down to Kudu.
   2: optional list<Exprs.TExpr> kudu_conjuncts
+}
+
+struct TSystemTableScanNode {
+  1: required Types.TTupleId tuple_id
+  2: required CatalogObjects.TSystemTableName table_name
 }
 
 struct TEqJoinCondition {
@@ -475,6 +481,7 @@
   9: optional THBaseScanNode hbase_scan_node
   23: optional TKuduScanNode kudu_scan_node
   10: optional TDataSourceScanNode data_source_node
+  24: optional TSystemTableScanNode system_table_scan_node
   11: optional THashJoinNode hash_join_node
   12: optional TNestedLoopJoinNode nested_loop_join_node
   13: optional TAggregationNode agg_node
diff --git a/common/thrift/StatestoreService.thrift b/common/thrift/StatestoreService.thrift
index a1dcc24..6155d6a 100644
--- a/common/thrift/StatestoreService.thrift
+++ b/common/thrift/StatestoreService.thrift
@@ -63,6 +63,11 @@
   4: optional bool secure_webserver;
 }
 
+// a list of TBackendDescriptor
+struct TBackendsList {
+  1: required list<TBackendDescriptor> backend_descs;
+}
+
 // Description of a single entry in a topic
 struct TTopicItem {
   // Human-readable topic entry identifier
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java b/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java
index 006474d..ece4e75 100644
--- a/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java
+++ b/fe/src/main/java/com/cloudera/impala/analysis/AnalysisContext.java
@@ -523,6 +523,7 @@
    * Throws an AuthorizationException if the dbName is a system db
    * and the user is trying to modify it.
    * Returns true if this is a system db and the action is allowed.
+   * Returns false if authorization should be checked in the usual way.
    */
   private boolean checkSystemDbAccess(String dbName, Privilege privilege)
       throws AuthorizationException {
@@ -532,6 +533,9 @@
         case VIEW_METADATA:
         case ANY:
           return true;
+        case SELECT:
+          // Check authorization for SELECT on system tables in the usual way.
+          return false;
         default:
           throw new AuthorizationException("Cannot modify system database.");
       }
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java b/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java
index a931489..c7783c9 100644
--- a/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java
+++ b/fe/src/main/java/com/cloudera/impala/analysis/Analyzer.java
@@ -46,6 +46,7 @@
 import com.cloudera.impala.catalog.Db;
 import com.cloudera.impala.catalog.HBaseTable;
 import com.cloudera.impala.catalog.HdfsTable;
+import com.cloudera.impala.catalog.SystemTable;
 import com.cloudera.impala.catalog.ImpaladCatalog;
 import com.cloudera.impala.catalog.KuduTable;
 import com.cloudera.impala.catalog.Table;
@@ -562,10 +563,9 @@
       Preconditions.checkNotNull(table);
       if (table instanceof View) return new InlineViewRef((View) table, tableRef);
       // The table must be a base table.
-      Preconditions.checkState(table instanceof HdfsTable ||
-          table instanceof KuduTable ||
-          table instanceof HBaseTable ||
-          table instanceof DataSourceTable);
+      Preconditions.checkState(table instanceof HdfsTable || table instanceof KuduTable
+          || table instanceof HBaseTable || table instanceof DataSourceTable
+          || table instanceof SystemTable);
       return new BaseTableRef(tableRef, resolvedPath);
     } else {
       return new CollectionTableRef(tableRef, resolvedPath);
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java
index b1f9b95..631659a 100644
--- a/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java
+++ b/fe/src/main/java/com/cloudera/impala/analysis/DescribeTableStmt.java
@@ -27,6 +27,8 @@
 import com.cloudera.impala.authorization.Privilege;
 import com.cloudera.impala.authorization.PrivilegeRequestBuilder;
 import com.cloudera.impala.catalog.StructType;
+import com.cloudera.impala.catalog.SystemTable;
+import com.cloudera.impala.catalog.Table;
 import com.cloudera.impala.catalog.TableLoadingException;
 import com.cloudera.impala.common.AnalysisException;
 import com.cloudera.impala.thrift.TDescribeOutputStyle;
@@ -123,7 +125,15 @@
     }
 
     tableName_ = analyzer.getFqTableName(path_.getRootTable().getTableName());
-    analyzer.getTable(tableName_, getPrivilegeRequirement());
+    Table table = analyzer.getTable(tableName_, getPrivilegeRequirement());
+
+    if (table instanceof SystemTable
+        && (outputStyle_ == TDescribeOutputStyle.FORMATTED
+        || outputStyle_ == TDescribeOutputStyle.EXTENDED)) {
+      throw new AnalysisException(String.format("User '%s' does not have " +
+          "privileges to see table on: 'system' database",
+          analyzer.getUser().getName()));
+    }
 
     if (path_.destTable() != null) {
       resultStruct_ = path_.getRootTable().getHiveColumnsAsStruct();
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java b/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java
index c0d7571..c8fbc04 100644
--- a/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java
+++ b/fe/src/main/java/com/cloudera/impala/analysis/DescriptorTable.java
@@ -24,6 +24,7 @@
 
 import org.apache.commons.lang.StringUtils;
 
+import com.cloudera.impala.catalog.SystemTable;
 import com.cloudera.impala.catalog.Table;
 import com.cloudera.impala.catalog.View;
 import com.cloudera.impala.common.IdGenerator;
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java b/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java
index 68b593c..f906423 100644
--- a/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java
+++ b/fe/src/main/java/com/cloudera/impala/analysis/ShowCreateTableStmt.java
@@ -18,6 +18,7 @@
 package com.cloudera.impala.analysis;
 
 import com.cloudera.impala.authorization.Privilege;
+import com.cloudera.impala.catalog.SystemTable;
 import com.cloudera.impala.catalog.Table;
 import com.cloudera.impala.catalog.View;
 import com.cloudera.impala.common.AnalysisException;
@@ -68,6 +69,10 @@
       // statement references a column by its implicitly defined column names.
       viewAnalyzer.setUseHiveColLabels(true);
       viewQuery.analyze(viewAnalyzer);
+    } else if (table instanceof SystemTable) {
+      throw new AnalysisException(String.format("User '%s' does not have " +
+          "privileges to see detailed infomation about table on: 'system' database",
+          analyzer.getUser().getName()));
     }
   }
 
diff --git a/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java b/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java
index 145a10b..8de47b8 100644
--- a/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java
+++ b/fe/src/main/java/com/cloudera/impala/analysis/TupleDescriptor.java
@@ -27,8 +27,10 @@
 
 import com.cloudera.impala.catalog.ColumnStats;
 import com.cloudera.impala.catalog.HdfsTable;
 import com.cloudera.impala.catalog.StructType;
+import com.cloudera.impala.catalog.SystemTable;
 import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.TableId;
 import com.cloudera.impala.catalog.View;
 import com.cloudera.impala.thrift.TTupleDescriptor;
 import com.google.common.base.Joiner;
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java b/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java
index 4cd1c42..fbc5dfd 100644
--- a/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java
+++ b/fe/src/main/java/com/cloudera/impala/catalog/Catalog.java
@@ -63,6 +63,7 @@
   private static final int META_STORE_CLIENT_POOL_SIZE = 10;
 
   public static final String BUILTINS_DB = "_impala_builtins";
+  public static final String SYSTEM_DB = "system";
 
   protected final MetaStoreClientPool metaStoreClientPool_ = new MetaStoreClientPool(0);
 
@@ -79,6 +80,8 @@
 
   // DB that contains all builtins
   private static Db builtinsDb_;
+  // System database, containing system table, e.g. "metrics"
+  private static Db systemDb_;
 
   // Cache of data sources.
   protected final CatalogObjectCache<DataSource> dataSources_;
@@ -99,9 +102,12 @@
     dataSources_ = new CatalogObjectCache<DataSource>();
     builtinsDb_ = new BuiltinsDb(BUILTINS_DB, this);
     addDb(builtinsDb_);
+    systemDb_ = new SystemDb(SYSTEM_DB, this);
+    addDb(systemDb_);
   }
 
   public Db getBuiltinsDb() { return builtinsDb_; }
+  public Db getSystemDb() { return systemDb_; }
 
   /**
    * Adds a new database to the catalog, replacing any existing database with the same
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/SystemDb.java b/fe/src/main/java/com/cloudera/impala/catalog/SystemDb.java
new file mode 100644
index 0000000..4e6bd0a
--- /dev/null
+++ b/fe/src/main/java/com/cloudera/impala/catalog/SystemDb.java
@@ -0,0 +1,49 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.catalog;
+
+import java.util.Collections;
+
+import org.apache.hadoop.hive.metastore.api.Database;
+
+public final class SystemDb extends Db {
+  public SystemDb(String name, Catalog catalog) {
+    super(name, catalog, createMetastoreDb(name));
+    addMetricTable();
+    setIsSystemDb(true);
+  }
+
+  private void addMetricTable() {
+    // Table for all Impala daemon metrics.
+    Table table = new SystemTable(
+        new TableId(TableId.SYSTEM_METRICS), null, this, "metrics", "system");
+    table.addColumn(new Column("impalad_address", Type.STRING, 0));
+    table.addColumn(new Column("group", Type.STRING, 1));
+    table.addColumn(new Column("name", Type.STRING, 2));
+    table.addColumn(new Column("value", Type.STRING, 3));
+    table.addColumn(new Column("description", Type.STRING, 4));
+    addTable(table);
+  }
+
+  private static final String SYSTEM_DB_COMMENT = "System database for Impala cluster";
+
+  private static Database createMetastoreDb(String name) {
+    return new org.apache.hadoop.hive.metastore.api.Database(
+        name, SYSTEM_DB_COMMENT, "", Collections.<String, String>emptyMap());
+  }
+}
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/SystemTable.java b/fe/src/main/java/com/cloudera/impala/catalog/SystemTable.java
new file mode 100644
index 0000000..a6d0970
--- /dev/null
+++ b/fe/src/main/java/com/cloudera/impala/catalog/SystemTable.java
@@ -0,0 +1,107 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.catalog;
+
+import java.util.HashMap;
+import java.util.Set;
+
+import org.apache.hadoop.hive.metastore.IMetaStoreClient;
+
+import com.cloudera.impala.thrift.TCatalogObjectType;
+import com.cloudera.impala.thrift.TColumn;
+import com.cloudera.impala.thrift.TResultSet;
+import com.cloudera.impala.thrift.TResultSetMetadata;
+import com.cloudera.impala.thrift.TSystemTable;
+import com.cloudera.impala.thrift.TSystemTableName;
+import com.cloudera.impala.thrift.TTable;
+import com.cloudera.impala.thrift.TTableDescriptor;
+import com.cloudera.impala.thrift.TTableType;
+import com.cloudera.impala.util.TResultRowBuilder;
+import com.google.common.base.Preconditions;
+
+public final class SystemTable extends Table {
+  private static final HashMap<String, TSystemTableName> SYSTEM_TABLE_NAME_MAP =
+      new HashMap<String, TSystemTableName>();
+  static {
+    SYSTEM_TABLE_NAME_MAP.put("metrics", TSystemTableName.METRICS);
+    SYSTEM_TABLE_NAME_MAP.put("queries", TSystemTableName.QUERIES);
+  }
+
+  public SystemTable(TableId id, org.apache.hadoop.hive.metastore.api.Table msTable,
+      Db db, String name, String owner) {
+    super(id, msTable, db, name, owner);
+    systemTableName_ = SYSTEM_TABLE_NAME_MAP.get(name);
+  }
+
+  private TSystemTableName systemTableName_;
+  public TSystemTableName getSystemTableName_() { return systemTableName_; }
+
+  @Override
+  public TTableDescriptor toThriftDescriptor(Set<Long> referencedPartitions) {
+    // Create thrift descriptors to send to the BE.
+    TTableDescriptor tableDescriptor =
+        new TTableDescriptor(id_.asInt(), TTableType.SYSTEM_TABLE,
+            getTColumnDescriptors(), numClusteringCols_, name_, db_.getName());
+    tableDescriptor.setSystemTable(getTSystemTable());
+    return tableDescriptor;
+  }
+
+  /**
+   * Returns a thrift structure for the system table.
+   */
+  private TSystemTable getTSystemTable() { return new TSystemTable(systemTableName_); }
+
+  @Override
+  public TCatalogObjectType getCatalogObjectType() {
+    return TCatalogObjectType.TABLE;
+  }
+
+  @Override
+  public void load(boolean reuseMetadata, IMetaStoreClient client,
+      org.apache.hadoop.hive.metastore.api.Table msTbl) throws TableLoadingException {
+    // Table is always loaded.
+    Preconditions.checkState(false);
+  }
+
+  /**
+   * Returns a thrift structure representing the table.
+   */
+  @Override
+  public TTable toThrift() {
+    TTable table = super.toThrift();
+    table.setTable_type(TTableType.SYSTEM_TABLE);
+    table.setSystem_table(getTSystemTable());
+    return table;
+  }
+
+  /**
+   * Returns statistics on this table as a tabular result set. Used for the SHOW
+   * TABLE STATS statement. The schema of the returned TResultSet is set inside
+   * this method.
+   */
+  public TResultSet getTableStats() {
+    TResultSet result = new TResultSet();
+    TResultSetMetadata resultSchema = new TResultSetMetadata();
+    resultSchema.addToColumns(new TColumn("#Rows", Type.BIGINT.toThrift()));
+    result.setSchema(resultSchema);
+    TResultRowBuilder rowBuilder = new TResultRowBuilder();
+    rowBuilder.add(numRows_);
+    result.addToRows(rowBuilder.get());
+    return result;
+  }
+}
diff --git a/fe/src/main/java/com/cloudera/impala/catalog/TableId.java b/fe/src/main/java/com/cloudera/impala/catalog/TableId.java
index 1918029..e04144e 100644
--- a/fe/src/main/java/com/cloudera/impala/catalog/TableId.java
+++ b/fe/src/main/java/com/cloudera/impala/catalog/TableId.java
@@ -39,4 +39,8 @@
    * Returns an invalid table id intended for temporary use, e.g., for CTAS.
    */
   public static TableId createInvalidId() { return new TableId(INVALID_ID); }
+
+  // System table ids start from -2 and form a descending sequence of negative
+  // numbers (-2, -3, -4, ...).
+  public static final int SYSTEM_METRICS = -2;
 }
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java b/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java
index 2212d35..bc4cb00 100644
--- a/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java
+++ b/fe/src/main/java/com/cloudera/impala/planner/SingleNodePlanner.java
@@ -60,6 +60,7 @@
 import com.cloudera.impala.catalog.HBaseTable;
 import com.cloudera.impala.catalog.HdfsPartition;
 import com.cloudera.impala.catalog.HdfsTable;
 import com.cloudera.impala.catalog.KuduTable;
+import com.cloudera.impala.catalog.SystemTable;
 import com.cloudera.impala.catalog.Table;
 import com.cloudera.impala.catalog.Type;
@@ -1252,10 +1253,14 @@
     } else if (table instanceof HBaseTable) {
       // HBase table
       scanNode = new HBaseScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
-    } else if (tblRef.getTable() instanceof KuduTable) {
+    } else if (table instanceof KuduTable) {
       scanNode = new KuduScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
       scanNode.init(analyzer);
       return scanNode;
+    } else if (table instanceof SystemTable) {
+      scanNode = new SystemTableScanNode(ctx_.getNextNodeId(), tblRef.getDesc());
+      scanNode.init(analyzer);
+      return scanNode;
     } else {
       throw new NotImplementedException(
           "Planning not implemented for table ref class: " + tblRef.getClass());
diff --git a/fe/src/main/java/com/cloudera/impala/planner/SystemTableScanNode.java b/fe/src/main/java/com/cloudera/impala/planner/SystemTableScanNode.java
new file mode 100644
index 0000000..6c0174a
--- /dev/null
+++ b/fe/src/main/java/com/cloudera/impala/planner/SystemTableScanNode.java
@@ -0,0 +1,145 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements.  See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership.  The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License.  You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied.  See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package com.cloudera.impala.planner;
+
+import java.util.List;
+
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.cloudera.impala.analysis.Analyzer;
+import com.cloudera.impala.analysis.TupleDescriptor;
+import com.cloudera.impala.catalog.SystemTable;
+import com.cloudera.impala.common.ImpalaException;
+import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.service.FeSupport;
+import com.cloudera.impala.thrift.TBackendDescriptor;
+import com.cloudera.impala.thrift.TBackendsList;
+import com.cloudera.impala.thrift.TExplainLevel;
+import com.cloudera.impala.thrift.TNetworkAddress;
+import com.cloudera.impala.thrift.TPlanNode;
+import com.cloudera.impala.thrift.TPlanNodeType;
+import com.cloudera.impala.thrift.TScanRange;
+import com.cloudera.impala.thrift.TScanRangeLocation;
+import com.cloudera.impala.thrift.TScanRangeLocations;
+import com.cloudera.impala.thrift.TSystemTableScanNode;
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+
+public class SystemTableScanNode extends ScanNode {
+  private static final Logger LOG = LoggerFactory.getLogger(SystemTableScanNode.class);
+  private static final int ESTIMATED_NUM_ROWS_PER_NODE = 300;
+
+  public SystemTableScanNode(PlanNodeId id, TupleDescriptor desc) {
+    super(id, desc, "SCAN SYSTEM_TABLE");
+    table_ = (SystemTable) desc_.getTable();
+  }
+
+  private final SystemTable table_;
+
+  @Override
+  public void init(Analyzer analyzer) throws ImpalaException {
+    checkForSupportedFileFormats();
+    assignConjuncts(analyzer);
+    analyzer.createEquivConjuncts(tupleIds_.get(0), conjuncts_);
+    conjuncts_ = orderConjunctsByCost(conjuncts_);
+    computeStats(analyzer);
+    // materialize slots in remaining conjuncts_
+    analyzer.materializeSlots(conjuncts_);
+    computeMemLayout(analyzer);
+    computeScanRangeLocations(analyzer);
+  }
+
+  /**
+   * Create a single scan range for each node in cluster.
+   */
+  private void computeScanRangeLocations(Analyzer analyzer) throws InternalException {
+    TBackendsList backendsContainer = FeSupport.GetBackends();
+    List<TBackendDescriptor> backends = backendsContainer.getBackend_descs();
+
+    scanRanges_ = Lists.newArrayList();
+    for (int i = 0; i < backends.size(); ++i) {
+      TNetworkAddress networkAddress = backends.get(i).getAddress();
+      // Translate from network address to the global (to this request) host index.
+      Integer globalHostIdx = analyzer.getHostIndex().getIndex(networkAddress);
+
+      TScanRangeLocations scanRangeLocations = new TScanRangeLocations();
+      scanRangeLocations.scan_range = new TScanRange();
+      scanRangeLocations.locations =
+          Lists.newArrayList(new TScanRangeLocation(globalHostIdx));
+
+      scanRanges_.add(scanRangeLocations);
+    }
+  }
+
+  @Override
+  public void computeStats(Analyzer analyzer) {
+    super.computeStats(analyzer);
+    numNodes_ = getNumNodes();
+    int numRowsEstimate = numNodes_ * ESTIMATED_NUM_ROWS_PER_NODE;
+    inputCardinality_ = numRowsEstimate;
+    cardinality_ = numRowsEstimate;
+    cardinality_ *= computeSelectivity();
+    cardinality_ = Math.max(1, cardinality_);
+    cardinality_ = capAtLimit(cardinality_);
+
+    LOG.debug("computeStats SystemTableScan: cardinality=" + Long.toString(cardinality_));
+    LOG.debug("computeStats SystemTableScan: #nodes=" + Integer.toString(numNodes_));
+  }
+
+  @Override
+  protected String debugString() {
+    return Objects.toStringHelper(this)
+        .add("tid", desc_.getId().asInt())
+        .add("TblName", desc_.getTable().getFullName())
+        .addValue(super.debugString())
+        .toString();
+  }
+
+  @Override
+  protected void toThrift(TPlanNode msg) {
+    msg.node_type = TPlanNodeType.SYSTEM_TABLE_SCAN_NODE;
+    msg.system_table_scan_node =
+        new TSystemTableScanNode(desc_.getId().asInt(), table_.getSystemTableName_());
+  }
+
+  @Override
+  protected String getNodeExplainString(
+      String prefix, String detailPrefix, TExplainLevel detailLevel) {
+    StringBuilder output = new StringBuilder();
+    String aliasStr = "";
+    if (!table_.getFullName().equalsIgnoreCase(desc_.getAlias())
+        && !table_.getName().equalsIgnoreCase(desc_.getAlias())) {
+      aliasStr = " " + desc_.getAlias();
+    }
+
+    output.append(String.format("%s%s:%s [%s%s]\n", prefix, id_.toString(), displayName_,
+        table_.getFullName(), aliasStr));
+
+    if (!conjuncts_.isEmpty()) {
+      output.append(prefix + "predicates: " + getExplainString(conjuncts_) + "\n");
+    }
+
+    // Add table and column stats in verbose mode.
+    if (detailLevel == TExplainLevel.VERBOSE) {
+      output.append(getStatsExplainString(prefix, detailLevel));
+      output.append("\n");
+    }
+    return output.toString();
+  }
+}
diff --git a/fe/src/main/java/com/cloudera/impala/service/FeSupport.java b/fe/src/main/java/com/cloudera/impala/service/FeSupport.java
index 4014129..10b5797 100644
--- a/fe/src/main/java/com/cloudera/impala/service/FeSupport.java
+++ b/fe/src/main/java/com/cloudera/impala/service/FeSupport.java
@@ -33,6 +33,8 @@
 import com.cloudera.impala.analysis.NullLiteral;
 import com.cloudera.impala.analysis.TableName;
 import com.cloudera.impala.common.InternalException;
+import com.cloudera.impala.thrift.TBackendDescriptor;
+import com.cloudera.impala.thrift.TBackendsList;
 import com.cloudera.impala.thrift.TCacheJarParams;
 import com.cloudera.impala.thrift.TCacheJarResult;
 import com.cloudera.impala.thrift.TCatalogObject;
@@ -86,6 +88,9 @@
 
   // Return select BE startup options as a serialized TStartupOptions
   public native static byte[] NativeGetStartupOptions();
+
+  // Returns a serialized TBackendsList
+  public native static byte[] NativeGetBackends();
 
   /**
    * Locally caches the jar at the specified HDFS location.
@@ -272,8 +277,22 @@
       deserializer.deserialize(options, result);
       return options;
     } catch (TException e) {
-      throw new InternalException("Error retrieving startup options: " + e.getMessage(),
-          e);
+      throw new InternalException(
+          "Error retrieving startup options: " + e.getMessage(), e);
+    }
+  }
+
+  public static TBackendsList GetBackends() throws InternalException {
+    try {
+      byte[] result = NativeGetBackends();
+      Preconditions.checkNotNull(result);
+      TDeserializer deserializer = new TDeserializer(new TBinaryProtocol.Factory());
+      TBackendsList backendsList = new TBackendsList();
+      deserializer.deserialize(backendsList, result);
+      return backendsList;
+    } catch (TException e) {
+      throw new InternalException(
+          "Error retrieving backends: " + e.getMessage(), e);
     }
   }
 
diff --git a/fe/src/main/java/com/cloudera/impala/service/Frontend.java b/fe/src/main/java/com/cloudera/impala/service/Frontend.java
index 83af818..f18ec8b 100644
--- a/fe/src/main/java/com/cloudera/impala/service/Frontend.java
+++ b/fe/src/main/java/com/cloudera/impala/service/Frontend.java
@@ -83,6 +83,7 @@
 import com.cloudera.impala.catalog.Function;
 import com.cloudera.impala.catalog.HBaseTable;
 import com.cloudera.impala.catalog.HdfsTable;
 import com.cloudera.impala.catalog.ImpaladCatalog;
 import com.cloudera.impala.catalog.StructType;
+import com.cloudera.impala.catalog.SystemTable;
 import com.cloudera.impala.catalog.Table;
@@ -719,6 +721,8 @@
       return ((DataSourceTable) table).getTableStats();
     } else if (table instanceof KuduTable) {
       return ((KuduTable) table).getTableStats();
+    } else if (table instanceof SystemTable) {
+      return ((SystemTable) table).getTableStats();
     } else {
       throw new InternalException("Invalid table class: " + table.getClass());
     }
diff --git a/fe/src/test/java/com/cloudera/impala/analysis/AnalyzeStmtsTest.java b/fe/src/test/java/com/cloudera/impala/analysis/AnalyzeStmtsTest.java
index 33fd6ac..29624bc 100644
--- a/fe/src/test/java/com/cloudera/impala/analysis/AnalyzeStmtsTest.java
+++ b/fe/src/test/java/com/cloudera/impala/analysis/AnalyzeStmtsTest.java
@@ -3483,4 +3483,9 @@
     AnalyzesOk("set foo=true");
     AnalyzesOk("set");
   }
+
+  @Test
+  public void TestSystemTableScanAnalysis() {
+    AnalyzesOk("select * from system.metrics");
+  }
 }
diff --git a/fe/src/test/java/com/cloudera/impala/analysis/AuthorizationTest.java b/fe/src/test/java/com/cloudera/impala/analysis/AuthorizationTest.java
index 3cd04e5..38a7221 100644
--- a/fe/src/test/java/com/cloudera/impala/analysis/AuthorizationTest.java
+++ b/fe/src/test/java/com/cloudera/impala/analysis/AuthorizationTest.java
@@ -29,7 +29,9 @@
 import java.util.UUID;
 
 import org.apache.hadoop.conf.Configuration;
+
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTH_TO_LOCAL;
+
 import org.apache.hive.service.cli.thrift.TGetColumnsReq;
 import org.apache.hive.service.cli.thrift.TGetSchemasReq;
 import org.apache.hive.service.cli.thrift.TGetTablesReq;
@@ -51,9 +53,13 @@
 import com.cloudera.impala.authorization.User;
 import com.cloudera.impala.catalog.AuthorizationException;
 import com.cloudera.impala.catalog.Catalog;
+import com.cloudera.impala.catalog.Column;
 import com.cloudera.impala.catalog.Db;
 import com.cloudera.impala.catalog.ImpaladCatalog;
 import com.cloudera.impala.catalog.ScalarFunction;
+import com.cloudera.impala.catalog.SystemTable;
+import com.cloudera.impala.catalog.Table;
+import com.cloudera.impala.catalog.TableId;
 import com.cloudera.impala.catalog.Type;
 import com.cloudera.impala.common.AnalysisException;
 import com.cloudera.impala.common.ImpalaException;
@@ -102,6 +108,8 @@
   //   INSERT permissions on 'functional.alltypes' (no SELECT permissions)
   //   INSERT permissions on all tables in 'functional_parquet' database
   //   No permissions on database 'functional_rc'
+  //   SELECT permissions on 'system.metrics'
+
   public final static String AUTHZ_POLICY_FILE = "/test-warehouse/authz-policy.ini";
   public final static User USER = new User(System.getProperty("user.name"));
 
@@ -112,6 +120,7 @@
       "allcomplextypes", "alltypes", "alltypesagg", "alltypessmall", "alltypestiny",
       "complex_view", "view_view");
 
+  private static final List<String> SYSTEM_VISIBLE_TABLES = Lists.newArrayList("metrics");
   /**
    * Test context whose instances are used to parameterize this test.
    */
@@ -289,6 +298,17 @@
     privilege.setServer_name("server1");
     privilege.setDb_name("tpcds");
     privilege.setTable_name(AuthorizeableTable.ANY_TABLE_NAME);
+    sentryService.grantRolePrivilege(USER, roleName, privilege);
+
+    // select_system_database_metrics
+    roleName = "select_system_database_metrics";
+    sentryService.createRole(USER, roleName, true);
+    sentryService.grantRoleToGroup(USER, roleName, USER.getName());
+
+    privilege = new TPrivilege("", TPrivilegeLevel.SELECT, TPrivilegeScope.TABLE, false);
+    privilege.setServer_name("server1");
+    privilege.setDb_name("system");
+    privilege.setTable_name("metrics");
     sentryService.grantRolePrivilege(USER, roleName, privilege);
 
     // select_functional_alltypesagg
@@ -1558,7 +1578,7 @@
     // These are the only dbs that should show up because they are the only
     // dbs the user has any permissions on.
     List<String> expectedDbs = Lists.newArrayList("default", "functional",
-        "functional_parquet", "functional_seq_snap", "tpcds", "tpch");
+        "functional_parquet", "functional_seq_snap", "system", "tpcds", "tpch");
 
     List<Db> dbs = fe_.getDbs(PatternMatcher.createHivePatternMatcher("*"), USER);
     assertEquals(expectedDbs, extractDbNames(dbs));
@@ -1720,7 +1740,7 @@
     req.get_schemas_req.setSchemaName("%");
     TResultSet resp = fe_.execHiveServer2MetadataOp(req);
     List<String> expectedDbs = Lists.newArrayList("default", "functional",
-        "functional_parquet", "functional_seq_snap", "tpcds", "tpch");
+        "functional_parquet", "functional_seq_snap", "system", "tpcds", "tpch");
     assertEquals(expectedDbs.size(), resp.rows.size());
     for (int i = 0; i < resp.rows.size(); ++i) {
       assertEquals(expectedDbs.get(i),
@@ -2156,6 +2176,141 @@
     AuthzOk(fe, context, "invalidate metadata");
   }
 
+  private boolean containDb(List<Db> databases, String dbName) {
+    for (Db db : databases) {
+      if (db.getName().equals(dbName)) return true;
+    }
+    return false;
+  }
+
+  @Test
+  public void TestSystemDatabase() throws ImpalaException {
+    Db system = ctx_.catalog.getSystemDb();
+    Table table = new SystemTable(
+        TableId.createInvalidId(), null, system, "test_table", "system");
+    system.addTable(table);
+    table.addColumn(new Column("test_column", Type.STRING, 0));
+
+    // User should be able to see 'system' db
+    AuthzOk("show databases");
+    List<Db> databases = fe_.getDbs(PatternMatcher.createHivePatternMatcher("*"), USER);
+    Assert.assertTrue("User should be able to see 'system' db",
+        databases.contains(ctx_.catalog.getDb("system")));
+    Assert.assertTrue("User should be able to see 'system' db",
+        containDb(databases, "system"));
+    // All users should be able to use the "system" database.
+    AuthzOk("use system");
+
+    // Only show tables defined in SYSTEM_VISIBLE_TABLES
+    AuthzOk("show tables in system");
+    List<String> tables =
+        fe_.getTableNames("system", PatternMatcher.createHivePatternMatcher("*"), USER);
+    Assert.assertEquals(SYSTEM_VISIBLE_TABLES, tables);
+
+    // Basic test, table exist, user has access
+    AuthzOk("select * from system.metrics");
+
+    // table exist, user doesn't have access
+    AuthzError("select * from system.test_table",
+        "User '%s' does not have privileges to execute 'SELECT' on: " +
+        "system.test_table");
+
+    // Select a non-existent system table on "system" does not reveal existence
+    // information.
+    AuthzError("select * from system.fake_table",
+        "User '%s' does not have privileges to execute 'SELECT' on: " +
+        "system.fake_table");
+
+    // Test create existent "system" database. (no permissions).
+    AuthzError("create database if not exists system", "Cannot modify system database.");
+
+    // Test drop "system" database
+    AuthzError("drop database system", "Cannot modify system database.");
+
+    // Test create table on "system" database
+    AuthzError("create table system.new_table (i int)", "Cannot modify system database.");
+
+    // Test drop table on "system" database
+    AuthzError("drop table if exists system.metrics", "Cannot modify system database.");
+    // Drop a non-existent table on "system" database does not reveal privileged
+    // information.
+    AuthzError(
+        "drop table if exists system.fake_table", "Cannot modify system database.");
+
+    // Test create view on system database
+    AuthzError("create view system.new_view as " +
+        "select * from functional.alltypesagg",
+        "Cannot modify system database.");
+
+    // Test drop view on system database
+    // Using DROP VIEW on a table does not reveal privileged information.
+    AuthzError("drop view system.metrics", "Cannot modify system database.");
+  }
+
+  @Test
+  public void TestSystemDatabaseWithAdminUser() throws ImpalaException {
+    AuthorizationConfig authzConfig = new AuthorizationConfig("server1",
+        AUTHZ_POLICY_FILE, "",
+        LocalGroupResourceAuthorizationProvider.class.getName());
+    ImpaladCatalog catalog = new ImpaladTestCatalog(authzConfig);
+
+    Db system = catalog.getSystemDb();
+    Table table = new SystemTable(
+        TableId.createInvalidId(), null, system, "test_table", "system");
+    system.addTable(table);
+    table.addColumn(new Column("test_column", Type.STRING, 0));
+
+    User admin_user = new User("admin_user");
+    AnalysisContext context = new AnalysisContext(catalog,
+        TestUtils.createQueryContext(Catalog.DEFAULT_DB, admin_user.getName()),
+        authzConfig);
+    Frontend fe = new Frontend(authzConfig, catalog);
+
+    // Admin user should have privileges to every table for SELECT
+    AuthzOk(fe, context, "select * from system.metrics");
+    AuthzOk(fe, context, "select * from system.test_table");
+
+    // Admin user have access, but table doesn't exists
+    try {
+      AuthzOk(fe, context, "select * from system.fake_table");
+      fail("Expected analysis error");
+    } catch (AnalysisException e) {
+      Assert.assertEquals(
+          e.getMessage(), "Could not resolve table reference: 'system.fake_table'");
+    }
+
+    // Even admin_user don't have operation privilege other than SELECT
+    // Test create existent "system" database. (no permissions).
+    AuthzError(fe, context, "create database if not exists system",
+        "Cannot modify system database.", admin_user);
+
+    // Test drop "system" database
+    AuthzError(fe, context, "drop database system",
+        "Cannot modify system database.", admin_user);
+
+    // Test create table on "system" database
+    AuthzError(fe, context, "create table system.new_table (i int)",
+        "Cannot modify system database.", admin_user);
+
+    // Test drop table on "system" database
+    AuthzError(fe, context, "drop table if exists system.metrics",
+        "Cannot modify system database.", admin_user);
+    // Drop a non-existent table on "system" database does not reveal privileged
+    // information.
+    AuthzError(fe, context, "drop table if exists system.fake_tbl",
+        "Cannot modify system database.", admin_user);
+
+    // Test create view on system database
+    AuthzError(fe, context, "create view system.new_view as " +
+        "select * from functional.alltypesagg",
+        "Cannot modify system database.", admin_user);
+
+    // Test drop view on system database
+    // Using DROP VIEW on a table does not reveal privileged information.
+    AuthzError(fe, context, "drop view system.metrics",
+        "Cannot modify system database.", admin_user);
+  }
+
   private void TestWithIncorrectConfig(AuthorizationConfig authzConfig, User user)
       throws AnalysisException, InternalException {
     Frontend fe = new Frontend(authzConfig, ctx_.catalog);
diff --git a/fe/src/test/java/com/cloudera/impala/planner/PlannerTest.java b/fe/src/test/java/com/cloudera/impala/planner/PlannerTest.java
index 7472da0..1787c7c 100644
--- a/fe/src/test/java/com/cloudera/impala/planner/PlannerTest.java
+++ b/fe/src/test/java/com/cloudera/impala/planner/PlannerTest.java
@@ -244,4 +244,9 @@
   public void testConjunctOrdering() {
     runPlannerTestFile("conjunct-ordering");
   }
+
+  @Test
+  public void testSystemDbPlanner() {
+    runPlannerTestFile("system-db");
+  }
 }
diff --git a/fe/src/test/resources/authz-policy.ini.template b/fe/src/test/resources/authz-policy.ini.template
index 4c11bc4..ddf4e5f 100644
--- a/fe/src/test/resources/authz-policy.ini.template
+++ b/fe/src/test/resources/authz-policy.ini.template
@@ -7,7 +7,7 @@
           select_functional_alltypesagg, insert_functional_alltypes,\
           select_functional_complex_view, select_functional_view_view,\
           insert_parquet, new_table_uri, tpch_data_uri, select_column_level_functional,\
-          upper_case_uri
+          upper_case_uri, select_system_database_metrics
 auth_to_local_group = test_role
 server_admin = all_server
 
@@ -29,6 +29,8 @@
     server=server1->db=functional->table=complex_view->action=select
 select_functional_view_view =\
     server=server1->db=functional->table=view_view->action=select
+select_system_database_metrics =\
+    server=server1->db=system->table=metrics->action=select
 insert_parquet = server=server1->db=functional_parquet->table=*->action=insert
 select_column_level_functional =\
     server=server1->db=functional->table=alltypessmall->column=id->action=select,\
diff --git a/testdata/workloads/functional-planner/queries/PlannerTest/system-db.test b/testdata/workloads/functional-planner/queries/PlannerTest/system-db.test
new file mode 100644
index 0000000..29b79f4
--- /dev/null
+++ b/testdata/workloads/functional-planner/queries/PlannerTest/system-db.test
@@ -0,0 +1,8 @@
+# Basic test with system table scan
+select * from system.metrics
+---- PLAN
+00:SCAN SYSTEM_TABLE [system.metrics]
+---- DISTRIBUTEDPLAN
+01:EXCHANGE [UNPARTITIONED]
+|
+00:SCAN SYSTEM_TABLE [system.metrics]
diff --git a/testdata/workloads/functional-query/queries/QueryTest/system-database.test b/testdata/workloads/functional-query/queries/QueryTest/system-database.test
new file mode 100644
index 0000000..7692a0b
--- /dev/null
+++ b/testdata/workloads/functional-query/queries/QueryTest/system-database.test
@@ -0,0 +1,13 @@
+====
+---- QUERY
+# Count the rows for a single metric name in the system metrics table. The
+# expected count of 3 presumably corresponds to one row per impalad in the
+# 3-node test mini-cluster -- each daemon reports its own copy of the metric.
+select count(*)
+from system.metrics
+where name="tcmalloc.bytes-in-use"
+---- RESULTS
+3
+---- TYPES
+BIGINT
+====
diff --git a/tests/query_test/test_scanners.py b/tests/query_test/test_scanners.py
index e267f93..6b248e2 100644
--- a/tests/query_test/test_scanners.py
+++ b/tests/query_test/test_scanners.py
@@ -66,6 +66,7 @@
     new_vector = deepcopy(vector)
     new_vector.get_value('exec_option')['batch_size'] = vector.get_value('batch_size')
     self.run_test_case('QueryTest/scanners', new_vector)
+    self.run_test_case('QueryTest/system-database', new_vector)
 
 # Test all the scanners with a simple limit clause. The limit clause triggers
 # cancellation in the scanner code paths.
