diff --git ivy/libraries.properties ivy/libraries.properties
index 66d39fe..c09ec93 100644
--- ivy/libraries.properties
+++ ivy/libraries.properties
@@ -40,6 +40,7 @@ derby.version=10.4.2.0
guava.version=r06
hbase.version=0.92.0-SNAPSHOT
hbase-test.version=0.92.0-SNAPSHOT
+jackson.version=1.7.3
javaewah.version=0.3
jdo-api.version=2.3-ec
jdom.version=1.1
@@ -55,4 +56,3 @@ slf4j-api.version=1.6.1
slf4j-log4j12.version=1.6.1
velocity.version=1.5
zookeeper.version=3.4.2
-
diff --git ql/ivy.xml ql/ivy.xml
index 8a5f616..1203dd2 100644
--- ql/ivy.xml
+++ ql/ivy.xml
@@ -60,5 +60,7 @@
+    <dependency org="org.codehaus.jackson" name="jackson-core-asl" rev="${jackson.version}"/>
+    <dependency org="org.codehaus.jackson" name="jackson-mapper-asl" rev="${jackson.version}"/>
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 703bb9c..e85caba 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.util.StringUtils.stringifyException;
import java.io.BufferedWriter;
import java.io.DataOutput;
+import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStreamWriter;
@@ -90,8 +91,11 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.JsonMetaDataFormatter;
import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
+import org.apache.hadoop.hive.ql.metadata.MetaDataFormatter;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.metadata.TextMetaDataFormatter;
import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
@@ -165,6 +169,8 @@ public class DDLTask extends Task implements Serializable {
private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
+ private MetaDataFormatter formatter;
+
@Override
public boolean requireLock() {
return this.work != null && this.work.getNeedLock();
@@ -179,6 +185,13 @@ public class DDLTask extends Task implements Serializable {
super.initialize(conf, queryPlan, ctx);
this.conf = conf;
+ // Pick the formatter to use to display the results: either the
+ // normal human-readable output or a JSON object.
+ if ("json".equals(conf.get("hive.format")))
+ formatter = new JsonMetaDataFormatter();
+ else
+ formatter = new TextMetaDataFormatter();
+
INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
@@ -1803,18 +1816,14 @@ public class DDLTask extends Task implements Serializable {
}
// write the results in the file
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(showParts.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
- Iterator<String> iterParts = parts.iterator();
- while (iterParts.hasNext()) {
- // create a row per partition name
- outStream.writeBytes(iterParts.next());
- outStream.write(terminator);
- }
+ formatter.showTablePartitons(outStream, parts);
+
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
@@ -2394,88 +2403,14 @@ public class DDLTask extends Task implements Serializable {
}
// write the results in the file
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(showTblStatus.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
- Iterator<Table> iterTables = tbls.iterator();
- while (iterTables.hasNext()) {
- // create a row per table name
- Table tbl = iterTables.next();
- String tableName = tbl.getTableName();
- String tblLoc = null;
- String inputFormattCls = null;
- String outputFormattCls = null;
- if (part != null) {
- if (par != null) {
- if (par.getLocation() != null) {
- tblLoc = par.getDataLocation().toString();
- }
- inputFormattCls = par.getInputFormatClass().getName();
- outputFormattCls = par.getOutputFormatClass().getName();
- }
- } else {
- if (tbl.getPath() != null) {
- tblLoc = tbl.getDataLocation().toString();
- }
- inputFormattCls = tbl.getInputFormatClass().getName();
- outputFormattCls = tbl.getOutputFormatClass().getName();
- }
-
- String owner = tbl.getOwner();
- List<FieldSchema> cols = tbl.getCols();
- String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
- boolean isPartitioned = tbl.isPartitioned();
- String partitionCols = "";
- if (isPartitioned) {
- partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
- "partition_columns", tbl.getPartCols());
- }
+ formatter.showTableStatus(outStream, db, conf, tbls, part, par);
- outStream.writeBytes("tableName:" + tableName);
- outStream.write(terminator);
- outStream.writeBytes("owner:" + owner);
- outStream.write(terminator);
- outStream.writeBytes("location:" + tblLoc);
- outStream.write(terminator);
- outStream.writeBytes("inputformat:" + inputFormattCls);
- outStream.write(terminator);
- outStream.writeBytes("outputformat:" + outputFormattCls);
- outStream.write(terminator);
- outStream.writeBytes("columns:" + ddlCols);
- outStream.write(terminator);
- outStream.writeBytes("partitioned:" + isPartitioned);
- outStream.write(terminator);
- outStream.writeBytes("partitionColumns:" + partitionCols);
- outStream.write(terminator);
- // output file system information
- Path tablLoc = tbl.getPath();
- List<Path> locations = new ArrayList<Path>();
- if (isPartitioned) {
- if (par == null) {
- for (Partition curPart : db.getPartitions(tbl)) {
- if (curPart.getLocation() != null) {
- locations.add(new Path(curPart.getLocation()));
- }
- }
- } else {
- if (par.getLocation() != null) {
- locations.add(new Path(par.getLocation()));
- }
- }
- } else {
- if (tablLoc != null) {
- locations.add(tablLoc);
- }
- }
- if (!locations.isEmpty()) {
- writeFileSystemStats(outStream, locations, tablLoc, false, 0);
- }
-
- outStream.write(terminator);
- }
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
@@ -2511,14 +2446,14 @@ public class DDLTask extends Task implements Serializable {
// describe the table - populate the output stream
Table tbl = db.getTable(tableName, false);
Partition part = null;
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(descTbl.getResFile());
if (tbl == null) {
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
String errMsg = "Table " + tableName + " does not exist";
- outStream.write(errMsg.getBytes("UTF-8"));
+ formatter.error(outStream, errMsg);
((FSDataOutputStream) outStream).close();
outStream = null;
return 0;
@@ -2530,7 +2465,7 @@ public class DDLTask extends Task implements Serializable {
outStream = fs.create(resFile);
String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
+ tableName + " does not exist";
- outStream.write(errMsg.getBytes("UTF-8"));
+ formatter.error(outStream, errMsg);
((FSDataOutputStream) outStream).close();
outStream = null;
return 0;
@@ -2555,57 +2490,20 @@ public class DDLTask extends Task implements Serializable {
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
+ List<FieldSchema> cols = null;
if (colPath.equals(tableName)) {
+ cols = (part == null) ? tbl.getCols() : part.getCols();
if (!descTbl.isFormatted()) {
- List<FieldSchema> cols = tbl.getCols();
if (tableName.equals(colPath)) {
cols.addAll(tbl.getPartCols());
}
- outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
- } else {
- outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl));
}
} else {
- List<FieldSchema> cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
- if (descTbl.isFormatted()) {
- outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
- } else {
- outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
- }
+ cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
}
- if (tableName.equals(colPath)) {
-
- if (descTbl.isFormatted()) {
- if (part != null) {
- outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
- } else {
- outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
- }
- }
-
- // if extended desc table then show the complete details of the table
- if (descTbl.isExt()) {
- // add empty line
- outStream.write(terminator);
- if (part != null) {
- // show partition information
- outStream.writeBytes("Detailed Partition Information");
- outStream.write(separator);
- outStream.writeBytes(part.getTPartition().toString());
- outStream.write(separator);
- // comment column is empty
- outStream.write(terminator);
- } else {
- // show table information
- outStream.writeBytes("Detailed Table Information");
- outStream.write(separator);
- outStream.writeBytes(tbl.getTTable().toString());
- outStream.write(separator);
- outStream.write(terminator);
- }
- }
- }
+ formatter.describeTable(outStream, colPath, tableName, tbl, part, cols,
+ descTbl.isFormatted(), descTbl.isExt());
LOG.info("DDLTask: written data for " + tbl.getTableName());
((FSDataOutputStream) outStream).close();
@@ -2667,128 +2565,6 @@ public class DDLTask extends Task implements Serializable {
outStream.write(separator);
}
- private void writeFileSystemStats(DataOutput outStream, List<Path> locations,
- Path tabLoc, boolean partSpecified, int indent) throws IOException {
- long totalFileSize = 0;
- long maxFileSize = 0;
- long minFileSize = Long.MAX_VALUE;
- long lastAccessTime = 0;
- long lastUpdateTime = 0;
- int numOfFiles = 0;
-
- boolean unknown = false;
- FileSystem fs = tabLoc.getFileSystem(conf);
- // in case all files in locations do not exist
- try {
- FileStatus tmpStatus = fs.getFileStatus(tabLoc);
- lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
- lastUpdateTime = tmpStatus.getModificationTime();
- if (partSpecified) {
- // check whether the part exists or not in fs
- tmpStatus = fs.getFileStatus(locations.get(0));
- }
- } catch (IOException e) {
- LOG.warn(
- "Cannot access File System. File System status will be unknown: ", e);
- unknown = true;
- }
-
- if (!unknown) {
- for (Path loc : locations) {
- try {
- FileStatus status = fs.getFileStatus(tabLoc);
- FileStatus[] files = fs.listStatus(loc);
- long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
- long updateTime = status.getModificationTime();
- // no matter loc is the table location or part location, it must be a
- // directory.
- if (!status.isDir()) {
- continue;
- }
- if (accessTime > lastAccessTime) {
- lastAccessTime = accessTime;
- }
- if (updateTime > lastUpdateTime) {
- lastUpdateTime = updateTime;
- }
- for (FileStatus currentStatus : files) {
- if (currentStatus.isDir()) {
- continue;
- }
- numOfFiles++;
- long fileLen = currentStatus.getLen();
- totalFileSize += fileLen;
- if (fileLen > maxFileSize) {
- maxFileSize = fileLen;
- }
- if (fileLen < minFileSize) {
- minFileSize = fileLen;
- }
- accessTime = ShimLoader.getHadoopShims().getAccessTime(
- currentStatus);
- updateTime = currentStatus.getModificationTime();
- if (accessTime > lastAccessTime) {
- lastAccessTime = accessTime;
- }
- if (updateTime > lastUpdateTime) {
- lastUpdateTime = updateTime;
- }
- }
- } catch (IOException e) {
- // ignore
- }
- }
- }
- String unknownString = "unknown";
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("totalNumberFiles:");
- outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("totalFileSize:");
- outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("maxFileSize:");
- outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("minFileSize:");
- if (numOfFiles > 0) {
- outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
- } else {
- outStream.writeBytes(unknown ? unknownString : "" + 0);
- }
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("lastAccessTime:");
- outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
- + lastAccessTime);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("lastUpdateTime:");
- outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
- outStream.write(terminator);
- }
-
/**
* Alter a given table.
*
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/JsonMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/JsonMetaDataFormatter.java
new file mode 100644
index 0000000..ad5fbaf
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/JsonMetaDataFormatter.java
@@ -0,0 +1,355 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata;
+
+import java.io.DataOutputStream;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.net.URLEncoder;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Format table and index information for machine readability using
+ * json.
+ */
+public class JsonMetaDataFormatter implements MetaDataFormatter {
+ private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
+
+ /**
+ * Convert the map to a JSON string.
+ */
+ public void asJson(DataOutputStream out, Map<String, Object> data)
+ throws HiveException
+ {
+ try {
+ new ObjectMapper().writeValue(out, data);
+ } catch (IOException e) {
+ throw new HiveException("Unable to convert to json", e);
+ }
+ }
+
+ /**
+ * Write error message.
+ */
+ public void error(DataOutputStream out, String msg)
+ throws HiveException
+ {
+ asJson(out,
+ MapBuilder.create()
+ .put("error", msg)
+ .build());
+ }
+
+ /**
+ * Describe table.
+ */
+ public void describeTable(DataOutputStream out,
+ String colPath, String tableName,
+ Table tbl, Partition part, List<FieldSchema> cols,
+ boolean isFormatted, boolean isExt)
+ throws HiveException
+ {
+ MapBuilder builder = MapBuilder.create();
+
+ builder.put("columns", makeColsUnformatted(cols));
+
+ if (isExt) {
+ if (part != null)
+ builder.put("partition", part.getTPartition());
+ else
+ builder.put("table", tbl.getTTable());
+ }
+
+ asJson(out, builder.build());
+ }
+
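+ // Render each column as a map of name, type, and comment.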
+ private List<Map<String, Object>> makeColsUnformatted(List<FieldSchema> cols) {
+ ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+ for (FieldSchema col : cols)
+ res.add(makeOneColUnformatted(col));
+ return res;
+ }
+
+ private Map<String, Object> makeOneColUnformatted(FieldSchema col) {
+ return MapBuilder.create()
+ .put("name", col.getName())
+ .put("type", col.getType())
+ .put("comment", col.getComment())
+ .build();
+ }
+
+ public void showTableStatus(DataOutputStream out,
+ Hive db,
+ HiveConf conf,
+ List<Table> tbls,
+ Map<String, String> part,
+ Partition par)
+ throws HiveException
+ {
+ asJson(out, MapBuilder
+ .create()
+ .put("tables", makeAllTableStatus(db, conf,
+ tbls, part, par))
+ .build());
+ }
+
+ private List<Map<String, Object>> makeAllTableStatus(Hive db,
+ HiveConf conf,
+ List<Table> tbls,
+ Map<String, String> part,
+ Partition par)
+ throws HiveException
+ {
+ try {
+ ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+ for (Table tbl : tbls)
+ res.add(makeOneTableStatus(tbl, db, conf, part, par));
+ return res;
+ } catch(IOException e) {
+ throw new HiveException(e);
+ }
+ }
+
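+ // Build the status entry for one table, or for the given partition when one is specified.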
+ private Map<String, Object> makeOneTableStatus(Table tbl,
+ Hive db,
+ HiveConf conf,
+ Map<String, String> part,
+ Partition par)
+ throws HiveException, IOException
+ {
+ String tblLoc = null;
+ String inputFormattCls = null;
+ String outputFormattCls = null;
+ if (part != null) {
+ if (par != null) {
+ if (par.getLocation() != null) {
+ tblLoc = par.getDataLocation().toString();
+ }
+ inputFormattCls = par.getInputFormatClass().getName();
+ outputFormattCls = par.getOutputFormatClass().getName();
+ }
+ } else {
+ if (tbl.getPath() != null) {
+ tblLoc = tbl.getDataLocation().toString();
+ }
+ inputFormattCls = tbl.getInputFormatClass().getName();
+ outputFormattCls = tbl.getOutputFormatClass().getName();
+ }
+
+ MapBuilder builder = MapBuilder.create();
+
+ builder.put("tableName", tbl.getTableName());
+ builder.put("owner", tbl.getOwner());
+ builder.put("location", tblLoc);
+ builder.put("inputformat", inputFormattCls);
+ builder.put("outputformat", outputFormattCls);
+ builder.put("columns", makeColsUnformatted(tbl.getCols()));
+
+ builder.put("partitioned", tbl.isPartitioned());
+ if (tbl.isPartitioned())
+ builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols()));
+
+ putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par),
+ conf, tbl.getPath());
+
+ return builder.build();
+ }
+
+ private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
+ throws HiveException
+ {
+ // output file system information
+ Path tblPath = tbl.getPath();
+ List<Path> locations = new ArrayList<Path>();
+ if (tbl.isPartitioned()) {
+ if (par == null) {
+ for (Partition curPart : db.getPartitions(tbl)) {
+ if (curPart.getLocation() != null) {
+ locations.add(new Path(curPart.getLocation()));
+ }
+ }
+ } else {
+ if (par.getLocation() != null) {
+ locations.add(new Path(par.getLocation()));
+ }
+ }
+ } else {
+ if (tblPath != null) {
+ locations.add(tblPath);
+ }
+ }
+
+ return locations;
+ }
+
+ // Duplicates logic in TextMetaDataFormatter
+ private void putFileSystemsStats(MapBuilder builder, List<Path> locations,
+ HiveConf conf, Path tblPath)
+ throws IOException
+ {
+ long totalFileSize = 0;
+ long maxFileSize = 0;
+ long minFileSize = Long.MAX_VALUE;
+ long lastAccessTime = 0;
+ long lastUpdateTime = 0;
+ int numOfFiles = 0;
+
+ boolean unknown = false;
+ FileSystem fs = tblPath.getFileSystem(conf);
+ // in case all files in locations do not exist
+ try {
+ FileStatus tmpStatus = fs.getFileStatus(tblPath);
+ lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
+ lastUpdateTime = tmpStatus.getModificationTime();
+ } catch (IOException e) {
+ LOG.warn(
+ "Cannot access File System. File System status will be unknown: ", e);
+ unknown = true;
+ }
+
+ if (!unknown) {
+ for (Path loc : locations) {
+ try {
+ FileStatus status = fs.getFileStatus(tblPath);
+ FileStatus[] files = fs.listStatus(loc);
+ long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
+ long updateTime = status.getModificationTime();
+ // no matter loc is the table location or part location, it must be a
+ // directory.
+ if (!status.isDir()) {
+ continue;
+ }
+ if (accessTime > lastAccessTime) {
+ lastAccessTime = accessTime;
+ }
+ if (updateTime > lastUpdateTime) {
+ lastUpdateTime = updateTime;
+ }
+ for (FileStatus currentStatus : files) {
+ if (currentStatus.isDir()) {
+ continue;
+ }
+ numOfFiles++;
+ long fileLen = currentStatus.getLen();
+ totalFileSize += fileLen;
+ if (fileLen > maxFileSize) {
+ maxFileSize = fileLen;
+ }
+ if (fileLen < minFileSize) {
+ minFileSize = fileLen;
+ }
+ accessTime = ShimLoader.getHadoopShims().getAccessTime(
+ currentStatus);
+ updateTime = currentStatus.getModificationTime();
+ if (accessTime > lastAccessTime) {
+ lastAccessTime = accessTime;
+ }
+ if (updateTime > lastUpdateTime) {
+ lastUpdateTime = updateTime;
+ }
+ }
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+
+ builder
+ .put("totalNumberFiles", numOfFiles, ! unknown)
+ .put("totalFileSize", totalFileSize, ! unknown)
+ .put("maxFileSize", maxFileSize, ! unknown)
+ .put("minFileSize", numOfFiles > 0 ? minFileSize : 0, ! unknown)
+ .put("lastAccessTime", lastAccessTime, ! (unknown || lastAccessTime < 0))
+ .put("lastUpdateTime", lastUpdateTime, ! unknown);
+ }
+
+ /**
+ * Show the table partitions.
+ */
+ public void showTablePartitons(DataOutputStream out, List<String> parts)
+ throws HiveException
+ {
+ asJson(out,
+ MapBuilder.create()
+ .put("partitions", makeTablePartions(parts))
+ .build());
+ }
+
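+ // Build one map entry per partition name.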
+ private List<Map<String, Object>> makeTablePartions(List<String> parts)
+ throws HiveException
+ {
+ try {
+ ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+ for (String part : parts)
+ res.add(makeOneTablePartition(part));
+ return res;
+ } catch (UnsupportedEncodingException e) {
+ throw new HiveException(e);
+ }
+ }
+
+ // This seems like a very wrong implementation.
+ private Map<String, Object> makeOneTablePartition(String partIdent)
+ throws UnsupportedEncodingException
+ {
+ ArrayList