diff --git ivy/libraries.properties ivy/libraries.properties
index 6851bdf..85dbedc 100644
--- ivy/libraries.properties
+++ ivy/libraries.properties
@@ -40,6 +40,7 @@ derby.version=10.4.2.0
guava.version=r06
hbase.version=0.92.1-SNAPSHOT
hbase-test.version=0.92.1-SNAPSHOT
+jackson.version=1.7.3
javaewah.version=0.3
jdo-api.version=2.3-ec
jdom.version=1.1
diff --git ql/ivy.xml ql/ivy.xml
index 8a5f616..1203dd2 100644
--- ql/ivy.xml
+++ ql/ivy.xml
@@ -60,5 +60,7 @@
+    <dependency org="org.codehaus.jackson" name="jackson-core-asl" rev="${jackson.version}"/>
+    <dependency org="org.codehaus.jackson" name="jackson-mapper-asl" rev="${jackson.version}"/>
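
The ivy changes above wire Jackson 1.x (jackson.version=1.7.3) into the ql module for the new JSON formatter; the only Jackson class the formatter touches is org.codehaus.jackson.map.ObjectMapper. A minimal standalone sketch of that Jackson 1.x call, with an illustrative class name, map contents, and error code that are not part of the patch:

    import java.util.LinkedHashMap;
    import java.util.Map;
    import org.codehaus.jackson.map.ObjectMapper;

    public class JacksonUsageSketch {
      public static void main(String[] args) throws Exception {
        // Shape the data the way JsonMetaDataFormatter builds its maps.
        Map<String, Object> data = new LinkedHashMap<String, Object>();
        data.put("error", "Table foo does not exist");
        data.put("errorCode", 1); // placeholder value; real codes come from MetaDataFormatter
        // ObjectMapper.writeValue(OutputStream, Object) is the same call asJson() makes below.
        new ObjectMapper().writeValue(System.out, data);
      }
    }
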
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 703bb9c..b170230 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -23,6 +23,7 @@ import static org.apache.hadoop.util.StringUtils.stringifyException;
import java.io.BufferedWriter;
import java.io.DataOutput;
+import java.io.DataOutputStream;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.OutputStreamWriter;
@@ -90,8 +91,11 @@ import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.HiveMetaStoreChecker;
import org.apache.hadoop.hive.ql.metadata.HiveStorageHandler;
import org.apache.hadoop.hive.ql.metadata.InvalidTableException;
+import org.apache.hadoop.hive.ql.metadata.JsonMetaDataFormatter;
import org.apache.hadoop.hive.ql.metadata.MetaDataFormatUtils;
+import org.apache.hadoop.hive.ql.metadata.MetaDataFormatter;
import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.TextMetaDataFormatter;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.parse.AlterTablePartMergeFilesDesc;
import org.apache.hadoop.hive.ql.plan.AddPartitionDesc;
@@ -165,6 +169,8 @@ public class DDLTask extends Task implements Serializable {
private static String INTERMEDIATE_ORIGINAL_DIR_SUFFIX;
private static String INTERMEDIATE_EXTRACTED_DIR_SUFFIX;
+ private MetaDataFormatter formatter;
+
@Override
public boolean requireLock() {
return this.work != null && this.work.getNeedLock();
@@ -179,6 +185,13 @@ public class DDLTask extends Task implements Serializable {
super.initialize(conf, queryPlan, ctx);
this.conf = conf;
+ // Pick the formatter to use to display the results: either the
+ // normal human-readable text output or a JSON object.
+ if ("json".equals(conf.get("hive.format")))
+ formatter = new JsonMetaDataFormatter();
+ else
+ formatter = new TextMetaDataFormatter();
+
INTERMEDIATE_ARCHIVED_DIR_SUFFIX =
HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED);
INTERMEDIATE_ORIGINAL_DIR_SUFFIX =
@@ -374,17 +387,31 @@ public class DDLTask extends Task implements Serializable {
}
} catch (InvalidTableException e) {
- console.printError("Table " + e.getTableName() + " does not exist");
+ formatter.consoleError(console, "Table " + e.getTableName() + " does not exist",
+ formatter.MISSING);
LOG.debug(stringifyException(e));
return 1;
+ } catch (AlreadyExistsException e) {
+ formatter.consoleError(console, e.getMessage(),
+ "\n" + stringifyException(e),
+ formatter.CONFLICT);
+ return 1;
+ } catch (NoSuchObjectException e) {
+ formatter.consoleError(console, e.getMessage(),
+ "\n" + stringifyException(e),
+ formatter.MISSING);
+ return 1;
} catch (HiveException e) {
- console.printError("FAILED: Error in metadata: " + e.getMessage(), "\n"
- + stringifyException(e));
+ formatter.consoleError(console,
+ "FAILED: Error in metadata: " + e.getMessage(),
+ "\n" + stringifyException(e),
+ formatter.ERROR);
LOG.debug(stringifyException(e));
return 1;
} catch (Exception e) {
- console.printError("Failed with exception " + e.getMessage(), "\n"
- + stringifyException(e));
+ formatter.consoleError(console, "Failed with exception " + e.getMessage(),
+ "\n" + stringifyException(e),
+ formatter.ERROR);
return (1);
}
assert false;
@@ -1792,7 +1819,9 @@ public class DDLTask extends Task implements Serializable {
tbl = db.getTable(tabName);
if (!tbl.isPartitioned()) {
- console.printError("Table " + tabName + " is not a partitioned table");
+ formatter.consoleError(console,
+ "Table " + tabName + " is not a partitioned table",
+ formatter.ERROR);
return 1;
}
if (showParts.getPartSpec() != null) {
@@ -1803,18 +1832,14 @@ public class DDLTask extends Task implements Serializable {
}
// write the results in the file
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(showParts.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
- Iterator<String> iterParts = parts.iterator();
- while (iterParts.hasNext()) {
- // create a row per partition name
- outStream.writeBytes(iterParts.next());
- outStream.write(terminator);
- }
+ formatter.showTablePartitons(outStream, parts);
+
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
@@ -1911,24 +1936,22 @@ public class DDLTask extends Task implements Serializable {
LOG.info("results : " + databases.size());
// write the results in the file
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(showDatabasesDesc.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
- for (String database : databases) {
- // create a row per database name
- outStream.writeBytes(database);
- outStream.write(terminator);
- }
+ formatter.showDatabases(outStream, databases);
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
- LOG.warn("show databases: " + stringifyException(e));
+ formatter.logWarn(outStream, "show databases: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (IOException e) {
- LOG.warn("show databases: " + stringifyException(e));
+ formatter.logWarn(outStream, "show databases: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (Exception e) {
throw new HiveException(e.toString());
@@ -1967,26 +1990,23 @@ public class DDLTask extends Task implements Serializable {
}
// write the results in the file
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(showTbls.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
- SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
- Iterator<String> iterTbls = sortedTbls.iterator();
- while (iterTbls.hasNext()) {
- // create a row per table name
- outStream.writeBytes(iterTbls.next());
- outStream.write(terminator);
- }
+ SortedSet<String> sortedTbls = new TreeSet<String>(tbls);
+ formatter.showTables(outStream, sortedTbls);
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
- LOG.warn("show table: " + stringifyException(e));
+ formatter.logWarn(outStream, "show table: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (IOException e) {
- LOG.warn("show table: " + stringifyException(e));
+ formatter.logWarn(outStream, "show table: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (Exception e) {
throw new HiveException(e.toString());
@@ -2308,7 +2328,7 @@ public class DDLTask extends Task implements Serializable {
}
private int descDatabase(DescDatabaseDesc descDatabase) throws HiveException {
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(descDatabase.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
@@ -2316,37 +2336,32 @@ public class DDLTask extends Task implements Serializable {
Database database = db.getDatabase(descDatabase.getDatabaseName());
- if (database != null) {
- outStream.writeBytes(database.getName());
- outStream.write(separator);
- if (database.getDescription() != null) {
- outStream.writeBytes(database.getDescription());
- }
- outStream.write(separator);
- if (database.getLocationUri() != null) {
- outStream.writeBytes(database.getLocationUri());
- }
-
- outStream.write(separator);
- if (descDatabase.isExt() && database.getParametersSize() > 0) {
- Map<String, String> params = database.getParameters();
- outStream.writeBytes(params.toString());
- }
-
+ if (database == null) {
+ formatter.error(outStream,
+ "No such database: " + descDatabase.getDatabaseName(),
+ formatter.MISSING);
} else {
- outStream.writeBytes("No such database: " + descDatabase.getDatabaseName());
- }
-
- outStream.write(terminator);
+ Map<String, String> params = null;
+ if (descDatabase.isExt())
+ params = database.getParameters();
+ formatter.showDatabaseDescription(outStream,
+ database.getName(),
+ database.getDescription(),
+ database.getLocationUri(),
+ params);
+ }
((FSDataOutputStream) outStream).close();
outStream = null;
-
} catch (FileNotFoundException e) {
- LOG.warn("describe database: " + stringifyException(e));
+ formatter.logWarn(outStream,
+ "describe database: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (IOException e) {
- LOG.warn("describe database: " + stringifyException(e));
+ formatter.logWarn(outStream,
+ "describe database: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (Exception e) {
throw new HiveException(e.toString());
@@ -2394,95 +2409,23 @@ public class DDLTask extends Task implements Serializable {
}
// write the results in the file
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(showTblStatus.getResFile());
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
- Iterator<Table> iterTables = tbls.iterator();
- while (iterTables.hasNext()) {
- // create a row per table name
- Table tbl = iterTables.next();
- String tableName = tbl.getTableName();
- String tblLoc = null;
- String inputFormattCls = null;
- String outputFormattCls = null;
- if (part != null) {
- if (par != null) {
- if (par.getLocation() != null) {
- tblLoc = par.getDataLocation().toString();
- }
- inputFormattCls = par.getInputFormatClass().getName();
- outputFormattCls = par.getOutputFormatClass().getName();
- }
- } else {
- if (tbl.getPath() != null) {
- tblLoc = tbl.getDataLocation().toString();
- }
- inputFormattCls = tbl.getInputFormatClass().getName();
- outputFormattCls = tbl.getOutputFormatClass().getName();
- }
-
- String owner = tbl.getOwner();
- List<FieldSchema> cols = tbl.getCols();
- String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols);
- boolean isPartitioned = tbl.isPartitioned();
- String partitionCols = "";
- if (isPartitioned) {
- partitionCols = MetaStoreUtils.getDDLFromFieldSchema(
- "partition_columns", tbl.getPartCols());
- }
+ formatter.showTableStatus(outStream, db, conf, tbls, part, par);
- outStream.writeBytes("tableName:" + tableName);
- outStream.write(terminator);
- outStream.writeBytes("owner:" + owner);
- outStream.write(terminator);
- outStream.writeBytes("location:" + tblLoc);
- outStream.write(terminator);
- outStream.writeBytes("inputformat:" + inputFormattCls);
- outStream.write(terminator);
- outStream.writeBytes("outputformat:" + outputFormattCls);
- outStream.write(terminator);
- outStream.writeBytes("columns:" + ddlCols);
- outStream.write(terminator);
- outStream.writeBytes("partitioned:" + isPartitioned);
- outStream.write(terminator);
- outStream.writeBytes("partitionColumns:" + partitionCols);
- outStream.write(terminator);
- // output file system information
- Path tablLoc = tbl.getPath();
- List<Path> locations = new ArrayList<Path>();
- if (isPartitioned) {
- if (par == null) {
- for (Partition curPart : db.getPartitions(tbl)) {
- if (curPart.getLocation() != null) {
- locations.add(new Path(curPart.getLocation()));
- }
- }
- } else {
- if (par.getLocation() != null) {
- locations.add(new Path(par.getLocation()));
- }
- }
- } else {
- if (tablLoc != null) {
- locations.add(tablLoc);
- }
- }
- if (!locations.isEmpty()) {
- writeFileSystemStats(outStream, locations, tablLoc, false, 0);
- }
-
- outStream.write(terminator);
- }
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
- LOG.info("show table status: " + stringifyException(e));
+ formatter.logInfo(outStream, "show table status: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (IOException e) {
- LOG.info("show table status: " + stringifyException(e));
+ formatter.logInfo(outStream, "show table status: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (Exception e) {
throw new HiveException(e);
@@ -2511,14 +2454,14 @@ public class DDLTask extends Task implements Serializable {
// describe the table - populate the output stream
Table tbl = db.getTable(tableName, false);
Partition part = null;
- DataOutput outStream = null;
+ DataOutputStream outStream = null;
try {
Path resFile = new Path(descTbl.getResFile());
if (tbl == null) {
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
String errMsg = "Table " + tableName + " does not exist";
- outStream.write(errMsg.getBytes("UTF-8"));
+ formatter.error(outStream, errMsg, formatter.MISSING);
((FSDataOutputStream) outStream).close();
outStream = null;
return 0;
@@ -2530,7 +2473,7 @@ public class DDLTask extends Task implements Serializable {
outStream = fs.create(resFile);
String errMsg = "Partition " + descTbl.getPartSpec() + " for table "
+ tableName + " does not exist";
- outStream.write(errMsg.getBytes("UTF-8"));
+ formatter.error(outStream, errMsg, formatter.MISSING);
((FSDataOutputStream) outStream).close();
outStream = null;
return 0;
@@ -2538,10 +2481,12 @@ public class DDLTask extends Task implements Serializable {
tbl = part.getTable();
}
} catch (FileNotFoundException e) {
- LOG.info("describe table: " + stringifyException(e));
+ formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (IOException e) {
- LOG.info("describe table: " + stringifyException(e));
+ formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} finally {
IOUtils.closeStream((FSDataOutputStream) outStream);
@@ -2555,66 +2500,32 @@ public class DDLTask extends Task implements Serializable {
FileSystem fs = resFile.getFileSystem(conf);
outStream = fs.create(resFile);
+ List<FieldSchema> cols = null;
if (colPath.equals(tableName)) {
+ cols = (part == null) ? tbl.getCols() : part.getCols();
if (!descTbl.isFormatted()) {
- List<FieldSchema> cols = tbl.getCols();
if (tableName.equals(colPath)) {
cols.addAll(tbl.getPartCols());
}
- outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
- } else {
- outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(tbl));
}
} else {
- List<FieldSchema> cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
- if (descTbl.isFormatted()) {
- outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation(cols));
- } else {
- outStream.writeBytes(MetaDataFormatUtils.displayColsUnformatted(cols));
- }
+ cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer());
}
- if (tableName.equals(colPath)) {
-
- if (descTbl.isFormatted()) {
- if (part != null) {
- outStream.writeBytes(MetaDataFormatUtils.getPartitionInformation(part));
- } else {
- outStream.writeBytes(MetaDataFormatUtils.getTableInformation(tbl));
- }
- }
-
- // if extended desc table then show the complete details of the table
- if (descTbl.isExt()) {
- // add empty line
- outStream.write(terminator);
- if (part != null) {
- // show partition information
- outStream.writeBytes("Detailed Partition Information");
- outStream.write(separator);
- outStream.writeBytes(part.getTPartition().toString());
- outStream.write(separator);
- // comment column is empty
- outStream.write(terminator);
- } else {
- // show table information
- outStream.writeBytes("Detailed Table Information");
- outStream.write(separator);
- outStream.writeBytes(tbl.getTTable().toString());
- outStream.write(separator);
- outStream.write(terminator);
- }
- }
- }
+ formatter.describeTable(outStream, colPath, tableName, tbl, part, cols,
+ descTbl.isFormatted(), descTbl.isExt());
LOG.info("DDLTask: written data for " + tbl.getTableName());
((FSDataOutputStream) outStream).close();
outStream = null;
} catch (FileNotFoundException e) {
- LOG.info("describe table: " + stringifyException(e));
+ formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (IOException e) {
- LOG.info("describe table: " + stringifyException(e));
+ formatter.logInfo(outStream, "describe table: " + stringifyException(e),
+ formatter.ERROR);
return 1;
} catch (Exception e) {
@@ -2667,128 +2578,6 @@ public class DDLTask extends Task implements Serializable {
outStream.write(separator);
}
- private void writeFileSystemStats(DataOutput outStream, List<Path> locations,
- Path tabLoc, boolean partSpecified, int indent) throws IOException {
- long totalFileSize = 0;
- long maxFileSize = 0;
- long minFileSize = Long.MAX_VALUE;
- long lastAccessTime = 0;
- long lastUpdateTime = 0;
- int numOfFiles = 0;
-
- boolean unknown = false;
- FileSystem fs = tabLoc.getFileSystem(conf);
- // in case all files in locations do not exist
- try {
- FileStatus tmpStatus = fs.getFileStatus(tabLoc);
- lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
- lastUpdateTime = tmpStatus.getModificationTime();
- if (partSpecified) {
- // check whether the part exists or not in fs
- tmpStatus = fs.getFileStatus(locations.get(0));
- }
- } catch (IOException e) {
- LOG.warn(
- "Cannot access File System. File System status will be unknown: ", e);
- unknown = true;
- }
-
- if (!unknown) {
- for (Path loc : locations) {
- try {
- FileStatus status = fs.getFileStatus(tabLoc);
- FileStatus[] files = fs.listStatus(loc);
- long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
- long updateTime = status.getModificationTime();
- // no matter loc is the table location or part location, it must be a
- // directory.
- if (!status.isDir()) {
- continue;
- }
- if (accessTime > lastAccessTime) {
- lastAccessTime = accessTime;
- }
- if (updateTime > lastUpdateTime) {
- lastUpdateTime = updateTime;
- }
- for (FileStatus currentStatus : files) {
- if (currentStatus.isDir()) {
- continue;
- }
- numOfFiles++;
- long fileLen = currentStatus.getLen();
- totalFileSize += fileLen;
- if (fileLen > maxFileSize) {
- maxFileSize = fileLen;
- }
- if (fileLen < minFileSize) {
- minFileSize = fileLen;
- }
- accessTime = ShimLoader.getHadoopShims().getAccessTime(
- currentStatus);
- updateTime = currentStatus.getModificationTime();
- if (accessTime > lastAccessTime) {
- lastAccessTime = accessTime;
- }
- if (updateTime > lastUpdateTime) {
- lastUpdateTime = updateTime;
- }
- }
- } catch (IOException e) {
- // ignore
- }
- }
- }
- String unknownString = "unknown";
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("totalNumberFiles:");
- outStream.writeBytes(unknown ? unknownString : "" + numOfFiles);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("totalFileSize:");
- outStream.writeBytes(unknown ? unknownString : "" + totalFileSize);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("maxFileSize:");
- outStream.writeBytes(unknown ? unknownString : "" + maxFileSize);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("minFileSize:");
- if (numOfFiles > 0) {
- outStream.writeBytes(unknown ? unknownString : "" + minFileSize);
- } else {
- outStream.writeBytes(unknown ? unknownString : "" + 0);
- }
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("lastAccessTime:");
- outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : ""
- + lastAccessTime);
- outStream.write(terminator);
-
- for (int k = 0; k < indent; k++) {
- outStream.writeBytes(Utilities.INDENT);
- }
- outStream.writeBytes("lastUpdateTime:");
- outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime);
- outStream.write(terminator);
- }
-
/**
* Alter a given table.
*
@@ -2808,8 +2597,10 @@ public class DDLTask extends Task implements Serializable {
if(alterTbl.getPartSpec() != null) {
part = db.getPartition(tbl, alterTbl.getPartSpec(), false);
if(part == null) {
- console.printError("Partition : " + alterTbl.getPartSpec().toString()
- + " does not exist.");
+ formatter.consoleError(console,
+ "Partition : " + alterTbl.getPartSpec().toString()
+ + " does not exist.",
+ formatter.MISSING);
return 1;
}
}
@@ -2839,7 +2630,9 @@ public class DDLTask extends Task implements Serializable {
while (iterOldCols.hasNext()) {
String oldColName = iterOldCols.next().getName();
if (oldColName.equalsIgnoreCase(newColName)) {
- console.printError("Column '" + newColName + "' exists");
+ formatter.consoleError(console,
+ "Column '" + newColName + "' exists",
+ formatter.CONFLICT);
return 1;
}
}
@@ -2871,7 +2664,9 @@ public class DDLTask extends Task implements Serializable {
String oldColName = col.getName();
if (oldColName.equalsIgnoreCase(newName)
&& !oldColName.equalsIgnoreCase(oldName)) {
- console.printError("Column '" + newName + "' exists");
+ formatter.consoleError(console,
+ "Column '" + newName + "' exists",
+ formatter.CONFLICT);
return 1;
} else if (oldColName.equalsIgnoreCase(oldName)) {
col.setName(newName);
@@ -2899,12 +2694,16 @@ public class DDLTask extends Task implements Serializable {
// did not find the column
if (!found) {
- console.printError("Column '" + oldName + "' does not exist");
+ formatter.consoleError(console,
+ "Column '" + oldName + "' does not exists",
+ formatter.MISSING);
return 1;
}
// after column is not null, but we did not find it.
if ((afterCol != null && !afterCol.trim().equals("")) && position < 0) {
- console.printError("Column '" + afterCol + "' does not exist");
+ formatter.consoleError(console,
+ "Column '" + afterCol + "' does not exists",
+ formatter.MISSING);
return 1;
}
@@ -2925,8 +2724,10 @@ public class DDLTask extends Task implements Serializable {
&& !tbl.getSerializationLib().equals(LazySimpleSerDe.class.getName())
&& !tbl.getSerializationLib().equals(ColumnarSerDe.class.getName())
&& !tbl.getSerializationLib().equals(DynamicSerDe.class.getName())) {
- console.printError("Replace columns is not supported for this table. "
- + "SerDe may be incompatible.");
+ formatter.consoleError(console,
+ "Replace columns is not supported for this table. "
+ + "SerDe may be incompatible.",
+ formatter.ERROR);
return 1;
}
tbl.getTTable().getSd().setCols(alterTbl.getNewCols());
@@ -3057,7 +2858,9 @@ public class DDLTask extends Task implements Serializable {
throw new HiveException(e);
}
} else {
- console.printError("Unsupported Alter commnad");
+ formatter.consoleError(console,
+ "Unsupported Alter commnad",
+ formatter.ERROR);
return 1;
}
@@ -3068,8 +2871,9 @@ public class DDLTask extends Task implements Serializable {
try {
tbl.checkValidity();
} catch (HiveException e) {
- console.printError("Invalid table columns : " + e.getMessage(),
- stringifyException(e));
+ formatter.consoleError(console,
+ "Invalid table columns : " + e.getMessage(),
+ formatter.ERROR);
return 1;
}
} else {
@@ -3235,8 +3039,10 @@ public class DDLTask extends Task implements Serializable {
try {
user = conf.getUser();
} catch (IOException e) {
- console.printError("Unable to get current user: " + e.getMessage(),
- stringifyException(e));
+ formatter.consoleError(console,
+ "Unable to get current user: " + e.getMessage(),
+ stringifyException(e),
+ formatter.ERROR);
return false;
}
@@ -3658,8 +3464,10 @@ public class DDLTask extends Task implements Serializable {
try {
tbl.setOwner(conf.getUser());
} catch (IOException e) {
- console.printError("Unable to get current user: " + e.getMessage(),
- stringifyException(e));
+ formatter.consoleError(console,
+ "Unable to get current user: " + e.getMessage(),
+ stringifyException(e),
+ formatter.ERROR);
return 1;
}
// set create time
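
Review note: DDLTask now routes its console and result-file output through a MetaDataFormatter picked in initialize() (TextMetaDataFormatter by default, JsonMetaDataFormatter when hive.format is set to json). MetaDataFormatter.java and TextMetaDataFormatter.java are part of this patch but not shown in this excerpt; the sketch below is only inferred from the call sites above and is not the authoritative definition, and its error-code constants are placeholders.

    package org.apache.hadoop.hive.ql.metadata;

    import java.io.DataOutputStream;
    import java.io.OutputStream;
    import java.util.List;
    import java.util.Map;
    import java.util.Set;
    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.api.FieldSchema;
    import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;

    /**
     * Shape of the formatter interface as DDLTask uses it in this patch
     * (inferred; see MetaDataFormatter.java in the full patch for the real one).
     */
    public interface MetaDataFormatter {
      // Error categories referenced as formatter.ERROR / MISSING / CONFLICT above.
      // Placeholder values; the actual codes are defined in the real interface.
      int ERROR = 1;
      int MISSING = 2;
      int CONFLICT = 3;

      void error(OutputStream out, String msg, int errorCode) throws HiveException;
      void logWarn(OutputStream out, String msg, int errorCode) throws HiveException;
      void logInfo(OutputStream out, String msg, int errorCode) throws HiveException;
      void consoleError(LogHelper console, String msg, int errorCode);
      void consoleError(LogHelper console, String msg, String detail, int errorCode);

      void showTables(DataOutputStream out, Set<String> tables) throws HiveException;
      void describeTable(DataOutputStream out, String colPath, String tableName,
          Table tbl, Partition part, List<FieldSchema> cols,
          boolean isFormatted, boolean isExt) throws HiveException;
      void showTableStatus(DataOutputStream out, Hive db, HiveConf conf,
          List<Table> tbls, Map<String, String> part, Partition par) throws HiveException;
      void showTablePartitons(DataOutputStream out, List<String> parts) throws HiveException;
      void showDatabases(DataOutputStream out, List<String> databases) throws HiveException;
      void showDatabaseDescription(DataOutputStream out, String database, String comment,
          String location, Map<String, String> params) throws HiveException;
    }
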
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/JsonMetaDataFormatter.java ql/src/java/org/apache/hadoop/hive/ql/metadata/JsonMetaDataFormatter.java
new file mode 100644
index 0000000..4e2eaa1
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/JsonMetaDataFormatter.java
@@ -0,0 +1,460 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hive.ql.metadata;
+
+import java.io.DataOutputStream;
+import java.io.OutputStream;
+import java.io.IOException;
+import java.io.UnsupportedEncodingException;
+import java.net.URLDecoder;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.Map;
+import java.util.Set;
+import org.apache.commons.lang.StringUtils;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.api.FieldSchema;
+import org.apache.hadoop.hive.ql.metadata.Partition;
+import org.apache.hadoop.hive.ql.metadata.Table;
+import org.apache.hadoop.hive.ql.session.SessionState.LogHelper;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.codehaus.jackson.map.ObjectMapper;
+
+/**
+ * Format table and index information for machine readability using
+ * json.
+ */
+public class JsonMetaDataFormatter implements MetaDataFormatter {
+ private static final Log LOG = LogFactory.getLog("hive.ql.exec.DDLTask");
+
+ /**
+ * Write the given map to the output stream as JSON.
+ */
+ public void asJson(OutputStream out, Map<String, Object> data)
+ throws HiveException
+ {
+ try {
+ new ObjectMapper().writeValue(out, data);
+ } catch (IOException e) {
+ throw new HiveException("Unable to convert to json", e);
+ }
+ }
+
+ /**
+ * Write an error message.
+ */
+ public void error(OutputStream out, String msg, int errorCode)
+ throws HiveException
+ {
+ asJson(out,
+ MapBuilder.create()
+ .put("error", msg)
+ .put("errorCode", errorCode)
+ .build());
+ }
+
+ /**
+ * Write a log warn message.
+ */
+ public void logWarn(OutputStream out, String msg, int errorCode)
+ throws HiveException
+ {
+ LOG.warn(msg);
+ error(out, msg, errorCode);
+ }
+
+ /**
+ * Write a log info message.
+ */
+ public void logInfo(OutputStream out, String msg, int errorCode)
+ throws HiveException
+ {
+ LOG.info(msg);
+ error(out, msg, errorCode);
+ }
+
+ /**
+ * Write a console error message.
+ */
+ public void consoleError(LogHelper console, String msg, int errorCode) {
+ try {
+ console.printError(msg);
+ error(console.getOutStream(), msg, errorCode);
+ } catch (HiveException e) {
+ console.printError("unable to create json: " + e);
+ }
+ }
+
+ /**
+ * Write a console error message.
+ */
+ public void consoleError(LogHelper console, String msg, String detail,
+ int errorCode)
+ {
+ try {
+ console.printError(msg, detail);
+ asJson(console.getOutStream(),
+ MapBuilder.create()
+ .put("error", msg)
+ .put("errorDetail", detail)
+ .put("errorCode", errorCode)
+ .build());
+ } catch (HiveException e) {
+ console.printError("unable to create json: " + e);
+ }
+ }
+
+ /**
+ * Show a list of tables.
+ */
+ public void showTables(DataOutputStream out, Set<String> tables)
+ throws HiveException
+ {
+ asJson(out,
+ MapBuilder.create()
+ .put("tables", tables)
+ .build());
+ }
+
+ /**
+ * Describe table.
+ */
+ public void describeTable(DataOutputStream out,
+ String colPath, String tableName,
+ Table tbl, Partition part, List<FieldSchema> cols,
+ boolean isFormatted, boolean isExt)
+ throws HiveException
+ {
+ MapBuilder builder = MapBuilder.create();
+
+ builder.put("columns", makeColsUnformatted(cols));
+
+ if (isExt) {
+ if (part != null)
+ builder.put("partitionInfo", part.getTPartition());
+ else
+ builder.put("tableInfo", tbl.getTTable());
+ }
+
+ asJson(out, builder.build());
+ }
+
+ private List<Map<String, Object>> makeColsUnformatted(List<FieldSchema> cols) {
+ ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+ for (FieldSchema col : cols)
+ res.add(makeOneColUnformatted(col));
+ return res;
+ }
+
+ private Map<String, Object> makeOneColUnformatted(FieldSchema col) {
+ return MapBuilder.create()
+ .put("name", col.getName())
+ .put("type", col.getType())
+ .put("comment", col.getComment())
+ .build();
+ }
+
+ public void showTableStatus(DataOutputStream out,
+ Hive db,
+ HiveConf conf,
+ List<Table> tbls,
+ Map<String, String> part,
+ Partition par)
+ throws HiveException
+ {
+ asJson(out, MapBuilder
+ .create()
+ .put("tables", makeAllTableStatus(db, conf,
+ tbls, part, par))
+ .build());
+ }
+
+ private List<Map<String, Object>> makeAllTableStatus(Hive db,
+ HiveConf conf,
+ List<Table> tbls,
+ Map<String, String> part,
+ Partition par)
+ throws HiveException
+ {
+ try {
+ ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+ for (Table tbl : tbls)
+ res.add(makeOneTableStatus(tbl, db, conf, part, par));
+ return res;
+ } catch(IOException e) {
+ throw new HiveException(e);
+ }
+ }
+
+ private Map<String, Object> makeOneTableStatus(Table tbl,
+ Hive db,
+ HiveConf conf,
+ Map<String, String> part,
+ Partition par)
+ throws HiveException, IOException
+ {
+ String tblLoc = null;
+ String inputFormattCls = null;
+ String outputFormattCls = null;
+ if (part != null) {
+ if (par != null) {
+ if (par.getLocation() != null) {
+ tblLoc = par.getDataLocation().toString();
+ }
+ inputFormattCls = par.getInputFormatClass().getName();
+ outputFormattCls = par.getOutputFormatClass().getName();
+ }
+ } else {
+ if (tbl.getPath() != null) {
+ tblLoc = tbl.getDataLocation().toString();
+ }
+ inputFormattCls = tbl.getInputFormatClass().getName();
+ outputFormattCls = tbl.getOutputFormatClass().getName();
+ }
+
+ MapBuilder builder = MapBuilder.create();
+
+ builder.put("tableName", tbl.getTableName());
+ builder.put("owner", tbl.getOwner());
+ builder.put("location", tblLoc);
+ builder.put("inputFormat", inputFormattCls);
+ builder.put("outputFormat", outputFormattCls);
+ builder.put("columns", makeColsUnformatted(tbl.getCols()));
+
+ builder.put("partitioned", tbl.isPartitioned());
+ if (tbl.isPartitioned())
+ builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols()));
+
+ putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par),
+ conf, tbl.getPath());
+
+ return builder.build();
+ }
+
+ private List<Path> makeTableStatusLocations(Table tbl, Hive db, Partition par)
+ throws HiveException
+ {
+ // output file system information
+ Path tblPath = tbl.getPath();
+ List<Path> locations = new ArrayList<Path>();
+ if (tbl.isPartitioned()) {
+ if (par == null) {
+ for (Partition curPart : db.getPartitions(tbl)) {
+ if (curPart.getLocation() != null) {
+ locations.add(new Path(curPart.getLocation()));
+ }
+ }
+ } else {
+ if (par.getLocation() != null) {
+ locations.add(new Path(par.getLocation()));
+ }
+ }
+ } else {
+ if (tblPath != null) {
+ locations.add(tblPath);
+ }
+ }
+
+ return locations;
+ }
+
+ // Duplicates logic in TextMetaDataFormatter
+ private void putFileSystemsStats(MapBuilder builder, List<Path> locations,
+ HiveConf conf, Path tblPath)
+ throws IOException
+ {
+ long totalFileSize = 0;
+ long maxFileSize = 0;
+ long minFileSize = Long.MAX_VALUE;
+ long lastAccessTime = 0;
+ long lastUpdateTime = 0;
+ int numOfFiles = 0;
+
+ boolean unknown = false;
+ FileSystem fs = tblPath.getFileSystem(conf);
+ // in case all files in locations do not exist
+ try {
+ FileStatus tmpStatus = fs.getFileStatus(tblPath);
+ lastAccessTime = ShimLoader.getHadoopShims().getAccessTime(tmpStatus);
+ lastUpdateTime = tmpStatus.getModificationTime();
+ } catch (IOException e) {
+ LOG.warn(
+ "Cannot access File System. File System status will be unknown: ", e);
+ unknown = true;
+ }
+
+ if (!unknown) {
+ for (Path loc : locations) {
+ try {
+ FileStatus status = fs.getFileStatus(tblPath);
+ FileStatus[] files = fs.listStatus(loc);
+ long accessTime = ShimLoader.getHadoopShims().getAccessTime(status);
+ long updateTime = status.getModificationTime();
+ // no matter loc is the table location or part location, it must be a
+ // directory.
+ if (!status.isDir()) {
+ continue;
+ }
+ if (accessTime > lastAccessTime) {
+ lastAccessTime = accessTime;
+ }
+ if (updateTime > lastUpdateTime) {
+ lastUpdateTime = updateTime;
+ }
+ for (FileStatus currentStatus : files) {
+ if (currentStatus.isDir()) {
+ continue;
+ }
+ numOfFiles++;
+ long fileLen = currentStatus.getLen();
+ totalFileSize += fileLen;
+ if (fileLen > maxFileSize) {
+ maxFileSize = fileLen;
+ }
+ if (fileLen < minFileSize) {
+ minFileSize = fileLen;
+ }
+ accessTime = ShimLoader.getHadoopShims().getAccessTime(
+ currentStatus);
+ updateTime = currentStatus.getModificationTime();
+ if (accessTime > lastAccessTime) {
+ lastAccessTime = accessTime;
+ }
+ if (updateTime > lastUpdateTime) {
+ lastUpdateTime = updateTime;
+ }
+ }
+ } catch (IOException e) {
+ // ignore
+ }
+ }
+ }
+
+ builder
+ .put("totalNumberFiles", numOfFiles, ! unknown)
+ .put("totalFileSize", totalFileSize, ! unknown)
+ .put("maxFileSize", maxFileSize, ! unknown)
+ .put("minFileSize", numOfFiles > 0 ? minFileSize : 0, ! unknown)
+ .put("lastAccessTime", lastAccessTime, ! (unknown || lastAccessTime < 0))
+ .put("lastUpdateTime", lastUpdateTime, ! unknown);
+ }
+
+ /**
+ * Show the table partitions.
+ */
+ public void showTablePartitons(DataOutputStream out, List<String> parts)
+ throws HiveException
+ {
+ asJson(out,
+ MapBuilder.create()
+ .put("partitions", makeTablePartions(parts))
+ .build());
+ }
+
+ private List<Map<String, Object>> makeTablePartions(List<String> parts)
+ throws HiveException
+ {
+ try {
+ ArrayList<Map<String, Object>> res = new ArrayList<Map<String, Object>>();
+ for (String part : parts)
+ res.add(makeOneTablePartition(part));
+ return res;
+ } catch (UnsupportedEncodingException e) {
+ throw new HiveException(e);
+ }
+ }
+
+ // This seems like a very wrong implementation.
+ private Map<String, Object> makeOneTablePartition(String partIdent)
+ throws UnsupportedEncodingException
+ {
+ ArrayList