diff --git a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java index 4df4dd5..8210e75 100644 --- a/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java +++ b/itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcDriver2.java @@ -1071,7 +1071,7 @@ private void getTablesTest(String tableTypeName, String viewTypeName) throws SQL tests.put("", new Object[]{}); for (String checkPattern: tests.keySet()) { - ResultSet rs = (ResultSet)con.getMetaData().getTables("default", null, checkPattern, null); + ResultSet rs = con.getMetaData().getTables("default", null, checkPattern, null); ResultSetMetaData resMeta = rs.getMetaData(); assertEquals(5, resMeta.getColumnCount()); assertEquals("TABLE_CAT", resMeta.getColumnName(1)); @@ -1100,7 +1100,7 @@ private void getTablesTest(String tableTypeName, String viewTypeName) throws SQL } // only ask for the views. - ResultSet rs = (ResultSet)con.getMetaData().getTables("default", null, null + ResultSet rs = con.getMetaData().getTables("default", null, null , new String[]{viewTypeName}); int cnt=0; while (rs.next()) { @@ -1112,7 +1112,7 @@ private void getTablesTest(String tableTypeName, String viewTypeName) throws SQL @Test public void testMetaDataGetCatalogs() throws SQLException { - ResultSet rs = (ResultSet)con.getMetaData().getCatalogs(); + ResultSet rs = con.getMetaData().getCatalogs(); ResultSetMetaData resMeta = rs.getMetaData(); assertEquals(1, resMeta.getColumnCount()); assertEquals("TABLE_CAT", resMeta.getColumnName(1)); @@ -1122,7 +1122,7 @@ public void testMetaDataGetCatalogs() throws SQLException { @Test public void testMetaDataGetSchemas() throws SQLException { - ResultSet rs = (ResultSet)con.getMetaData().getSchemas(); + ResultSet rs = con.getMetaData().getSchemas(); ResultSetMetaData resMeta = rs.getMetaData(); assertEquals(2, resMeta.getColumnCount()); assertEquals("TABLE_SCHEM", resMeta.getColumnName(1)); @@ -1172,7 +1172,7 @@ public void testMetaDataGetClassicTableTypes() throws SQLException { */ private void metaDataGetTableTypeTest(Set tabletypes) throws SQLException { - ResultSet rs = (ResultSet)con.getMetaData().getTableTypes(); + ResultSet rs = con.getMetaData().getTableTypes(); int cnt = 0; while (rs.next()) { @@ -1237,7 +1237,7 @@ public void testMetaDataGetColumns() throws SQLException { */ @Test public void testMetaDataGetColumnsMetaData() throws SQLException { - ResultSet rs = (ResultSet)con.getMetaData().getColumns(null, null + ResultSet rs = con.getMetaData().getColumns(null, null , "testhivejdbcdriver\\_table", null); ResultSetMetaData rsmd = rs.getMetaData(); @@ -1301,18 +1301,34 @@ public void testDescribeTable() throws SQLException { ResultSet res = stmt.executeQuery("describe " + tableName); res.next(); - assertEquals("Column name 'under_col' not found", "under_col", res.getString(1).trim()); + assertEquals("Column name 'under_col' not found", "under_col", res.getString(1)); assertEquals("Column type 'under_col' for column under_col not found", "int", res - .getString(2).trim()); + .getString(2)); res.next(); - assertEquals("Column name 'value' not found", "value", res.getString(1).trim()); + assertEquals("Column name 'value' not found", "value", res.getString(1)); assertEquals("Column type 'string' for column key not found", "string", res - .getString(2).trim()); + .getString(2)); assertFalse("More results found than expected", res.next()); } @Test + public void testShowColumns() throws SQLException { + 
Statement stmt = con.createStatement(); + assertNotNull("Statement is null", stmt); + + ResultSet res = stmt.executeQuery("show columns in " + tableName); + res.next(); + assertEquals("Column name 'under_col' not found", + "under_col", res.getString(1)); + + res.next(); + assertEquals("Column name 'value' not found", + "value", res.getString(1)); + assertFalse("More results found than expected", res.next()); + } + + @Test public void testDatabaseMetaData() throws SQLException { DatabaseMetaData meta = con.getMetaData(); @@ -1881,7 +1897,7 @@ public void testFetchFirstDfsCmds() throws Exception { public void testUnsupportedFetchTypes() throws Exception { try { con.createStatement(ResultSet.TYPE_SCROLL_SENSITIVE, - ResultSet.CONCUR_READ_ONLY); + ResultSet.CONCUR_READ_ONLY); fail("createStatement with TYPE_SCROLL_SENSITIVE should fail"); } catch(SQLException e) { assertEquals("HYC00", e.getSQLState().trim()); @@ -1889,7 +1905,7 @@ public void testUnsupportedFetchTypes() throws Exception { try { con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, - ResultSet.CONCUR_UPDATABLE); + ResultSet.CONCUR_UPDATABLE); fail("createStatement with CONCUR_UPDATABLE should fail"); } catch(SQLException e) { assertEquals("HYC00", e.getSQLState().trim()); @@ -1924,7 +1940,7 @@ public void testFetchFirstError() throws Exception { private void execFetchFirst(String sqlStmt, String colName, boolean oneRowOnly) throws Exception { Statement stmt = con.createStatement(ResultSet.TYPE_SCROLL_INSENSITIVE, - ResultSet.CONCUR_READ_ONLY); + ResultSet.CONCUR_READ_ONLY); ResultSet res = stmt.executeQuery(sqlStmt); List results = new ArrayList (); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java index 29f1e57..a3c9b23 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java @@ -176,7 +176,6 @@ import org.apache.hadoop.io.IOUtils; import org.apache.hadoop.util.ReflectionUtils; import org.apache.hadoop.util.ToolRunner; -import org.apache.thrift.TException; import org.stringtemplate.v4.ST; /** @@ -217,11 +216,11 @@ public void initialize(HiveConf conf, QueryPlan queryPlan, DriverContext ctx) { // normal human readable output or a json object. 
formatter = MetaDataFormatUtils.getFormatter(conf); INTERMEDIATE_ARCHIVED_DIR_SUFFIX = - HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED); + HiveConf.getVar(conf, ConfVars.METASTORE_INT_ARCHIVED); INTERMEDIATE_ORIGINAL_DIR_SUFFIX = - HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL); + HiveConf.getVar(conf, ConfVars.METASTORE_INT_ORIGINAL); INTERMEDIATE_EXTRACTED_DIR_SUFFIX = - HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED); + HiveConf.getVar(conf, ConfVars.METASTORE_INT_EXTRACTED); } @Override @@ -448,7 +447,7 @@ public int execute(DriverContext driverContext) { } AlterTableExchangePartition alterTableExchangePartition = - work.getAlterTableExchangePartition(); + work.getAlterTableExchangePartition(); if (alterTableExchangePartition != null) { return exchangeTablePartition(db, alterTableExchangePartition); } @@ -509,7 +508,7 @@ private int grantOrRevokeRole(GrantRevokeRoleDDL grantOrRevokeRoleDDL) if (grantRole) { db.grantRole(roleName, userName, principal.getType(), grantOrRevokeRoleDDL.getGrantor(), grantOrRevokeRoleDDL - .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption()); + .getGrantorType(), grantOrRevokeRoleDDL.isGrantOption()); } else { db.revokeRole(roleName, userName, principal.getType()); } @@ -557,7 +556,7 @@ private int showGrants(ShowGrantDesc showGrantDesc) throws HiveException { try { if (hiveObjectDesc == null) { privs.addAll(db.showPrivilegeGrant(HiveObjectType.GLOBAL, principalName, type, - null, null, null, null)); + null, null, null, null)); } else if (hiveObjectDesc != null && hiveObjectDesc.getObject() == null) { privs.addAll(db.showPrivilegeGrant(null, principalName, type, null, null, null, null)); } else { @@ -657,9 +656,9 @@ private int showGrantsV2(ShowGrantDesc showGrantDesc) throws HiveException { //only grantInfo is used HiveObjectPrivilege thriftObjectPriv = new HiveObjectPrivilege(new HiveObjectRef( - AuthorizationUtils.getThriftHiveObjType(privObj.getType()),privObj.getDbname(), - privObj.getTableViewURI(),null,null), principal.getName(), - AuthorizationUtils.getThriftPrincipalType(principal.getType()), grantInfo); + AuthorizationUtils.getThriftHiveObjType(privObj.getType()),privObj.getDbname(), + privObj.getTableViewURI(),null,null), principal.getName(), + AuthorizationUtils.getThriftPrincipalType(principal.getType()), grantInfo); privList.add(thriftObjectPriv); } boolean testMode = conf.getBoolVar(HiveConf.ConfVars.HIVE_IN_TEST); @@ -793,9 +792,9 @@ private int grantOrRevokePrivileges(List principals, tableName, partValues, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption))); } else { privBag - .addToPrivileges(new HiveObjectPrivilege( - new HiveObjectRef(HiveObjectType.TABLE, dbName, - tableName, null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption))); + .addToPrivileges(new HiveObjectPrivilege( + new HiveObjectRef(HiveObjectType.TABLE, dbName, + tableName, null, null), null, null, new PrivilegeGrantInfo(priv.toString(), 0, grantor, grantorType, grantOption))); } } else { privBag.addToPrivileges(new HiveObjectPrivilege( @@ -1040,7 +1039,7 @@ private int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException } db - .createIndex( + .createIndex( crtIndex.getTableName(), crtIndex.getIndexName(), crtIndex.getIndexTypeHandlerClass(), crtIndex.getIndexedCols(), crtIndex.getIndexTableName(), crtIndex.getDeferredRebuild(), crtIndex.getInputFormat(), crtIndex.getOutputFormat(), crtIndex.getSerde(), @@ -1049,12 +1048,12 @@ private 
int createIndex(Hive db, CreateIndexDesc crtIndex) throws HiveException crtIndex.getLineDelim(), crtIndex.getMapKeyDelim(), crtIndex.getIndexComment() ); if (HiveUtils.getIndexHandler(conf, crtIndex.getIndexTypeHandlerClass()).usesIndexTable()) { - String indexTableName = - crtIndex.getIndexTableName() != null ? crtIndex.getIndexTableName() : + String indexTableName = + crtIndex.getIndexTableName() != null ? crtIndex.getIndexTableName() : MetaStoreUtils.getIndexTableName(SessionState.get().getCurrentDatabase(), - crtIndex.getTableName(), crtIndex.getIndexName()); - Table indexTable = db.getTable(indexTableName); - work.getOutputs().add(new WriteEntity(indexTable)); + crtIndex.getTableName(), crtIndex.getIndexName()); + Table indexTable = db.getTable(indexTableName); + work.getOutputs().add(new WriteEntity(indexTable)); } return 0; } @@ -1066,54 +1065,54 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException Index idx = db.getIndex(dbName, baseTableName, indexName); switch(alterIndex.getOp()) { - case ADDPROPS: - idx.getParameters().putAll(alterIndex.getProps()); - break; - case UPDATETIMESTAMP: - try { - Map props = new HashMap(); - Map, Long> basePartTs = new HashMap, Long>(); + case ADDPROPS: + idx.getParameters().putAll(alterIndex.getProps()); + break; + case UPDATETIMESTAMP: + try { + Map props = new HashMap(); + Map, Long> basePartTs = new HashMap, Long>(); - Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(), - baseTableName); + Table baseTbl = db.getTable(SessionState.get().getCurrentDatabase(), + baseTableName); - if (baseTbl.isPartitioned()) { - List baseParts; - if (alterIndex.getSpec() != null) { - baseParts = db.getPartitions(baseTbl, alterIndex.getSpec()); - } else { - baseParts = db.getPartitions(baseTbl); - } - if (baseParts != null) { - for (Partition p : baseParts) { - FileSystem fs = p.getDataLocation().getFileSystem(db.getConf()); - FileStatus fss = fs.getFileStatus(p.getDataLocation()); - basePartTs.put(p.getSpec(), fss.getModificationTime()); - } - } + if (baseTbl.isPartitioned()) { + List baseParts; + if (alterIndex.getSpec() != null) { + baseParts = db.getPartitions(baseTbl, alterIndex.getSpec()); } else { - FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf()); - FileStatus fss = fs.getFileStatus(baseTbl.getPath()); - basePartTs.put(null, fss.getModificationTime()); + baseParts = db.getPartitions(baseTbl); } - for (Map spec : basePartTs.keySet()) { - if (spec != null) { - props.put(spec.toString(), basePartTs.get(spec).toString()); - } else { - props.put("base_timestamp", basePartTs.get(null).toString()); + if (baseParts != null) { + for (Partition p : baseParts) { + FileSystem fs = p.getDataLocation().getFileSystem(db.getConf()); + FileStatus fss = fs.getFileStatus(p.getDataLocation()); + basePartTs.put(p.getSpec(), fss.getModificationTime()); } } - idx.getParameters().putAll(props); - } catch (HiveException e) { - throw new HiveException("ERROR: Failed to update index timestamps"); - } catch (IOException e) { - throw new HiveException("ERROR: Failed to look up timestamps on filesystem"); + } else { + FileSystem fs = baseTbl.getPath().getFileSystem(db.getConf()); + FileStatus fss = fs.getFileStatus(baseTbl.getPath()); + basePartTs.put(null, fss.getModificationTime()); } + for (Map spec : basePartTs.keySet()) { + if (spec != null) { + props.put(spec.toString(), basePartTs.get(spec).toString()); + } else { + props.put("base_timestamp", basePartTs.get(null).toString()); + } + } + idx.getParameters().putAll(props); + 
} catch (HiveException e) { + throw new HiveException("ERROR: Failed to update index timestamps"); + } catch (IOException e) { + throw new HiveException("ERROR: Failed to look up timestamps on filesystem"); + } - break; - default: - console.printError("Unsupported Alter commnad"); - return 1; + break; + default: + console.printError("Unsupported Alter commnad"); + return 1; } // set last modified by properties @@ -1178,17 +1177,17 @@ private int renamePartition(Hive db, RenamePartitionDesc renamePartitionDesc) th } /** - * Alter partition column type in a table - * - * @param db - * Database to rename the partition. - * @param alterPartitionDesc - * change partition column type. - * @return Returns 0 when execution succeeds and above 0 if it fails. - * @throws HiveException - */ + * Alter partition column type in a table + * + * @param db + * Database to rename the partition. + * @param alterPartitionDesc + * change partition column type. + * @return Returns 0 when execution succeeds and above 0 if it fails. + * @throws HiveException + */ private int alterTableAlterPart(Hive db, AlterTableAlterPartDesc alterPartitionDesc) - throws HiveException { + throws HiveException { Table tbl = db.getTable(alterPartitionDesc.getDbName(), alterPartitionDesc.getTableName()); String tabName = alterPartitionDesc.getTableName(); @@ -1389,7 +1388,7 @@ boolean partitionInCustomLocation(Table tbl, Partition p) private int archive(Hive db, AlterTableSimpleDesc simpleDesc, DriverContext driverContext) - throws HiveException { + throws HiveException { String dbName = simpleDesc.getDbName(); String tblName = simpleDesc.getTableName(); @@ -1417,7 +1416,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, for(Partition p: partitions){ if(partitionInCustomLocation(tbl, p)) { String message = String.format("ARCHIVE cannot run for partition " + - "groups with custom locations like %s", p.getLocation()); + "groups with custom locations like %s", p.getLocation()); throw new HiveException(message); } } @@ -1505,10 +1504,10 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, // First create the archive in a tmp dir so that if the job fails, the // bad files don't pollute the filesystem Path tmpPath = new Path(driverContext.getCtx() - .getExternalTmpPath(originalDir.toUri()), "partlevel"); + .getExternalTmpPath(originalDir.toUri()), "partlevel"); console.printInfo("Creating " + archiveName + - " for " + originalDir.toString()); + " for " + originalDir.toString()); console.printInfo("in " + tmpPath); console.printInfo("Please wait... (this may take a while)"); @@ -1517,7 +1516,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, try { int maxJobNameLen = conf.getIntVar(HiveConf.ConfVars.HIVEJOBNAMELENGTH); String jobname = String.format("Archiving %s@%s", - tbl.getTableName(), partSpecInfo.getName()); + tbl.getTableName(), partSpecInfo.getName()); jobname = Utilities.abbreviate(jobname, maxJobNameLen - 6); conf.setVar(HiveConf.ConfVars.HADOOPJOBNAME, jobname); ret = shim.createHadoopArchive(conf, originalDir, tmpPath, archiveName); @@ -1542,7 +1541,7 @@ private int archive(Hive db, AlterTableSimpleDesc simpleDesc, } else { if (pathExists(intermediateArchivedDir)) { console.printInfo("Intermediate archive directory " + intermediateArchivedDir + - " already exists. Assuming it contains an archived version of the partition"); + " already exists. 
Assuming it contains an archived version of the partition"); } } @@ -1650,7 +1649,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) for(Partition p: partitions){ if(partitionInCustomLocation(tbl, p)) { String message = String.format("UNARCHIVE cannot run for partition " + - "groups with custom locations like %s", p.getLocation()); + "groups with custom locations like %s", p.getLocation()); throw new HiveException(message); } } @@ -1780,7 +1779,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) } } else { console.printInfo(intermediateArchivedDir + " already exists. " + - "Assuming it contains the archived version of the partition"); + "Assuming it contains the archived version of the partition"); } // If there is a failure from here to until when the metadata is changed, @@ -1798,7 +1797,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc) } } else { console.printInfo(originalDir + " already exists. " + - "Assuming it contains the extracted files in the partition"); + "Assuming it contains the extracted files in the partition"); } for(Partition p: partitions) { @@ -2164,27 +2163,27 @@ else if (sortCol.getOrder() == BaseSemanticAnalyzer.HIVE_COLUMN_ORDER_DESC) { if (delims.containsKey(serdeConstants.FIELD_DELIM)) { tbl_row_format += " FIELDS TERMINATED BY '" + escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get( - serdeConstants.FIELD_DELIM))) + "' \n"; + serdeConstants.FIELD_DELIM))) + "' \n"; } if (delims.containsKey(serdeConstants.COLLECTION_DELIM)) { tbl_row_format += " COLLECTION ITEMS TERMINATED BY '" + escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get( - serdeConstants.COLLECTION_DELIM))) + "' \n"; + serdeConstants.COLLECTION_DELIM))) + "' \n"; } if (delims.containsKey(serdeConstants.MAPKEY_DELIM)) { tbl_row_format += " MAP KEYS TERMINATED BY '" + escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get( - serdeConstants.MAPKEY_DELIM))) + "' \n"; + serdeConstants.MAPKEY_DELIM))) + "' \n"; } if (delims.containsKey(serdeConstants.LINE_DELIM)) { tbl_row_format += " LINES TERMINATED BY '" + escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get( - serdeConstants.LINE_DELIM))) + "' \n"; + serdeConstants.LINE_DELIM))) + "' \n"; } if (delims.containsKey(serdeConstants.SERIALIZATION_NULL_FORMAT)) { tbl_row_format += " NULL DEFINED AS '" + escapeHiveCommand(StringEscapeUtils.escapeJava(delims.get( - serdeConstants.SERIALIZATION_NULL_FORMAT))) + "' \n"; + serdeConstants.SERIALIZATION_NULL_FORMAT))) + "' \n"; } } else { @@ -2400,7 +2399,7 @@ private int showTables(Hive db, ShowTablesDesc showTbls) throws HiveException { } public int showColumns(Hive db, ShowColumnsDesc showCols) - throws HiveException { + throws HiveException { String dbName = showCols.getDbName(); String tableName = showCols.getTableName(); @@ -2421,8 +2420,11 @@ public int showColumns(Hive db, ShowColumnsDesc showCols) List cols = table.getCols(); cols.addAll(table.getPartCols()); - outStream.writeBytes( - MetaDataFormatUtils.getAllColumnsInformation(cols, false)); + // In case the query is served by HiveServer2, don't pad it with spaces, + // as HiveServer2 output is consumed by JDBC/ODBC clients. 
+ boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + outStream.writeBytes(MetaDataFormatUtils.getAllColumnsInformation( + cols, false, isOutputPadded)); outStream.close(); outStream = null; } catch (IOException e) { @@ -2515,8 +2517,8 @@ private int showLocks(ShowLocksDesc showLocks) throws HiveException { } else { locks = lockMgr.getLocks(getHiveObject(showLocks.getTableName(), - showLocks.getPartSpec()), - true, isExt); + showLocks.getPartSpec()), + true, isExt); } Collections.sort(locks, new Comparator() { @@ -2602,10 +2604,10 @@ private int lockTable(LockTableDesc lockTbl) throws HiveException { Map partSpec = lockTbl.getPartSpec(); HiveLockObjectData lockData = - new HiveLockObjectData(lockTbl.getQueryId(), - String.valueOf(System.currentTimeMillis()), - "EXPLICIT", - lockTbl.getQueryStr()); + new HiveLockObjectData(lockTbl.getQueryId(), + String.valueOf(System.currentTimeMillis()), + "EXPLICIT", + lockTbl.getQueryStr()); if (partSpec == null) { HiveLock lck = lockMgr.lock(new HiveLockObject(tbl, lockData), mode, true); @@ -2651,9 +2653,9 @@ private int lockDatabase(LockDatabaseDesc lockDb) throws HiveException { } HiveLockObjectData lockData = - new HiveLockObjectData(lockDb.getQueryId(), - String.valueOf(System.currentTimeMillis()), - "EXPLICIT", lockDb.getQueryStr()); + new HiveLockObjectData(lockDb.getQueryId(), + String.valueOf(System.currentTimeMillis()), + "EXPLICIT", lockDb.getQueryStr()); HiveLock lck = lockMgr.lock(new HiveLockObject(dbObj.getName(), lockData), mode, true); if (lck == null) { @@ -2699,7 +2701,7 @@ private int unlockDatabase(UnlockDatabaseDesc unlockDb) throws HiveException { } private HiveLockObject getHiveObject(String tabName, - Map partSpec) throws HiveException { + Map partSpec) throws HiveException { Table tbl = db.getTable(tabName); if (tbl == null) { throw new HiveException("Table " + tabName + " does not exist "); @@ -2830,12 +2832,12 @@ private int descDatabase(DescDatabaseDesc descDatabase) throws HiveException { if (database == null) { throw new HiveException(ErrorMsg.DATABASE_NOT_EXISTS, descDatabase.getDatabaseName()); } else { - Map params = null; - if(descDatabase.isExt()) { - params = database.getParameters(); - } - PrincipalType ownerType = database.getOwnerType(); - formatter.showDatabaseDescription(outStream, database.getName(), + Map params = null; + if(descDatabase.isExt()) { + params = database.getParameters(); + } + PrincipalType ownerType = database.getOwnerType(); + formatter.showDatabaseDescription(outStream, database.getName(), database.getDescription(), database.getLocationUri(), database.getOwnerName(), (null == ownerType) ? null : ownerType.name(), params); } @@ -3018,7 +3020,7 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException { outStream.close(); outStream = null; throw new HiveException(ErrorMsg.INVALID_PARTITION, - StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName); + StringUtils.join(descTbl.getPartSpec().keySet(), ','), tableName); } tbl = part.getTable(); } @@ -3039,19 +3041,22 @@ private int describeTable(Hive db, DescTableDesc descTbl) throws HiveException { cols = (part == null || tbl.getTableType() == TableType.VIRTUAL_VIEW) ? 
tbl.getCols() : part.getCols(); - if (!descTbl.isFormatted()) { - if (tableName.equals(colPath)) { - cols.addAll(tbl.getPartCols()); - } - } + if (!descTbl.isFormatted()) { + if (tableName.equals(colPath)) { + cols.addAll(tbl.getPartCols()); + } + } } else { cols = Hive.getFieldsFromDeserializer(colPath, tbl.getDeserializer()); } fixDecimalColumnTypeName(cols); - - formatter.describeTable(outStream, colPath, tableName, tbl, part, cols, - descTbl.isFormatted(), descTbl.isExt(), descTbl.isPretty()); + // In case the query is served by HiveServer2, don't pad it with spaces, + // as HiveServer2 output is consumed by JDBC/ODBC clients. + boolean isOutputPadded = !SessionState.get().isHiveServerQuery(); + formatter.describeTable(outStream, colPath, tableName, tbl, part, + cols, descTbl.isFormatted(), descTbl.isExt(), + descTbl.isPretty(), isOutputPadded); LOG.info("DDLTask: written data for " + tbl.getTableName()); outStream.close(); @@ -3195,7 +3200,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { part = db.getPartition(tbl, alterTbl.getPartSpec(), false); if (part == null) { throw new HiveException(ErrorMsg.INVALID_PARTITION, - StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName()); + StringUtils.join(alterTbl.getPartSpec().keySet(), ',') + " for table " + alterTbl.getOldName()); } } else { @@ -3213,7 +3218,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { if (tbl.getSerializationLib().equals( "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { console - .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); + .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); tbl.setSerializationLib(LazySimpleSerDe.class.getName()); tbl.getTTable().getSd().setCols(newCols); } else { @@ -3302,7 +3307,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { if (tbl.getSerializationLib().equals( "org.apache.hadoop.hive.serde.thrift.columnsetSerDe")) { console - .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); + .printInfo("Replacing columns for columnsetSerDe and changing to LazySimpleSerDe"); tbl.setSerializationLib(LazySimpleSerDe.class.getName()); } else if (!tbl.getSerializationLib().equals( MetadataTypedColumnsetSerDe.class.getName()) @@ -3343,7 +3348,7 @@ private int alterTable(Hive db, AlterTableDesc alterTbl) throws HiveException { alterTbl.getProps()); } tbl.setFields(Hive.getFieldsFromDeserializer(tbl.getTableName(), tbl. 
- getDeserializer())); + getDeserializer())); } } else if (alterTbl.getOp() == AlterTableDesc.AlterTableTypes.ADDFILEFORMAT) { if(part != null) { @@ -3580,7 +3585,7 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc return; } throw new HiveException( - "Cannot drop a base table with DROP VIEW"); + "Cannot drop a base table with DROP VIEW"); } } } @@ -3591,7 +3596,7 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc } int partitionBatchSize = HiveConf.getIntVar(conf, - ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX); + ConfVars.METASTORE_BATCH_RETRIEVE_TABLE_PARTITION_MAX); // We should check that all the partitions of the table can be dropped if (tbl != null && tbl.isPartitioned()) { @@ -3599,13 +3604,13 @@ private void dropTable(Hive db, Table tbl, DropTableDesc dropTbl) throws HiveExc for(int i=0; i < partitionNames.size(); i+= partitionBatchSize) { List partNames = partitionNames.subList(i, Math.min(i+partitionBatchSize, - partitionNames.size())); + partitionNames.size())); List listPartitions = db.getPartitionsByNames(tbl, partNames); for (Partition p: listPartitions) { if (!p.canDrop()) { throw new HiveException("Table " + tbl.getTableName() + - " Partition" + p.getName() + - " is protected from being dropped"); + " Partition" + p.getName() + + " is protected from being dropped"); } } } @@ -3641,7 +3646,7 @@ private void validateSerDe(String serdeName) throws HiveException { try { Deserializer d = ReflectionUtils.newInstance(conf.getClassByName(serdeName). - asSubclass(Deserializer.class), conf); + asSubclass(Deserializer.class), conf); if (d != null) { LOG.debug("Found class for " + serdeName); } @@ -3755,8 +3760,8 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { if (crtTbl.getStorageHandler() != null) { tbl.setProperty( - org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, - crtTbl.getStorageHandler()); + org.apache.hadoop.hive.metastore.api.hive_metastoreConstants.META_TABLE_STORAGE, + crtTbl.getStorageHandler()); } HiveStorageHandler storageHandler = tbl.getStorageHandler(); @@ -3773,7 +3778,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { } else { String serDeClassName = storageHandler.getSerDeClass().getName(); LOG.info("Use StorageHandler-supplied " + serDeClassName - + " for table " + crtTbl.getTableName()); + + " for table " + crtTbl.getTableName()); tbl.setSerializationLib(serDeClassName); } } else { @@ -3804,7 +3809,7 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { } if (crtTbl.getSerdeProps() != null) { Iterator> iter = crtTbl.getSerdeProps().entrySet() - .iterator(); + .iterator(); while (iter.hasNext()) { Entry m = iter.next(); tbl.setSerdeParam(m.getKey(), m.getValue()); @@ -3840,9 +3845,9 @@ private int createTable(Hive db, CreateTableDesc crtTbl) throws HiveException { tbl.setOutputFormatClass(crtTbl.getOutputFormat()); tbl.getTTable().getSd().setInputFormat( - tbl.getInputFormatClass().getName()); + tbl.getInputFormatClass().getName()); tbl.getTTable().getSd().setOutputFormat( - tbl.getOutputFormatClass().getName()); + tbl.getOutputFormatClass().getName()); if (crtTbl.isExternal()) { tbl.setProperty("EXTERNAL", "TRUE"); @@ -3937,7 +3942,7 @@ private int createTableLike(Hive db, CreateTableLikeDesc crtTbl) throws HiveExce if (crtTbl.getDefaultSerdeProps() != null) { Iterator> iter = crtTbl.getDefaultSerdeProps().entrySet() - .iterator(); + .iterator(); while 
(iter.hasNext()) { Entry m = iter.next(); tbl.setSerdeParam(m.getKey(), m.getValue()); diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java index 7fceb65..96cba25 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/JsonMetaDataFormatter.java @@ -47,371 +47,344 @@ * json. */ public class JsonMetaDataFormatter implements MetaDataFormatter { - private static final Log LOG = LogFactory.getLog(JsonMetaDataFormatter.class); - - /** - * Convert the map to a JSON string. - */ - private void asJson(OutputStream out, Map data) - throws HiveException - { - try { - new ObjectMapper().writeValue(out, data); - } catch (IOException e) { - throw new HiveException("Unable to convert to json", e); - } + private static final Log LOG = LogFactory.getLog(JsonMetaDataFormatter.class); + + /** + * Convert the map to a JSON string. + */ + private void asJson(OutputStream out, Map data) + throws HiveException + { + try { + new ObjectMapper().writeValue(out, data); + } catch (IOException e) { + throw new HiveException("Unable to convert to json", e); } + } - /** - * Write an error message. - */ - @Override - public void error(OutputStream out, String msg, int errorCode, String sqlState) - throws HiveException - { - error(out, msg, errorCode, sqlState, null); - } - @Override - public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) throws HiveException { - MapBuilder mb = MapBuilder.create().put("error", errorMessage); - if(errorDetail != null) { - mb.put("errorDetail", errorDetail); - } - mb.put("errorCode", errorCode); - if(sqlState != null) { - mb.put("sqlState", sqlState); - } - asJson(out,mb.build()); + /** + * Write an error message. + */ + @Override + public void error(OutputStream out, String msg, int errorCode, String sqlState) + throws HiveException + { + error(out, msg, errorCode, sqlState, null); + } + @Override + public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) throws HiveException { + MapBuilder mb = MapBuilder.create().put("error", errorMessage); + if(errorDetail != null) { + mb.put("errorDetail", errorDetail); } - - /** - * Show a list of tables. - */ - @Override - public void showTables(DataOutputStream out, Set tables) - throws HiveException - { - asJson(out, - MapBuilder.create() - .put("tables", tables) - .build()); + mb.put("errorCode", errorCode); + if(sqlState != null) { + mb.put("sqlState", sqlState); } - - /** - * Describe table. - */ - @Override - public void describeTable(DataOutputStream out, - String colPath, String tableName, - Table tbl, Partition part, List cols, - boolean isFormatted, boolean isExt, - boolean isPretty) - throws HiveException - { - MapBuilder builder = MapBuilder.create(); - - builder.put("columns", makeColsUnformatted(cols)); - - if (isExt) { - if (part != null) - builder.put("partitionInfo", part.getTPartition()); - else - builder.put("tableInfo", tbl.getTTable()); - } - - asJson(out, builder.build()); + asJson(out,mb.build()); + } + + /** + * Show a list of tables. + */ + @Override + public void showTables(DataOutputStream out, Set tables) + throws HiveException { + asJson(out, MapBuilder.create().put("tables", tables).build()); + } + + /** + * Describe table. 
+ */ + @Override + public void describeTable(DataOutputStream out, String colPath, + String tableName, Table tbl, Partition part, List cols, + boolean isFormatted, boolean isExt, boolean isPretty, + boolean isOutputPadded) throws HiveException { + MapBuilder builder = MapBuilder.create(); + builder.put("columns", makeColsUnformatted(cols)); + + if (isExt) { + if (part != null) { + builder.put("partitionInfo", part.getTPartition()); + } + else { + builder.put("tableInfo", tbl.getTTable()); + } } - private List> makeColsUnformatted(List cols) { - ArrayList> res = new ArrayList>(); - for (FieldSchema col : cols) - res.add(makeOneColUnformatted(col)); - return res; - } + asJson(out, builder.build()); + } - private Map makeOneColUnformatted(FieldSchema col) { - return MapBuilder.create() - .put("name", col.getName()) - .put("type", col.getType()) - .put("comment", col.getComment()) - .build(); + private List> makeColsUnformatted(List cols) { + ArrayList> res = new ArrayList>(); + for (FieldSchema col : cols) { + res.add(makeOneColUnformatted(col)); } - - @Override - public void showTableStatus(DataOutputStream out, - Hive db, - HiveConf conf, - List tbls, - Map part, - Partition par) - throws HiveException - { - asJson(out, MapBuilder - .create() - .put("tables", makeAllTableStatus(db, conf, - tbls, part, par)) - .build()); + return res; + } + + private Map makeOneColUnformatted(FieldSchema col) { + return MapBuilder.create() + .put("name", col.getName()) + .put("type", col.getType()) + .put("comment", col.getComment()) + .build(); + } + + @Override + public void showTableStatus(DataOutputStream out, Hive db, HiveConf conf, + List
<Table> tbls, Map<String, String> part, Partition par) + throws HiveException { + asJson(out, MapBuilder.create().put( + "tables", makeAllTableStatus(db, conf, tbls, part, par)).build()); + } + + private List<Map<String, Object>> makeAllTableStatus(Hive db, HiveConf conf, + List<Table>
tbls, Map part, Partition par) + throws HiveException { + try { + ArrayList> res = new ArrayList>(); + for (Table tbl : tbls) { + res.add(makeOneTableStatus(tbl, db, conf, part, par)); + } + return res; + } catch(IOException e) { + throw new HiveException(e); } - - private List> makeAllTableStatus(Hive db, - HiveConf conf, - List
tbls, - Map part, - Partition par) - throws HiveException - { - try { - ArrayList> res = new ArrayList>(); - for (Table tbl : tbls) - res.add(makeOneTableStatus(tbl, db, conf, part, par)); - return res; - } catch(IOException e) { - throw new HiveException(e); + } + + private Map makeOneTableStatus(Table tbl, Hive db, + HiveConf conf, Map part, Partition par) + throws HiveException, IOException { + String tblLoc = null; + String inputFormattCls = null; + String outputFormattCls = null; + if (part != null) { + if (par != null) { + if (par.getLocation() != null) { + tblLoc = par.getDataLocation().toString(); } + inputFormattCls = par.getInputFormatClass().getName(); + outputFormattCls = par.getOutputFormatClass().getName(); + } + } else { + if (tbl.getPath() != null) { + tblLoc = tbl.getDataLocation().toString(); + } + inputFormattCls = tbl.getInputFormatClass().getName(); + outputFormattCls = tbl.getOutputFormatClass().getName(); } - private Map makeOneTableStatus(Table tbl, - Hive db, - HiveConf conf, - Map part, - Partition par) - throws HiveException, IOException - { - String tblLoc = null; - String inputFormattCls = null; - String outputFormattCls = null; - if (part != null) { - if (par != null) { - if (par.getLocation() != null) { - tblLoc = par.getDataLocation().toString(); - } - inputFormattCls = par.getInputFormatClass().getName(); - outputFormattCls = par.getOutputFormatClass().getName(); - } - } else { - if (tbl.getPath() != null) { - tblLoc = tbl.getDataLocation().toString(); - } - inputFormattCls = tbl.getInputFormatClass().getName(); - outputFormattCls = tbl.getOutputFormatClass().getName(); - } - - MapBuilder builder = MapBuilder.create(); + MapBuilder builder = MapBuilder.create(); - builder.put("tableName", tbl.getTableName()); - builder.put("owner", tbl.getOwner()); - builder.put("location", tblLoc); - builder.put("inputFormat", inputFormattCls); - builder.put("outputFormat", outputFormattCls); - builder.put("columns", makeColsUnformatted(tbl.getCols())); + builder.put("tableName", tbl.getTableName()); + builder.put("owner", tbl.getOwner()); + builder.put("location", tblLoc); + builder.put("inputFormat", inputFormattCls); + builder.put("outputFormat", outputFormattCls); + builder.put("columns", makeColsUnformatted(tbl.getCols())); - builder.put("partitioned", tbl.isPartitioned()); - if (tbl.isPartitioned()) - builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols())); + builder.put("partitioned", tbl.isPartitioned()); + if (tbl.isPartitioned()) { + builder.put("partitionColumns", makeColsUnformatted(tbl.getPartCols())); + } - putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par), - conf, tbl.getPath()); + putFileSystemsStats(builder, makeTableStatusLocations(tbl, db, par), + conf, tbl.getPath()); - return builder.build(); - } + return builder.build(); + } - private List makeTableStatusLocations(Table tbl, Hive db, Partition par) - throws HiveException - { - // output file system information - Path tblPath = tbl.getPath(); - List locations = new ArrayList(); - if (tbl.isPartitioned()) { - if (par == null) { - for (Partition curPart : db.getPartitions(tbl)) { - if (curPart.getLocation() != null) { - locations.add(new Path(curPart.getLocation())); - } - } - } else { - if (par.getLocation() != null) { - locations.add(new Path(par.getLocation())); - } - } - } else { - if (tblPath != null) { - locations.add(tblPath); + private List makeTableStatusLocations(Table tbl, Hive db, Partition par) + throws HiveException { + // output file system information + 
Path tblPath = tbl.getPath(); + List locations = new ArrayList(); + if (tbl.isPartitioned()) { + if (par == null) { + for (Partition curPart : db.getPartitions(tbl)) { + if (curPart.getLocation() != null) { + locations.add(new Path(curPart.getLocation())); } } - - return locations; + } else { + if (par.getLocation() != null) { + locations.add(new Path(par.getLocation())); + } + } + } else { + if (tblPath != null) { + locations.add(tblPath); + } } - // Duplicates logic in TextMetaDataFormatter - private void putFileSystemsStats(MapBuilder builder, List locations, - HiveConf conf, Path tblPath) - throws IOException - { - long totalFileSize = 0; - long maxFileSize = 0; - long minFileSize = Long.MAX_VALUE; - long lastAccessTime = 0; - long lastUpdateTime = 0; - int numOfFiles = 0; - - boolean unknown = false; - FileSystem fs = tblPath.getFileSystem(conf); - // in case all files in locations do not exist - try { - FileStatus tmpStatus = fs.getFileStatus(tblPath); - lastAccessTime = tmpStatus.getAccessTime(); - lastUpdateTime = tmpStatus.getModificationTime(); - } catch (IOException e) { - LOG.warn( - "Cannot access File System. File System status will be unknown: ", e); - unknown = true; - } + return locations; + } + + // Duplicates logic in TextMetaDataFormatter + private void putFileSystemsStats(MapBuilder builder, List locations, + HiveConf conf, Path tblPath) + throws IOException { + long totalFileSize = 0; + long maxFileSize = 0; + long minFileSize = Long.MAX_VALUE; + long lastAccessTime = 0; + long lastUpdateTime = 0; + int numOfFiles = 0; + + boolean unknown = false; + FileSystem fs = tblPath.getFileSystem(conf); + // in case all files in locations do not exist + try { + FileStatus tmpStatus = fs.getFileStatus(tblPath); + lastAccessTime = tmpStatus.getAccessTime(); + lastUpdateTime = tmpStatus.getModificationTime(); + } catch (IOException e) { + LOG.warn( + "Cannot access File System. File System status will be unknown: ", e); + unknown = true; + } - if (!unknown) { - for (Path loc : locations) { - try { - FileStatus status = fs.getFileStatus(tblPath); - FileStatus[] files = fs.listStatus(loc); - long accessTime = status.getAccessTime(); - long updateTime = status.getModificationTime(); - // no matter loc is the table location or part location, it must be a - // directory. - if (!status.isDir()) { + if (!unknown) { + for (Path loc : locations) { + try { + FileStatus status = fs.getFileStatus(tblPath); + FileStatus[] files = fs.listStatus(loc); + long accessTime = status.getAccessTime(); + long updateTime = status.getModificationTime(); + // no matter loc is the table location or part location, it must be a + // directory. 
+ if (!status.isDir()) { + continue; + } + if (accessTime > lastAccessTime) { + lastAccessTime = accessTime; + } + if (updateTime > lastUpdateTime) { + lastUpdateTime = updateTime; + } + for (FileStatus currentStatus : files) { + if (currentStatus.isDir()) { continue; } + numOfFiles++; + long fileLen = currentStatus.getLen(); + totalFileSize += fileLen; + if (fileLen > maxFileSize) { + maxFileSize = fileLen; + } + if (fileLen < minFileSize) { + minFileSize = fileLen; + } + accessTime = currentStatus.getAccessTime(); + updateTime = currentStatus.getModificationTime(); if (accessTime > lastAccessTime) { lastAccessTime = accessTime; } if (updateTime > lastUpdateTime) { lastUpdateTime = updateTime; } - for (FileStatus currentStatus : files) { - if (currentStatus.isDir()) { - continue; - } - numOfFiles++; - long fileLen = currentStatus.getLen(); - totalFileSize += fileLen; - if (fileLen > maxFileSize) { - maxFileSize = fileLen; - } - if (fileLen < minFileSize) { - minFileSize = fileLen; - } - accessTime = currentStatus.getAccessTime(); - updateTime = currentStatus.getModificationTime(); - if (accessTime > lastAccessTime) { - lastAccessTime = accessTime; - } - if (updateTime > lastUpdateTime) { - lastUpdateTime = updateTime; - } - } - } catch (IOException e) { - // ignore } + } catch (IOException e) { + // ignore } } - - builder - .put("totalNumberFiles", numOfFiles, ! unknown) - .put("totalFileSize", totalFileSize, ! unknown) - .put("maxFileSize", maxFileSize, ! unknown) - .put("minFileSize", numOfFiles > 0 ? minFileSize : 0, ! unknown) - .put("lastAccessTime", lastAccessTime, ! (unknown || lastAccessTime < 0)) - .put("lastUpdateTime", lastUpdateTime, ! unknown); - } - - /** - * Show the table partitions. - */ - @Override - public void showTablePartitons(DataOutputStream out, List parts) - throws HiveException - { - asJson(out, - MapBuilder.create() - .put("partitions", makeTablePartions(parts)) - .build()); - } - - private List> makeTablePartions(List parts) - throws HiveException - { - try { - ArrayList> res = new ArrayList>(); - for (String part : parts) - res.add(makeOneTablePartition(part)); - return res; - } catch (UnsupportedEncodingException e) { - throw new HiveException(e); - } } - // This seems like a very wrong implementation. - private Map makeOneTablePartition(String partIdent) - throws UnsupportedEncodingException - { - ArrayList> res = new ArrayList>(); - - ArrayList names = new ArrayList(); - for (String part : StringUtils.split(partIdent, "/")) { - String name = part; - String val = null; - String[] kv = StringUtils.split(part, "=", 2); - if (kv != null) { - name = kv[0]; - if (kv.length > 1) - val = URLDecoder.decode(kv[1], "UTF-8"); - } - if (val != null) - names.add(name + "='" + val + "'"); - else - names.add(name); - - res.add(MapBuilder.create() - .put("columnName", name) - .put("columnValue", val) - .build()); - } + builder + .put("totalNumberFiles", numOfFiles, ! unknown) + .put("totalFileSize", totalFileSize, ! unknown) + .put("maxFileSize", maxFileSize, ! unknown) + .put("minFileSize", numOfFiles > 0 ? minFileSize : 0, ! unknown) + .put("lastAccessTime", lastAccessTime, ! (unknown || lastAccessTime < 0)) + .put("lastUpdateTime", lastUpdateTime, ! unknown); + } + + /** + * Show the table partitions. 
+ */ + @Override + public void showTablePartitons(DataOutputStream out, List parts) + throws HiveException { + asJson(out, MapBuilder.create().put("partitions", + makeTablePartions(parts)).build()); + } - return MapBuilder.create() - .put("name", StringUtils.join(names, ",")) - .put("values", res) - .build(); + private List> makeTablePartions(List parts) + throws HiveException { + try { + ArrayList> res = new ArrayList>(); + for (String part : parts) { + res.add(makeOneTablePartition(part)); + } + return res; + } catch (UnsupportedEncodingException e) { + throw new HiveException(e); } + } + + // This seems like a very wrong implementation. + private Map makeOneTablePartition(String partIdent) + throws UnsupportedEncodingException { + ArrayList> res = new ArrayList>(); + + ArrayList names = new ArrayList(); + for (String part : StringUtils.split(partIdent, "/")) { + String name = part; + String val = null; + String[] kv = StringUtils.split(part, "=", 2); + if (kv != null) { + name = kv[0]; + if (kv.length > 1) + val = URLDecoder.decode(kv[1], "UTF-8"); + } + if (val != null) { + names.add(name + "='" + val + "'"); + } + else { + names.add(name); + } - /** - * Show a list of databases - */ - @Override - public void showDatabases(DataOutputStream out, List databases) - throws HiveException - { - asJson(out, - MapBuilder.create() - .put("databases", databases) - .build()); + res.add(MapBuilder.create() + .put("columnName", name) + .put("columnValue", val) + .build()); } - /** - * Show the description of a database - */ - @Override - public void showDatabaseDescription(DataOutputStream out, String database, String comment, - String location, String ownerName, String ownerType, Map params) + return MapBuilder.create() + .put("name", StringUtils.join(names, ",")) + .put("values", res) + .build(); + } + + /** + * Show a list of databases + */ + @Override + public void showDatabases(DataOutputStream out, List databases) throws HiveException { - MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment) + asJson(out, MapBuilder.create().put("databases", databases).build()); + } + + /** + * Show the description of a database + */ + @Override + public void showDatabaseDescription(DataOutputStream out, String database, String comment, + String location, String ownerName, String ownerType, Map params) + throws HiveException { + MapBuilder builder = MapBuilder.create().put("database", database).put("comment", comment) .put("location", location); - if (null != ownerName) { - builder.put("owner", ownerName); - } - if (null != ownerType) { - builder.put("ownerType", ownerType); - } - if (null != params && !params.isEmpty()) { - builder.put("params", params); - } - asJson(out, builder.build()); + if (null != ownerName) { + builder.put("owner", ownerName); + } + if (null != ownerType) { + builder.put("ownerType", ownerType); + } + if (null != params && !params.isEmpty()) { + builder.put("params", params); } + asJson(out, builder.build()); + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java index de788f7..de04cca 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatUtils.java @@ -63,40 +63,81 @@ private static void formatColumnsHeader(StringBuilder columnInformation) { columnInformation.append(LINE_DELIM); } + /** + * Write 
formatted information about the given columns to a string + * @param cols - list of columns + * @param printHeader - if header should be included + * @param isOutputPadded - make it more human readable by setting indentation + * with spaces. Turned off for use by HiveServer2 + * @return string with formatted column information + */ public static String getAllColumnsInformation(List cols, - boolean printHeader) { + boolean printHeader, boolean isOutputPadded) { StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); if(printHeader){ formatColumnsHeader(columnInformation); } - formatAllFields(columnInformation, cols); + formatAllFields(columnInformation, cols, isOutputPadded); return columnInformation.toString(); } - public static String getAllColumnsInformation(List cols, List partCols, - boolean printHeader) { + /** + * Write formatted information about the given columns, including partition + * columns to a string + * @param cols - list of columns + * @param partCols - list of partition columns + * @param printHeader - if header should be included + * @param isOutputPadded - make it more human readable by setting indentation + * with spaces. Turned off for use by HiveServer2 + * @return string with formatted column information + */ + public static String getAllColumnsInformation(List cols, + List partCols, boolean printHeader, boolean isOutputPadded) { StringBuilder columnInformation = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); if(printHeader){ formatColumnsHeader(columnInformation); } - formatAllFields(columnInformation, cols); + formatAllFields(columnInformation, cols, isOutputPadded); if ((partCols != null) && (!partCols.isEmpty())) { columnInformation.append(LINE_DELIM).append("# Partition Information") - .append(LINE_DELIM); + .append(LINE_DELIM); formatColumnsHeader(columnInformation); - formatAllFields(columnInformation, partCols); + formatAllFields(columnInformation, partCols, isOutputPadded); } return columnInformation.toString(); } - private static void formatAllFields(StringBuilder tableInfo, List cols) { + /** + * Write formatted column information into given StringBuilder + * @param tableInfo - StringBuilder to append column information into + * @param cols - list of columns + * @param isOutputPadded - make it more human readable by setting indentation + * with spaces. Turned off for use by HiveServer2 + */ + private static void formatAllFields(StringBuilder tableInfo, + List cols, boolean isOutputPadded) { for (FieldSchema col : cols) { - formatOutput(col.getName(), col.getType(), getComment(col), tableInfo); + if(isOutputPadded) { + formatWithIndentation(col.getName(), col.getType(), getComment(col), tableInfo); + } + else { + formatWithoutIndentation(col.getName(), col.getType(), col.getComment(), tableInfo); + } } } + private static void formatWithoutIndentation(String name, String type, String comment, + StringBuilder colBuffer) { + colBuffer.append(name); + colBuffer.append(FIELD_DELIM); + colBuffer.append(type); + colBuffer.append(FIELD_DELIM); + colBuffer.append(comment == null ? 
"" : comment); + colBuffer.append(LINE_DELIM); + } + public static String getAllColumnsInformation(Index index) { StringBuilder indexInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); @@ -133,7 +174,7 @@ public static String getAllColumnsInformation(Index index) { formatOutput(indexColumns.toArray(new String[0]), indexInfo); return indexInfo.toString(); -} + } public static String getPartitionInformation(Partition part) { StringBuilder tableInfo = new StringBuilder(DEFAULT_STRINGBUILDER_SIZE); @@ -176,7 +217,7 @@ private static void getViewInfo(StringBuilder tableInfo, Table tbl) { } private static void getStorageDescriptorInfo(StringBuilder tableInfo, - StorageDescriptor storageDesc) { + StorageDescriptor storageDesc) { formatOutput("SerDe Library:", storageDesc.getSerdeInfo().getSerializationLib(), tableInfo); formatOutput("InputFormat:", storageDesc.getInputFormat(), tableInfo); @@ -293,13 +334,13 @@ private static void formatOutput(String[] fields, StringBuilder tableInfo) { } private static void formatOutput(String name, String value, - StringBuilder tableInfo) { + StringBuilder tableInfo) { tableInfo.append(String.format("%-" + ALIGNMENT + "s", name)).append(FIELD_DELIM); tableInfo.append(String.format("%-" + ALIGNMENT + "s", value)).append(LINE_DELIM); } - private static void formatOutput(String colName, String colType, String colComment, - StringBuilder tableInfo) { + private static void formatWithIndentation(String colName, String colType, String colComment, + StringBuilder tableInfo) { tableInfo.append(String.format("%-" + ALIGNMENT + "s", colName)).append(FIELD_DELIM); tableInfo.append(String.format("%-" + ALIGNMENT + "s", colType)).append(FIELD_DELIM); @@ -313,7 +354,7 @@ private static void formatOutput(String colName, String colType, String colComme int colTypeLength = ALIGNMENT > colType.length() ? ALIGNMENT : colType.length(); for (int i = 1; i < commentSegments.length; i++) { tableInfo.append(String.format("%" + colNameLength + "s" + FIELD_DELIM + "%" - + colTypeLength + "s" + FIELD_DELIM + "%s", "", "", commentSegments[i])).append(LINE_DELIM); + + colTypeLength + "s" + FIELD_DELIM + "%s", "", "", commentSegments[i])).append(LINE_DELIM); } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java index b9be932..b600155 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/MetaDataFormatter.java @@ -23,13 +23,13 @@ import java.util.List; import java.util.Map; import java.util.Set; + import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.metastore.api.FieldSchema; import org.apache.hadoop.hive.ql.metadata.Hive; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.metadata.Partition; import org.apache.hadoop.hive.ql.metadata.Table; -import org.apache.hadoop.hive.ql.session.SessionState.LogHelper; /** * Interface to format table and index information. We can format it @@ -37,64 +37,75 @@ * (json). */ public interface MetaDataFormatter { - /** - * Write an error message. - * @param sqlState if {@code null}, will be ignored - */ - public void error(OutputStream out, String msg, int errorCode, String sqlState) - throws HiveException; + /** + * Write an error message. 
+ * @param sqlState if {@code null}, will be ignored + */ + public void error(OutputStream out, String msg, int errorCode, String sqlState) + throws HiveException; /** * @param sqlState if {@code null}, will be skipped in output * @param errorDetail usually string version of some Exception, if {@code null}, will be ignored */ - public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) - throws HiveException; + public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) + throws HiveException; - /** - * Show a list of tables. - */ - public void showTables(DataOutputStream out, Set tables) - throws HiveException; + /** + * Show a list of tables. + */ + public void showTables(DataOutputStream out, Set tables) + throws HiveException; - /** - * Describe table. - */ - public void describeTable(DataOutputStream out, - String colPath, String tableName, - Table tbl, Partition part, List cols, - boolean isFormatted, boolean isExt, boolean isPretty) - throws HiveException; + /** + * Describe table. + * @param out + * @param colPath + * @param tableName + * @param tbl + * @param part + * @param cols + * @param isFormatted - describe with formatted keyword + * @param isExt + * @param isPretty + * @param isOutputPadded - if true, add spacing and indentation + * @throws HiveException + */ + public void describeTable(DataOutputStream out, String colPath, + String tableName, Table tbl, Partition part, List cols, + boolean isFormatted, boolean isExt, boolean isPretty, + boolean isOutputPadded) + throws HiveException; - /** - * Show the table status. - */ - public void showTableStatus(DataOutputStream out, - Hive db, - HiveConf conf, - List
<Table> tbls, - Map<String, String> part, - Partition par) - throws HiveException; + /** + * Show the table status. + */ + public void showTableStatus(DataOutputStream out, + Hive db, + HiveConf conf, + List<Table>
tbls, + Map part, + Partition par) + throws HiveException; - /** - * Show the table partitions. - */ - public void showTablePartitons(DataOutputStream out, - List parts) - throws HiveException; + /** + * Show the table partitions. + */ + public void showTablePartitons(DataOutputStream out, + List parts) + throws HiveException; - /** - * Show the databases - */ - public void showDatabases(DataOutputStream out, List databases) - throws HiveException; + /** + * Show the databases + */ + public void showDatabases(DataOutputStream out, List databases) + throws HiveException; - /** - * Describe a database. - */ - public void showDatabaseDescription (DataOutputStream out, String database, String comment, + /** + * Describe a database. + */ + public void showDatabaseDescription (DataOutputStream out, String database, String comment, String location, String ownerName, String ownerType, Map params) - throws HiveException; + throws HiveException; } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java index 8173200..0c49250 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/metadata/formatting/TextMetaDataFormatter.java @@ -48,416 +48,415 @@ * simple lines of text. */ class TextMetaDataFormatter implements MetaDataFormatter { - private static final Log LOG = LogFactory.getLog(TextMetaDataFormatter.class); + private static final Log LOG = LogFactory.getLog(TextMetaDataFormatter.class); - private static final int separator = Utilities.tabCode; - private static final int terminator = Utilities.newLineCode; + private static final int separator = Utilities.tabCode; + private static final int terminator = Utilities.newLineCode; - /** The number of columns to be used in pretty formatting metadata output. - * If -1, then the current terminal width is auto-detected and used. - */ - private final int prettyOutputNumCols; + /** The number of columns to be used in pretty formatting metadata output. + * If -1, then the current terminal width is auto-detected and used. + */ + private final int prettyOutputNumCols; - public TextMetaDataFormatter(int prettyOutputNumCols) { - this.prettyOutputNumCols = prettyOutputNumCols; - } + public TextMetaDataFormatter(int prettyOutputNumCols) { + this.prettyOutputNumCols = prettyOutputNumCols; + } - /** - * Write an error message. - */ - @Override - public void error(OutputStream out, String msg, int errorCode, String sqlState) - throws HiveException - { - error(out, msg, errorCode, sqlState, null); - } + /** + * Write an error message. 
+ */ + @Override + public void error(OutputStream out, String msg, int errorCode, String sqlState) + throws HiveException + { + error(out, msg, errorCode, sqlState, null); + } - @Override - public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) - throws HiveException - { - try { - out.write(errorMessage.getBytes("UTF-8")); - if(errorDetail != null) { - out.write(errorDetail.getBytes("UTF-8")); - } - out.write(errorCode); - if(sqlState != null) { - out.write(sqlState.getBytes("UTF-8"));//this breaks all the tests in .q files - } - out.write(terminator); - } catch (Exception e) { - throw new HiveException(e); - } + @Override + public void error(OutputStream out, String errorMessage, int errorCode, String sqlState, String errorDetail) + throws HiveException + { + try { + out.write(errorMessage.getBytes("UTF-8")); + if(errorDetail != null) { + out.write(errorDetail.getBytes("UTF-8")); + } + out.write(errorCode); + if(sqlState != null) { + out.write(sqlState.getBytes("UTF-8"));//this breaks all the tests in .q files + } + out.write(terminator); + } catch (Exception e) { + throw new HiveException(e); } - /** - * Show a list of tables. - */ - @Override - public void showTables(DataOutputStream out, Set tables) - throws HiveException - { - Iterator iterTbls = tables.iterator(); + } + /** + * Show a list of tables. + */ + @Override + public void showTables(DataOutputStream out, Set tables) + throws HiveException + { + Iterator iterTbls = tables.iterator(); - try { - while (iterTbls.hasNext()) { - // create a row per table name - out.writeBytes(iterTbls.next()); - out.write(terminator); - } - } catch (IOException e) { - throw new HiveException(e); - } + try { + while (iterTbls.hasNext()) { + // create a row per table name + out.writeBytes(iterTbls.next()); + out.write(terminator); + } + } catch (IOException e) { + throw new HiveException(e); } + } - @Override - public void describeTable(DataOutputStream outStream, - String colPath, String tableName, - Table tbl, Partition part, List cols, - boolean isFormatted, boolean isExt, boolean isPretty) - throws HiveException { - try { - String output; - if (colPath.equals(tableName)) { - List partCols = tbl.isPartitioned() ? tbl.getPartCols() : null; - output = isPretty ? - MetaDataPrettyFormatUtils.getAllColumnsInformation( - cols, partCols, prettyOutputNumCols) + @Override + public void describeTable(DataOutputStream outStream, String colPath, + String tableName, Table tbl, Partition part, List cols, + boolean isFormatted, boolean isExt, boolean isPretty, + boolean isOutputPadded) throws HiveException { + try { + String output; + if (colPath.equals(tableName)) { + List partCols = tbl.isPartitioned() ? tbl.getPartCols() : null; + output = isPretty ? 
+ MetaDataPrettyFormatUtils.getAllColumnsInformation( + cols, partCols, prettyOutputNumCols) : - MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted); + MetaDataFormatUtils.getAllColumnsInformation(cols, partCols, isFormatted, isOutputPadded); + } else { + output = MetaDataFormatUtils.getAllColumnsInformation(cols, isFormatted, isOutputPadded); + } + outStream.write(output.getBytes("UTF-8")); + + if (tableName.equals(colPath)) { + if (isFormatted) { + if (part != null) { + output = MetaDataFormatUtils.getPartitionInformation(part); } else { - output = MetaDataFormatUtils.getAllColumnsInformation(cols, isFormatted); + output = MetaDataFormatUtils.getTableInformation(tbl); } outStream.write(output.getBytes("UTF-8")); + } - if (tableName.equals(colPath)) { - if (isFormatted) { - if (part != null) { - output = MetaDataFormatUtils.getPartitionInformation(part); - } else { - output = MetaDataFormatUtils.getTableInformation(tbl); - } - outStream.write(output.getBytes("UTF-8")); - } - - // if extended desc table then show the complete details of the table - if (isExt) { - // add empty line - outStream.write(terminator); - if (part != null) { - // show partition information - outStream.writeBytes("Detailed Partition Information"); - outStream.write(separator); - outStream.write(part.getTPartition().toString().getBytes("UTF-8")); - outStream.write(separator); - // comment column is empty - outStream.write(terminator); - } else { - // show table information - outStream.writeBytes("Detailed Table Information"); - outStream.write(separator); - outStream.write(tbl.getTTable().toString().getBytes("UTF-8")); - outStream.write(separator); - outStream.write(terminator); - } - } + // if extended desc table then show the complete details of the table + if (isExt) { + // add empty line + outStream.write(terminator); + if (part != null) { + // show partition information + outStream.writeBytes("Detailed Partition Information"); + outStream.write(separator); + outStream.write(part.getTPartition().toString().getBytes("UTF-8")); + outStream.write(separator); + // comment column is empty + outStream.write(terminator); + } else { + // show table information + outStream.writeBytes("Detailed Table Information"); + outStream.write(separator); + outStream.write(tbl.getTTable().toString().getBytes("UTF-8")); + outStream.write(separator); + outStream.write(terminator); } - } catch (IOException e) { - throw new HiveException(e); } + } + } catch (IOException e) { + throw new HiveException(e); } + } - @Override - public void showTableStatus(DataOutputStream outStream, - Hive db, - HiveConf conf, - List
<Table> tbls,
-                                Map<String, String> part,
-                                Partition par)
-        throws HiveException
-    {
-      try {
-        Iterator<Table> iterTables = tbls.iterator();
-        while (iterTables.hasNext()) {
-          // create a row per table name
-          Table tbl = iterTables.next();
-          String tableName = tbl.getTableName();
-          String tblLoc = null;
-          String inputFormattCls = null;
-          String outputFormattCls = null;
-          if (part != null) {
-            if (par != null) {
-              if (par.getLocation() != null) {
-                tblLoc = par.getDataLocation().toString();
-              }
-              inputFormattCls = par.getInputFormatClass().getName();
-              outputFormattCls = par.getOutputFormatClass().getName();
-            }
-          } else {
-            if (tbl.getPath() != null) {
-              tblLoc = tbl.getDataLocation().toString();
-            }
-            inputFormattCls = tbl.getInputFormatClass().getName();
-            outputFormattCls = tbl.getOutputFormatClass().getName();
-          }

+  @Override
+  public void showTableStatus(DataOutputStream outStream,
+      Hive db,
+      HiveConf conf,
+      List<Table> tbls,
+      Map<String, String> part,
+      Partition par)
+      throws HiveException
+  {
+    try {
+      Iterator<Table>
iterTables = tbls.iterator(); + while (iterTables.hasNext()) { + // create a row per table name + Table tbl = iterTables.next(); + String tableName = tbl.getTableName(); + String tblLoc = null; + String inputFormattCls = null; + String outputFormattCls = null; + if (part != null) { + if (par != null) { + if (par.getLocation() != null) { + tblLoc = par.getDataLocation().toString(); + } + inputFormattCls = par.getInputFormatClass().getName(); + outputFormattCls = par.getOutputFormatClass().getName(); + } + } else { + if (tbl.getPath() != null) { + tblLoc = tbl.getDataLocation().toString(); + } + inputFormattCls = tbl.getInputFormatClass().getName(); + outputFormattCls = tbl.getOutputFormatClass().getName(); + } - String owner = tbl.getOwner(); - List cols = tbl.getCols(); - String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols); - boolean isPartitioned = tbl.isPartitioned(); - String partitionCols = ""; - if (isPartitioned) { - partitionCols = MetaStoreUtils.getDDLFromFieldSchema( - "partition_columns", tbl.getPartCols()); - } + String owner = tbl.getOwner(); + List cols = tbl.getCols(); + String ddlCols = MetaStoreUtils.getDDLFromFieldSchema("columns", cols); + boolean isPartitioned = tbl.isPartitioned(); + String partitionCols = ""; + if (isPartitioned) { + partitionCols = MetaStoreUtils.getDDLFromFieldSchema( + "partition_columns", tbl.getPartCols()); + } - outStream.writeBytes("tableName:" + tableName); - outStream.write(terminator); - outStream.writeBytes("owner:" + owner); - outStream.write(terminator); - outStream.writeBytes("location:" + tblLoc); - outStream.write(terminator); - outStream.writeBytes("inputformat:" + inputFormattCls); - outStream.write(terminator); - outStream.writeBytes("outputformat:" + outputFormattCls); - outStream.write(terminator); - outStream.writeBytes("columns:" + ddlCols); - outStream.write(terminator); - outStream.writeBytes("partitioned:" + isPartitioned); - outStream.write(terminator); - outStream.writeBytes("partitionColumns:" + partitionCols); - outStream.write(terminator); - // output file system information - Path tblPath = tbl.getPath(); - List locations = new ArrayList(); - if (isPartitioned) { - if (par == null) { - for (Partition curPart : db.getPartitions(tbl)) { - if (curPart.getLocation() != null) { - locations.add(new Path(curPart.getLocation())); - } - } - } else { - if (par.getLocation() != null) { - locations.add(new Path(par.getLocation())); - } - } - } else { - if (tblPath != null) { - locations.add(tblPath); - } - } - if (!locations.isEmpty()) { - writeFileSystemStats(outStream, conf, locations, tblPath, false, 0); + outStream.writeBytes("tableName:" + tableName); + outStream.write(terminator); + outStream.writeBytes("owner:" + owner); + outStream.write(terminator); + outStream.writeBytes("location:" + tblLoc); + outStream.write(terminator); + outStream.writeBytes("inputformat:" + inputFormattCls); + outStream.write(terminator); + outStream.writeBytes("outputformat:" + outputFormattCls); + outStream.write(terminator); + outStream.writeBytes("columns:" + ddlCols); + outStream.write(terminator); + outStream.writeBytes("partitioned:" + isPartitioned); + outStream.write(terminator); + outStream.writeBytes("partitionColumns:" + partitionCols); + outStream.write(terminator); + // output file system information + Path tblPath = tbl.getPath(); + List locations = new ArrayList(); + if (isPartitioned) { + if (par == null) { + for (Partition curPart : db.getPartitions(tbl)) { + if (curPart.getLocation() != null) { + 
locations.add(new Path(curPart.getLocation())); } - - outStream.write(terminator); } - } catch (IOException e) { - throw new HiveException(e); + } else { + if (par.getLocation() != null) { + locations.add(new Path(par.getLocation())); + } + } + } else { + if (tblPath != null) { + locations.add(tblPath); + } } + if (!locations.isEmpty()) { + writeFileSystemStats(outStream, conf, locations, tblPath, false, 0); + } + + outStream.write(terminator); + } + } catch (IOException e) { + throw new HiveException(e); } + } - private void writeFileSystemStats(DataOutputStream outStream, - HiveConf conf, - List locations, - Path tblPath, boolean partSpecified, int indent) - throws IOException - { - long totalFileSize = 0; - long maxFileSize = 0; - long minFileSize = Long.MAX_VALUE; - long lastAccessTime = 0; - long lastUpdateTime = 0; - int numOfFiles = 0; + private void writeFileSystemStats(DataOutputStream outStream, + HiveConf conf, + List locations, + Path tblPath, boolean partSpecified, int indent) + throws IOException + { + long totalFileSize = 0; + long maxFileSize = 0; + long minFileSize = Long.MAX_VALUE; + long lastAccessTime = 0; + long lastUpdateTime = 0; + int numOfFiles = 0; - boolean unknown = false; - FileSystem fs = tblPath.getFileSystem(conf); - // in case all files in locations do not exist - try { - FileStatus tmpStatus = fs.getFileStatus(tblPath); - lastAccessTime = tmpStatus.getAccessTime(); - lastUpdateTime = tmpStatus.getModificationTime(); - if (partSpecified) { - // check whether the part exists or not in fs - tmpStatus = fs.getFileStatus(locations.get(0)); - } - } catch (IOException e) { - LOG.warn( - "Cannot access File System. File System status will be unknown: ", e); - unknown = true; + boolean unknown = false; + FileSystem fs = tblPath.getFileSystem(conf); + // in case all files in locations do not exist + try { + FileStatus tmpStatus = fs.getFileStatus(tblPath); + lastAccessTime = tmpStatus.getAccessTime(); + lastUpdateTime = tmpStatus.getModificationTime(); + if (partSpecified) { + // check whether the part exists or not in fs + tmpStatus = fs.getFileStatus(locations.get(0)); } + } catch (IOException e) { + LOG.warn( + "Cannot access File System. File System status will be unknown: ", e); + unknown = true; + } - if (!unknown) { - for (Path loc : locations) { - try { - FileStatus status = fs.getFileStatus(tblPath); - FileStatus[] files = fs.listStatus(loc); - long accessTime = status.getAccessTime(); - long updateTime = status.getModificationTime(); - // no matter loc is the table location or part location, it must be a - // directory. - if (!status.isDir()) { + if (!unknown) { + for (Path loc : locations) { + try { + FileStatus status = fs.getFileStatus(tblPath); + FileStatus[] files = fs.listStatus(loc); + long accessTime = status.getAccessTime(); + long updateTime = status.getModificationTime(); + // no matter loc is the table location or part location, it must be a + // directory. 
+ if (!status.isDir()) { + continue; + } + if (accessTime > lastAccessTime) { + lastAccessTime = accessTime; + } + if (updateTime > lastUpdateTime) { + lastUpdateTime = updateTime; + } + for (FileStatus currentStatus : files) { + if (currentStatus.isDir()) { continue; } + numOfFiles++; + long fileLen = currentStatus.getLen(); + totalFileSize += fileLen; + if (fileLen > maxFileSize) { + maxFileSize = fileLen; + } + if (fileLen < minFileSize) { + minFileSize = fileLen; + } + accessTime = currentStatus.getAccessTime(); + updateTime = currentStatus.getModificationTime(); if (accessTime > lastAccessTime) { lastAccessTime = accessTime; } if (updateTime > lastUpdateTime) { lastUpdateTime = updateTime; } - for (FileStatus currentStatus : files) { - if (currentStatus.isDir()) { - continue; - } - numOfFiles++; - long fileLen = currentStatus.getLen(); - totalFileSize += fileLen; - if (fileLen > maxFileSize) { - maxFileSize = fileLen; - } - if (fileLen < minFileSize) { - minFileSize = fileLen; - } - accessTime = currentStatus.getAccessTime(); - updateTime = currentStatus.getModificationTime(); - if (accessTime > lastAccessTime) { - lastAccessTime = accessTime; - } - if (updateTime > lastUpdateTime) { - lastUpdateTime = updateTime; - } - } - } catch (IOException e) { - // ignore } + } catch (IOException e) { + // ignore } } - String unknownString = "unknown"; + } + String unknownString = "unknown"; - for (int k = 0; k < indent; k++) { - outStream.writeBytes(Utilities.INDENT); - } - outStream.writeBytes("totalNumberFiles:"); - outStream.writeBytes(unknown ? unknownString : "" + numOfFiles); - outStream.write(terminator); + for (int k = 0; k < indent; k++) { + outStream.writeBytes(Utilities.INDENT); + } + outStream.writeBytes("totalNumberFiles:"); + outStream.writeBytes(unknown ? unknownString : "" + numOfFiles); + outStream.write(terminator); - for (int k = 0; k < indent; k++) { - outStream.writeBytes(Utilities.INDENT); - } - outStream.writeBytes("totalFileSize:"); - outStream.writeBytes(unknown ? unknownString : "" + totalFileSize); - outStream.write(terminator); + for (int k = 0; k < indent; k++) { + outStream.writeBytes(Utilities.INDENT); + } + outStream.writeBytes("totalFileSize:"); + outStream.writeBytes(unknown ? unknownString : "" + totalFileSize); + outStream.write(terminator); - for (int k = 0; k < indent; k++) { - outStream.writeBytes(Utilities.INDENT); - } - outStream.writeBytes("maxFileSize:"); - outStream.writeBytes(unknown ? unknownString : "" + maxFileSize); - outStream.write(terminator); + for (int k = 0; k < indent; k++) { + outStream.writeBytes(Utilities.INDENT); + } + outStream.writeBytes("maxFileSize:"); + outStream.writeBytes(unknown ? unknownString : "" + maxFileSize); + outStream.write(terminator); - for (int k = 0; k < indent; k++) { - outStream.writeBytes(Utilities.INDENT); - } - outStream.writeBytes("minFileSize:"); - if (numOfFiles > 0) { - outStream.writeBytes(unknown ? unknownString : "" + minFileSize); - } else { - outStream.writeBytes(unknown ? unknownString : "" + 0); - } - outStream.write(terminator); + for (int k = 0; k < indent; k++) { + outStream.writeBytes(Utilities.INDENT); + } + outStream.writeBytes("minFileSize:"); + if (numOfFiles > 0) { + outStream.writeBytes(unknown ? unknownString : "" + minFileSize); + } else { + outStream.writeBytes(unknown ? 
unknownString : "" + 0); + } + outStream.write(terminator); - for (int k = 0; k < indent; k++) { - outStream.writeBytes(Utilities.INDENT); - } - outStream.writeBytes("lastAccessTime:"); - outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : "" - + lastAccessTime); - outStream.write(terminator); + for (int k = 0; k < indent; k++) { + outStream.writeBytes(Utilities.INDENT); + } + outStream.writeBytes("lastAccessTime:"); + outStream.writeBytes((unknown || lastAccessTime < 0) ? unknownString : "" + + lastAccessTime); + outStream.write(terminator); - for (int k = 0; k < indent; k++) { - outStream.writeBytes(Utilities.INDENT); - } - outStream.writeBytes("lastUpdateTime:"); - outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime); - outStream.write(terminator); - } + for (int k = 0; k < indent; k++) { + outStream.writeBytes(Utilities.INDENT); + } + outStream.writeBytes("lastUpdateTime:"); + outStream.writeBytes(unknown ? unknownString : "" + lastUpdateTime); + outStream.write(terminator); + } - /** - * Show the table partitions. - */ - @Override - public void showTablePartitons(DataOutputStream outStream, List parts) - throws HiveException - { - try { - for (String part : parts) { - // Partition names are URL encoded. We decode the names unless Hive - // is configured to use the encoded names. - SessionState ss = SessionState.get(); - if (ss != null && ss.getConf() != null && - !ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_DECODE_PARTITION_NAME)) { - outStream.writeBytes(part); - } else { - outStream.writeBytes(FileUtils.unescapePathName(part)); - } - outStream.write(terminator); - } - } catch (IOException e) { - throw new HiveException(e); + /** + * Show the table partitions. + */ + @Override + public void showTablePartitons(DataOutputStream outStream, List parts) + throws HiveException + { + try { + for (String part : parts) { + // Partition names are URL encoded. We decode the names unless Hive + // is configured to use the encoded names. 
+ SessionState ss = SessionState.get(); + if (ss != null && ss.getConf() != null && + !ss.getConf().getBoolVar(HiveConf.ConfVars.HIVE_DECODE_PARTITION_NAME)) { + outStream.writeBytes(part); + } else { + outStream.writeBytes(FileUtils.unescapePathName(part)); } + outStream.write(terminator); + } + } catch (IOException e) { + throw new HiveException(e); } + } - /** - * Show the list of databases - */ - @Override - public void showDatabases(DataOutputStream outStream, List databases) - throws HiveException - { - try { - for (String database : databases) { - // create a row per database name - outStream.writeBytes(database); - outStream.write(terminator); - } - } catch (IOException e) { - throw new HiveException(e); - } + /** + * Show the list of databases + */ + @Override + public void showDatabases(DataOutputStream outStream, List databases) + throws HiveException + { + try { + for (String database : databases) { + // create a row per database name + outStream.writeBytes(database); + outStream.write(terminator); + } + } catch (IOException e) { + throw new HiveException(e); } + } - /** - * Describe a database - */ - @Override - public void showDatabaseDescription(DataOutputStream outStream, String database, String comment, + /** + * Describe a database + */ + @Override + public void showDatabaseDescription(DataOutputStream outStream, String database, String comment, String location, String ownerName, String ownerType, Map params) - throws HiveException { - try { - outStream.writeBytes(database); - outStream.write(separator); - if (comment != null) { - outStream.write(comment.getBytes("UTF-8")); - } - outStream.write(separator); - if (location != null) { - outStream.writeBytes(location); - } - outStream.write(separator); - if (ownerName != null) { - outStream.writeBytes(ownerName); - } - outStream.write(separator); - if (ownerType != null) { - outStream.writeBytes(ownerType); - } - outStream.write(separator); - if (params != null && !params.isEmpty()) { - outStream.writeBytes(params.toString()); - } - outStream.write(terminator); - } catch (IOException e) { - throw new HiveException(e); - } + throws HiveException { + try { + outStream.writeBytes(database); + outStream.write(separator); + if (comment != null) { + outStream.write(comment.getBytes("UTF-8")); + } + outStream.write(separator); + if (location != null) { + outStream.writeBytes(location); + } + outStream.write(separator); + if (ownerName != null) { + outStream.writeBytes(ownerName); + } + outStream.write(separator); + if (ownerType != null) { + outStream.writeBytes(ownerType); + } + outStream.write(separator); + if (params != null && !params.isEmpty()) { + outStream.writeBytes(params.toString()); + } + outStream.write(terminator); + } catch (IOException e) { + throw new HiveException(e); } + } } diff --git a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java index 99b6d77..12af9e7 100644 --- a/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java +++ b/ql/src/java/org/apache/hadoop/hive/ql/session/SessionState.java @@ -89,6 +89,11 @@ */ protected boolean isVerbose; + /** + * Is the query served from HiveServer2 + */ + private boolean isHiveServerQuery = false; + /* * HiveHistory Object */ @@ -194,6 +199,10 @@ public boolean getIsSilent() { } } + public boolean isHiveServerQuery() { + return this.isHiveServerQuery; + } + public void setIsSilent(boolean isSilent) { if(conf != null) { conf.setBoolVar(HiveConf.ConfVars.HIVESESSIONSILENT, isSilent); @@ 
-209,6 +218,10 @@ public void setIsVerbose(boolean isVerbose) { this.isVerbose = isVerbose; } + public void setIsHiveServerQuery(boolean isHiveServerQuery) { + this.isHiveServerQuery = isHiveServerQuery; + } + public SessionState(HiveConf conf) { this(conf, null); } @@ -331,7 +344,7 @@ public static SessionState start(SessionState startSs) { throw new RuntimeException(e); } } else { - LOG.info("No Tez session required at this point. hive.execution.engine=mr."); + LOG.info("No Tez session required at this point. hive.execution.engine=mr."); } return startSs; } @@ -390,7 +403,7 @@ private void setupAuth() { if(LOG.isDebugEnabled()){ Object authorizationClass = getAuthorizationMode() == AuthorizationMode.V1 ? getAuthorizer() : getAuthorizerV2(); - LOG.debug("Session is using authorization class " + authorizationClass.getClass()); + LOG.debug("Session is using authorization class " + authorizationClass.getClass()); } return; } diff --git a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index 445c858..2be153f 100644 --- a/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ b/service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -86,6 +86,7 @@ public HiveSessionImpl(TProtocolVersion protocol, String username, String passwo this.password = password; this.sessionHandle = new SessionHandle(protocol); + //set conf properties specified by user from client side if (sessionConf != null) { for (Map.Entry entry : sessionConf.entrySet()) { hiveConf.set(entry.getKey(), entry.getValue()); @@ -99,17 +100,21 @@ public HiveSessionImpl(TProtocolVersion protocol, String username, String passwo FetchFormatter.ThriftFormatter.class.getName()); hiveConf.setInt(ListSinkOperator.OUTPUT_PROTOCOL, protocol.getValue()); sessionState = new SessionState(hiveConf); + sessionState.setIsHiveServerQuery(true); SessionState.start(sessionState); } + @Override public TProtocolVersion getProtocolVersion() { return sessionHandle.getProtocolVersion(); } + @Override public SessionManager getSessionManager() { return sessionManager; } + @Override public void setSessionManager(SessionManager sessionManager) { this.sessionManager = sessionManager; } @@ -118,6 +123,7 @@ private OperationManager getOperationManager() { return operationManager; } + @Override public void setOperationManager(OperationManager operationManager) { this.operationManager = operationManager; } @@ -133,23 +139,28 @@ protected synchronized void release() { // no need to release sessionState... 
} + @Override public SessionHandle getSessionHandle() { return sessionHandle; } + @Override public String getUsername() { return username; } + @Override public String getPassword() { return password; } + @Override public HiveConf getHiveConf() { hiveConf.setVar(HiveConf.ConfVars.HIVEFETCHOUTPUTSERDE, FETCH_WORK_SERDE_CLASS); return hiveConf; } + @Override public IMetaStoreClient getMetaStoreClient() throws HiveSQLException { if (metastoreClient == null) { try { @@ -161,6 +172,7 @@ public IMetaStoreClient getMetaStoreClient() throws HiveSQLException { return metastoreClient; } + @Override public GetInfoValue getInfo(GetInfoType getInfoType) throws HiveSQLException { acquire(); @@ -187,11 +199,13 @@ public GetInfoValue getInfo(GetInfoType getInfoType) } } + @Override public OperationHandle executeStatement(String statement, Map confOverlay) throws HiveSQLException { return executeStatementInternal(statement, confOverlay, false); } + @Override public OperationHandle executeStatementAsync(String statement, Map confOverlay) throws HiveSQLException { return executeStatementInternal(statement, confOverlay, true); @@ -199,12 +213,12 @@ public OperationHandle executeStatementAsync(String statement, Map confOverlay, boolean runAsync) - throws HiveSQLException { + throws HiveSQLException { acquire(); OperationManager operationManager = getOperationManager(); ExecuteStatementOperation operation = operationManager - .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync); + .newExecuteStatementOperation(getSession(), statement, confOverlay, runAsync); OperationHandle opHandle = operation.getHandle(); try { operation.run(); @@ -222,6 +236,7 @@ private OperationHandle executeStatementInternal(String statement, Map tableTypes) - throws HiveSQLException { + throws HiveSQLException { acquire(); OperationManager operationManager = getOperationManager(); @@ -301,6 +319,7 @@ public OperationHandle getTables(String catalogName, String schemaName, String t } } + @Override public OperationHandle getTableTypes() throws HiveSQLException { acquire(); @@ -320,6 +339,7 @@ public OperationHandle getTableTypes() } } + @Override public OperationHandle getColumns(String catalogName, String schemaName, String tableName, String columnName) throws HiveSQLException { acquire(); @@ -329,9 +349,9 @@ public OperationHandle getColumns(String catalogName, String schemaName, catalogName, schemaName, tableName, columnName); OperationHandle opHandle = operation.getHandle(); try { - operation.run(); - opHandleSet.add(opHandle); - return opHandle; + operation.run(); + opHandleSet.add(opHandle); + return opHandle; } catch (HiveSQLException e) { operationManager.closeOperation(opHandle); throw e; @@ -340,6 +360,7 @@ public OperationHandle getColumns(String catalogName, String schemaName, } } + @Override public OperationHandle getFunctions(String catalogName, String schemaName, String functionName) throws HiveSQLException { acquire(); @@ -360,6 +381,7 @@ public OperationHandle getFunctions(String catalogName, String schemaName, Strin } } + @Override public void close() throws HiveSQLException { try { acquire(); @@ -388,13 +410,16 @@ public void close() throws HiveSQLException { } } + @Override public SessionState getSessionState() { return sessionState; } + @Override public String getUserName() { return username; } + @Override public void setUserName(String userName) { this.username = userName; }
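
Note (not part of the patch): the two additions above are meant to be used together -- HiveSessionImpl marks its SessionState with setIsHiveServerQuery(true), and MetaDataFormatter.describeTable() now takes an isOutputPadded flag so padding can be skipped for HiveServer2 clients. The sketch below shows one plausible wiring from a caller's side. The DescTableCaller class name and the exact call site are hypothetical; only SessionState.isHiveServerQuery() and the new describeTable() signature come from the patch itself.

import java.io.DataOutputStream;
import java.util.List;

import org.apache.hadoop.hive.metastore.api.FieldSchema;
import org.apache.hadoop.hive.ql.metadata.HiveException;
import org.apache.hadoop.hive.ql.metadata.Partition;
import org.apache.hadoop.hive.ql.metadata.Table;
import org.apache.hadoop.hive.ql.metadata.formatting.MetaDataFormatter;
import org.apache.hadoop.hive.ql.session.SessionState;

public class DescTableCaller {  // hypothetical caller, for illustration only
  void describe(MetaDataFormatter formatter, DataOutputStream out, Table tbl,
      Partition part, List<FieldSchema> cols) throws HiveException {
    // Pad and align the text output only for CLI-style sessions; a HiveServer2
    // query (e.g. Beeline over JDBC) gets the raw, unpadded column values.
    boolean padOutput = SessionState.get() != null
        && !SessionState.get().isHiveServerQuery();
    // colPath == tableName means "describe the whole table" in this API.
    formatter.describeTable(out, tbl.getTableName(), tbl.getTableName(), tbl, part, cols,
        /* isFormatted */ false, /* isExt */ false, /* isPretty */ false,
        /* isOutputPadded */ padOutput);
  }
}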