diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
index 6e05526..a3af728 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
@@ -14,8 +14,8 @@ package org.apache.hadoop.hive.ql.io.parquet;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
@@ -101,7 +101,7 @@ public void checkOutputSpecs(final FileSystem ignored, final JobConf job) throws
       final Properties tableProperties,
       final Progressable progress) throws IOException {
 
-    LOG.info("creating new record writer..." + this);
+    LOG.info("Creating new record writer: {}", this);
 
     final String columnNameProperty = tableProperties.getProperty(IOConstants.COLUMNS);
     final String columnTypeProperty = tableProperties.getProperty(IOConstants.COLUMNS_TYPES);
@@ -109,14 +109,14 @@ public void checkOutputSpecs(final FileSystem ignored, final JobConf job) throws
     List<TypeInfo> columnTypes;
     final String columnNameDelimiter = tableProperties.containsKey(serdeConstants.COLUMN_NAME_DELIMITER) ? tableProperties
         .getProperty(serdeConstants.COLUMN_NAME_DELIMITER) : String.valueOf(SerDeUtils.COMMA);
 
-    if (columnNameProperty.length() == 0) {
-      columnNames = new ArrayList<String>();
+    if (columnNameProperty.isEmpty()) {
+      columnNames = Collections.emptyList();
     } else {
       columnNames = Arrays.asList(columnNameProperty.split(columnNameDelimiter));
     }
-    if (columnTypeProperty.length() == 0) {
-      columnTypes = new ArrayList<TypeInfo>();
+    if (columnTypeProperty.isEmpty()) {
+      columnTypes = Collections.emptyList();
     } else {
       columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
     }
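
For context, a minimal standalone sketch of the empty-column handling the patch moves to. The ColumnParseSketch class, the parseColumns helper, and the sample inputs are illustrative assumptions, not code from MapredParquetOutputFormat:

import java.util.Arrays;
import java.util.Collections;
import java.util.List;

// Illustrative sketch only; class and method names are not from the patch.
public class ColumnParseSketch {

  // Mirrors the pattern in the patch: return the shared immutable empty list
  // instead of allocating a new ArrayList when the property is empty.
  // The isEmpty() guard also avoids split(""), which would yield [""]
  // rather than an empty list.
  static List<String> parseColumns(String columnNameProperty, String delimiter) {
    if (columnNameProperty.isEmpty()) {
      return Collections.emptyList();
    }
    return Arrays.asList(columnNameProperty.split(delimiter));
  }

  public static void main(String[] args) {
    System.out.println(parseColumns("", ","));            // prints []
    System.out.println(parseColumns("id,name,ts", ","));  // prints [id, name, ts]
  }
}

Collections.emptyList() returns a shared immutable instance, so the empty-property branch no longer allocates, assuming (as the patch implies) that callers only read the resulting lists. The logging change likewise swaps string concatenation for SLF4J-style parameterized logging.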