diff --git a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
index 6e05526..ba235f7 100644
--- a/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
+++ b/ql/src/java/org/apache/hadoop/hive/ql/io/parquet/MapredParquetOutputFormat.java
@@ -14,8 +14,8 @@ package org.apache.hadoop.hive.ql.io.parquet;
 
 import java.io.IOException;
-import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.Collections;
 import java.util.List;
 import java.util.Properties;
 
@@ -101,23 +101,20 @@ public void checkOutputSpecs(final FileSystem ignored, final JobConf job) throws
       final Properties tableProperties,
       final Progressable progress) throws IOException {
 
-    LOG.info("creating new record writer..." + this);
+    LOG.info("Creating new record writer: {}", this);
 
     final String columnNameProperty = tableProperties.getProperty(IOConstants.COLUMNS);
     final String columnTypeProperty = tableProperties.getProperty(IOConstants.COLUMNS_TYPES);
-    List<String> columnNames;
-    List<TypeInfo> columnTypes;
+    List<String> columnNames = Collections.emptyList();
+    List<TypeInfo> columnTypes = Collections.emptyList();
     final String columnNameDelimiter = tableProperties.containsKey(serdeConstants.COLUMN_NAME_DELIMITER) ? tableProperties
         .getProperty(serdeConstants.COLUMN_NAME_DELIMITER) : String.valueOf(SerDeUtils.COMMA);
-    if (columnNameProperty.length() == 0) {
-      columnNames = new ArrayList<String>();
-    } else {
+
+    if (!columnNameProperty.isEmpty()) {
       columnNames = Arrays.asList(columnNameProperty.split(columnNameDelimiter));
     }
-    if (columnTypeProperty.length() == 0) {
-      columnTypes = new ArrayList<TypeInfo>();
-    } else {
+    if (!columnTypeProperty.isEmpty()) {
       columnTypes = TypeInfoUtils.getTypeInfosFromTypeString(columnTypeProperty);
     }