Index: src/java/org/apache/hcatalog/mapreduce/InitializeInput.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/mapreduce/InitializeInput.java (revision 1360982)
+++ src/java/org/apache/hcatalog/mapreduce/InitializeInput.java (revision )
@@ -72,56 +72,51 @@
         HiveMetaStoreClient client = null;
         HiveConf hiveConf = null;
 
-        try {
-            if (job != null){
+        if (job != null) {
-                hiveConf = HCatUtil.getHiveConf(job.getConfiguration());
-            } else {
-                hiveConf = new HiveConf(HCatInputFormat.class);
-            }
+            hiveConf = HCatUtil.getHiveConf(job.getConfiguration());
+        } else {
+            hiveConf = new HiveConf(HCatInputFormat.class);
+        }
-            client = HCatUtil.createHiveClient(hiveConf);
+        client = HCatUtil.getHiveClient(hiveConf);
-            Table table = client.getTable(inputJobInfo.getDatabaseName(),
-                                          inputJobInfo.getTableName());
+        Table table = client.getTable(inputJobInfo.getDatabaseName(),
+                                      inputJobInfo.getTableName());
-            List<PartInfo> partInfoList = new ArrayList<PartInfo>();
+        List<PartInfo> partInfoList = new ArrayList<PartInfo>();
-            inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table));
+        inputJobInfo.setTableInfo(HCatTableInfo.valueOf(table));
-            if( table.getPartitionKeys().size() != 0 ) {
+        if (table.getPartitionKeys().size() != 0) {
-                //Partitioned table
-                List<Partition> parts = client.listPartitionsByFilter(inputJobInfo.getDatabaseName(),
-                                                                      inputJobInfo.getTableName(),
-                                                                      inputJobInfo.getFilter(),
-                                                                      (short) -1);
+            //Partitioned table
+            List<Partition> parts = client.listPartitionsByFilter(inputJobInfo.getDatabaseName(),
+                                                                  inputJobInfo.getTableName(),
+                                                                  inputJobInfo.getFilter(),
+                                                                  (short) -1);
-                // Default to 100,000 partitions if hive.metastore.maxpartition is not defined
-                int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
-                if (parts != null && parts.size() > maxPart) {
-                    throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
-                }
+            // Default to 100,000 partitions if hcat.metastore.maxpartitions is not defined
+            int maxPart = hiveConf.getInt("hcat.metastore.maxpartitions", 100000);
+            if (parts != null && parts.size() > maxPart) {
+                throw new HCatException(ErrorType.ERROR_EXCEED_MAXPART, "total number of partitions is " + parts.size());
+            }
-                // populate partition info
+            // populate partition info
-                for (Partition ptn : parts){
+            for (Partition ptn : parts) {
-                    PartInfo partInfo = extractPartInfo(ptn.getSd(),ptn.getParameters(),
+                PartInfo partInfo = extractPartInfo(ptn.getSd(), ptn.getParameters(),
-                                                        job.getConfiguration(),
-                                                        inputJobInfo);
-                    partInfo.setPartitionValues(createPtnKeyValueMap(table, ptn));
-                    partInfoList.add(partInfo);
-                }
+                                                    job.getConfiguration(),
+                                                    inputJobInfo);
+                partInfo.setPartitionValues(createPtnKeyValueMap(table, ptn));
+                partInfoList.add(partInfo);
+            }
-            }else{
+        } else {
-                //Non partitioned table
+            //Non partitioned table
-                PartInfo partInfo = extractPartInfo(table.getSd(),table.getParameters(),
+            PartInfo partInfo = extractPartInfo(table.getSd(), table.getParameters(),
-                                                    job.getConfiguration(),
-                                                    inputJobInfo);
+                                                job.getConfiguration(),
+                                                inputJobInfo);
-                partInfo.setPartitionValues(new HashMap<String, String>());
+            partInfo.setPartitionValues(new HashMap<String, String>());
-                partInfoList.add(partInfo);
-            }
-            inputJobInfo.setPartitions(partInfoList);
+            partInfoList.add(partInfo);
+        }
+        inputJobInfo.setPartitions(partInfoList);
-            return HCatUtil.serialize(inputJobInfo);
+        return HCatUtil.serialize(inputJobInfo);
-        } finally {
-            HCatUtil.closeHiveClientQuietly(client);
-        }
-    }
+    }
 
     private static Map<String, String> createPtnKeyValueMap(Table table, Partition ptn) throws IOException {
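The try/finally that closed the client is gone because clients now come from a shared, time-expiring cache and must stay open for reuse. A minimal sketch of the new calling pattern, assuming only the HCatUtil methods introduced by this patch (the wrapper class and its parameters are hypothetical):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.Table;
    import org.apache.hcatalog.common.HCatUtil;

    public class CachedClientUsage {
        // Hypothetical caller: fetch a table through a cached client.
        public static Table fetchTable(HiveConf hiveConf, String dbName, String tableName) throws Exception {
            HiveMetaStoreClient client = HCatUtil.getHiveClient(hiveConf);
            // No close()/closeHiveClientQuietly() here: the client belongs to the
            // cache and is closed later by FinalizableHiveMetaStoreClient.finalize().
            return client.getTable(dbName, tableName);
        }
    }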
Index: src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java (revision 1360982)
+++ src/java/org/apache/hcatalog/mapreduce/DefaultOutputCommitterContainer.java (revision )
@@ -18,8 +18,6 @@
 
 package org.apache.hcatalog.mapreduce;
 
-import java.io.IOException;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.mapred.HCatMapRedUtil;
@@ -31,6 +29,8 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import java.io.IOException;
+
 /**
  * Part of the DefaultOutput*Container classes
  * See {@link DefaultOutputFormatContainer} for more information
@@ -93,15 +93,13 @@
         HiveMetaStoreClient client = null;
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
             String tokenStrForm = client.getTokenStrForm();
             if(tokenStrForm != null && context.getConfiguration().get(HCatConstants.HCAT_KEY_TOKEN_SIGNATURE) != null) {
                 client.cancelDelegationToken(tokenStrForm);
             }
         } catch (Exception e) {
             LOG.warn("Failed to cancel delegation token", e);
-        } finally {
-            HCatUtil.closeHiveClientQuietly(client);
         }
     }
 }
Index: src/java/org/apache/hcatalog/common/cache/HiveClientCache.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/common/cache/HiveClientCache.java (revision )
+++ src/java/org/apache/hcatalog/common/cache/HiveClientCache.java (revision )
@@ -0,0 +1,149 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.common.cache;
+
+import com.google.common.cache.Cache;
+import com.google.common.cache.CacheBuilder;
+import org.apache.commons.lang.builder.EqualsBuilder;
+import org.apache.commons.lang.builder.HashCodeBuilder;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hadoop.hive.shims.ShimLoader;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
+
+/**
+ * A time-expiring cache for HiveMetaStoreClient
+ */
+public class HiveClientCache {
+    private final Cache<HiveClientCacheKey, FinalizableHiveMetaStoreClient> hiveCache;
+    private static final Logger LOG = LoggerFactory.getLogger(HiveClientCache.class);
+
+    /**
+     * @param timeout the length of time in seconds after a client is created that it should be automatically removed
+     */
+    public HiveClientCache(final int timeout) {
+        hiveCache = CacheBuilder.newBuilder()
+            .expireAfterWrite(timeout, TimeUnit.SECONDS)
+            .build();
+    }
+
+    public void cleanup() {
+        hiveCache.cleanUp();
+    }
+
+    /**
+     * Returns a cached client if one exists; otherwise creates one, caches it and returns it
+     * @param hiveConf
+     * @return the hive client
+     * @throws MetaException
+     * @throws IOException
+     * @throws LoginException
+     */
+    public HiveMetaStoreClient get(final HiveConf hiveConf) throws MetaException, IOException, LoginException {
+        try {
+            final HiveClientCacheKey cacheKey = HiveClientCacheKey.fromHiveConf(hiveConf);
+            return hiveCache.get(cacheKey, new Callable<FinalizableHiveMetaStoreClient>() {
+                @Override
+                public FinalizableHiveMetaStoreClient call() throws MetaException {
+                    return new FinalizableHiveMetaStoreClient(cacheKey.getHiveConf());
+                }
+            });
+        } catch (ExecutionException e) {
+            throw new MetaException(e.getMessage());
+        }
+    }
+
+    /**
+     * A class to wrap HiveConf and expose equality based only on UserGroupInformation and the metaStoreURIs.
+     * This becomes the key for the cache, so the same HiveMetaStoreClient is returned whenever
+     * UserGroupInformation and metaStoreURIs are the same. This class can evolve to express
+     * the cases where HiveConfs differ but the same hiveMetaStoreClient can still be used
+     */
+    public static class HiveClientCacheKey {
+        private final String metaStoreURIs;
+        private final UserGroupInformation ugi;
+        private final HiveConf hiveConf;
+
+        private HiveClientCacheKey(HiveConf hiveConf) throws IOException, LoginException {
+            this.metaStoreURIs = hiveConf.getVar(HiveConf.ConfVars.METASTOREURIS);
+            ugi = ShimLoader.getHadoopShims().getUGIForConf(hiveConf);
+            this.hiveConf = hiveConf;
+        }
+
+        public static HiveClientCacheKey fromHiveConf(HiveConf hiveConf) throws IOException, LoginException {
+            return new HiveClientCacheKey(hiveConf);
+        }
+
+        public HiveConf getHiveConf() {
+            return hiveConf;
+        }
+
+        @Override
+        public boolean equals(Object o) {
+            if (this == o) return true;
+            if (o == null || getClass() != o.getClass()) return false;
+            HiveClientCacheKey that = (HiveClientCacheKey) o;
+            return new EqualsBuilder()
+                .append(this.metaStoreURIs, that.metaStoreURIs)
+                .append(this.ugi, that.ugi).isEquals();
+        }
+
+        @Override
+        public int hashCode() {
+            return new HashCodeBuilder()
+                .append(metaStoreURIs)
+                .append(ugi).toHashCode();
+        }
+    }
+
+    /**
+     * Add finalize() to HiveMetaStoreClient as a safeguard so the connection is torn down
+     * when the GC eventually finalizes the client, whenever that may be
+     */
+    public static class FinalizableHiveMetaStoreClient extends HiveMetaStoreClient {
+
+        public FinalizableHiveMetaStoreClient(final HiveConf conf) throws MetaException {
+            super(conf);
+        }
+
+        /**
+         * HiveMetaStoreClient doesn't have a finalizer; we want to keep reusing the same client
+         * and close it only in the finalizer
+         */
+        @Override
+        protected void finalize() throws Throwable {
+            try {
+                super.close();
+            } catch (Exception e) {
+                LOG.debug("Error closing metastore client. Ignored", e);
+            } finally {
+                super.finalize();
+            }
+        }
+    }
+}
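To illustrate the contract described in the javadoc above: two get() calls with the same UserGroupInformation and metastore URIs return the same client instance until the entry expires. A small hypothetical driver (class name and timeout are illustrative):

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hcatalog.common.cache.HiveClientCache;

    public class HiveClientCacheDemo {
        public static void main(String[] args) throws Exception {
            HiveClientCache cache = new HiveClientCache(600); // entries expire 600s after creation
            HiveConf conf = new HiveConf(HiveClientCacheDemo.class);
            HiveMetaStoreClient a = cache.get(conf);
            HiveMetaStoreClient b = cache.get(conf);
            // Same UGI + same metastore URIs => same underlying client instance
            System.out.println(a == b); // true
        }
    }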
Index: src/java/org/apache/hcatalog/common/HCatUtil.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/common/HCatUtil.java (revision 1360982)
+++ src/java/org/apache/hcatalog/common/HCatUtil.java (revision )
@@ -53,6 +53,8 @@
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 import org.apache.hadoop.util.ReflectionUtils;
+import org.apache.hcatalog.common.cache.CacheFactory;
+import org.apache.hcatalog.common.cache.HiveClientCache;
 import org.apache.hcatalog.data.Pair;
 import org.apache.hcatalog.data.schema.HCatFieldSchema;
 import org.apache.hcatalog.data.schema.HCatSchema;
@@ -68,9 +70,12 @@
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
+import javax.security.auth.login.LoginException;
+
 public class HCatUtil {
 
     private static final Logger LOG = LoggerFactory.getLogger(HCatUtil.class);
+    private static final HiveClientCache hiveCache = CacheFactory.createHiveClientCache();
 
     public static boolean checkJobContextIfRunningFromBackend(JobContext j) {
         if (j.getConfiguration().get("mapred.task.id", "").equals("")) {
@@ -515,20 +520,24 @@
         }
     }
 
-    public static HiveMetaStoreClient createHiveClient(HiveConf hiveConf)
+    /**
+     * Get or create a Hive client, depending on whether one already exists in the cache
+     * @param hiveConf The hive configuration
+     * @return the client
+     * @throws MetaException
+     */
+    public static HiveMetaStoreClient getHiveClient(HiveConf hiveConf)
         throws MetaException {
-        return new HiveMetaStoreClient(hiveConf);
-    }
-
-    public static void closeHiveClientQuietly(HiveMetaStoreClient client) {
         try {
-            if (client != null)
-                client.close();
-        } catch (Exception e) {
-            LOG.debug("Error closing metastore client", e);
+            return hiveCache.get(hiveConf);
+        } catch (IOException e) {
+            LOG.debug("Couldn't create hiveClient. IOException", e);
+            throw new MetaException("Couldn't create hiveClient. Error(IOException) getting UGI for user. " + e.getMessage());
+        } catch (LoginException e) {
+            LOG.debug("Couldn't create hiveClient. LoginException", e);
+            throw new MetaException("Couldn't create hiveClient. Error(LoginException) getting UGI for user. " + e.getMessage());
         }
     }
-
     public static HiveConf getHiveConf(Configuration conf) throws IOException {
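A minimal sketch of the exception contract of getHiveClient(): callers handle a single checked MetaException, with IOException and LoginException from UGI lookup folded into its message. The wrapper class below is hypothetical:

    import org.apache.hadoop.hive.conf.HiveConf;
    import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
    import org.apache.hadoop.hive.metastore.api.MetaException;
    import org.apache.hcatalog.common.HCatUtil;

    public class GetClientExample {
        // Hypothetical caller: UGI/login failures arrive wrapped as MetaException.
        static HiveMetaStoreClient connect(HiveConf hiveConf) throws MetaException {
            return HCatUtil.getHiveClient(hiveConf);
        }
    }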
" + e.getMessage()); } } - public static HiveConf getHiveConf(Configuration conf) throws IOException { Index: src/java/org/apache/hcatalog/pig/PigHCatUtil.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/java/org/apache/hcatalog/pig/PigHCatUtil.java (revision 1360982) +++ src/java/org/apache/hcatalog/pig/PigHCatUtil.java (revision ) @@ -105,7 +105,7 @@ } try { - return new HiveMetaStoreClient(hiveConf,null); + return HCatUtil.getHiveClient(hiveConf); } catch (Exception e){ throw new Exception("Could not instantiate a HiveMetaStoreClient connecting to server uri:["+serverUri+"]",e); } @@ -147,8 +147,6 @@ throw new PigException("Table not found : " + nsoe.getMessage(), PIG_EXCEPTION_CODE); // prettier error messages to frontend } catch (Exception e) { throw new IOException(e); - } finally { - HCatUtil.closeHiveClientQuietly(client); } hcatTableCache.put(loc_server, table); return table; Index: src/java/org/apache/hcatalog/common/cache/CacheFactory.java IDEA additional info: Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP <+>UTF-8 =================================================================== --- src/java/org/apache/hcatalog/common/cache/CacheFactory.java (revision ) +++ src/java/org/apache/hcatalog/common/cache/CacheFactory.java (revision ) @@ -0,0 +1,33 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
Index: src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java (revision 1360982)
+++ src/java/org/apache/hcatalog/mapreduce/FileOutputFormatContainer.java (revision )
@@ -116,7 +116,7 @@
         HiveMetaStoreClient client = null;
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(context.getConfiguration());
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
             handleDuplicatePublish(context,
                     jobInfo,
                     client,
@@ -127,8 +127,6 @@
             throw new IOException(e);
         } catch (NoSuchObjectException e) {
             throw new IOException(e);
-        } finally {
-            HCatUtil.closeHiveClientQuietly(client);
         }
 
         if(!jobInfo.isDynamicPartitioningUsed()) {
Index: src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java (revision 1360982)
+++ src/java/org/apache/hcatalog/mapreduce/FileOutputCommitterContainer.java (revision )
@@ -158,7 +158,7 @@
         HiveMetaStoreClient client = null;
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(jobContext.getConfiguration());
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
             // cancel the deleg. tokens that were acquired for this job now that
             // we are done - we should cancel if the tokens were acquired by
             // HCatOutputFormat and not if they were supplied by Oozie.
@@ -175,8 +175,6 @@
             } else {
                 throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
             }
-        } finally {
-            HCatUtil.closeHiveClientQuietly(client);
         }
 
         Path src;
@@ -273,7 +271,7 @@
         try {
             HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
 
             StorerInfo storer = InternalUtil.extractStorerInfo(table.getSd(),table.getParameters());
@@ -380,8 +378,6 @@
             } else {
                 throw new HCatException(ErrorType.ERROR_PUBLISHING_PARTITION, e);
             }
-        } finally {
-            HCatUtil.closeHiveClientQuietly(client);
         }
     }
Index: src/test/org/apache/hcatalog/common/TestHiveClientCache.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/test/org/apache/hcatalog/common/TestHiveClientCache.java (revision )
+++ src/test/org/apache/hcatalog/common/TestHiveClientCache.java (revision )
@@ -0,0 +1,82 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements. See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership. The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License. You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hcatalog.common;
+
+import junit.framework.TestCase;
+import org.apache.hadoop.hive.conf.HiveConf;
+import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
+import org.apache.hadoop.hive.metastore.api.MetaException;
+import org.apache.hcatalog.common.cache.CacheFactory;
+import org.apache.hcatalog.common.cache.HiveClientCache;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import javax.security.auth.login.LoginException;
+import java.io.IOException;
+
+public class TestHiveClientCache extends TestCase {
+
+    private static final Logger LOG = LoggerFactory.getLogger(TestHiveClientCache.class);
+    HiveConf hiveConf;
+
+    @Override
+    protected void setUp() throws Exception {
+        super.setUp();
+        hiveConf = new HiveConf(this.getClass());
+    }
+
+    public void testCacheHit() throws IOException, MetaException, LoginException {
+        HiveClientCache cache = new HiveClientCache(1000);
+        HiveMetaStoreClient client = cache.get(hiveConf);
+        assertNotNull(client);
+
+        // Setting an unimportant configuration value should still return the same client
+        hiveConf.setIntVar(HiveConf.ConfVars.DYNAMICPARTITIONMAXPARTS, 10);
+        HiveMetaStoreClient client2 = cache.get(hiveConf);
+        assertNotNull(client2);
+        assertEquals(client, client2);
+    }
+
+    public void testCacheMiss() throws IOException, MetaException, LoginException {
+        HiveClientCache cache = new HiveClientCache(1000);
+        HiveMetaStoreClient client = cache.get(hiveConf);
+        assertNotNull(client);
+
+        // Set a different uri as it is one of the criteria deciding whether to return the same client or not
+        hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, "http://yahoo.com");
+        HiveMetaStoreClient client2 = cache.get(hiveConf);
+        assertNotNull(client2);
+        assertNotSame(client, client2);
+    }
+
+    /**
+     * Check that a new client is returned for the same configuration after the expiry time.
+     * Also verify that the expiry time configuration is honoured
+     */
+    public void testCacheExpiry() throws IOException, MetaException, LoginException, InterruptedException {
+        HiveClientCache cache = new HiveClientCache(1);
+        HiveMetaStoreClient client = cache.get(hiveConf);
+        assertNotNull(client);
+
+        Thread.sleep(2000);
+        HiveMetaStoreClient client2 = cache.get(hiveConf);
+        assertNotNull(client2);
+
+        assertNotSame(client, client2);
+    }
+}
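testCacheExpiry relies on Guava's expireAfterWrite semantics: an entry becomes unavailable a fixed time after it was written, regardless of reads. A standalone sketch of just that primitive (class name is illustrative):

    import com.google.common.cache.Cache;
    import com.google.common.cache.CacheBuilder;
    import java.util.concurrent.TimeUnit;

    public class ExpiryDemo {
        public static void main(String[] args) throws InterruptedException {
            Cache<String, String> cache = CacheBuilder.newBuilder()
                    .expireAfterWrite(1, TimeUnit.SECONDS)
                    .build();
            cache.put("k", "v");
            System.out.println(cache.getIfPresent("k")); // "v"
            Thread.sleep(1500);
            System.out.println(cache.getIfPresent("k")); // null -- the entry expired
        }
    }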
Index: ivy.xml
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- ivy.xml (revision 1355186)
+++ ivy.xml (revision )
@@ -90,6 +90,8 @@
               conf="common->master"/>
+    <dependency org="com.google.guava" name="guava" rev="${guava.version}"
+                conf="common->master"/>
Index: src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java
IDEA additional info:
Subsystem: com.intellij.openapi.diff.impl.patch.CharsetEP
<+>UTF-8
===================================================================
--- src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java (revision 1360982)
+++ src/java/org/apache/hcatalog/mapreduce/HCatOutputFormat.java (revision )
@@ -18,24 +18,15 @@
 
 package org.apache.hcatalog.mapreduce;
 
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-import java.util.Properties;
-
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.metastore.HiveMetaStoreClient;
 import org.apache.hadoop.hive.metastore.api.FieldSchema;
 import org.apache.hadoop.hive.metastore.api.Index;
-import org.apache.hadoop.hive.metastore.api.MetaException;
 import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
 import org.apache.hadoop.hive.metastore.api.Table;
 import org.apache.hadoop.io.WritableComparable;
@@ -43,7 +34,6 @@
 import org.apache.hadoop.mapreduce.OutputCommitter;
 import org.apache.hadoop.mapreduce.RecordWriter;
 import org.apache.hadoop.mapreduce.TaskAttemptContext;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hcatalog.common.ErrorType;
 import org.apache.hcatalog.common.HCatConstants;
 import org.apache.hcatalog.common.HCatException;
@@ -51,6 +41,12 @@
 import org.apache.hcatalog.data.HCatRecord;
 import org.apache.hcatalog.data.schema.HCatSchema;
 
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+
 /** The OutputFormat to use to write data to HCatalog. The key value is ignored and
  *  should be given as null. The value is the HCatRecord to write.*/
 public class HCatOutputFormat extends HCatBaseOutputFormat {
@@ -76,7 +72,7 @@
             Configuration conf = job.getConfiguration();
             HiveConf hiveConf = HCatUtil.getHiveConf(conf);
-            client = HCatUtil.createHiveClient(hiveConf);
+            client = HCatUtil.getHiveClient(hiveConf);
 
             Table table = client.getTable(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName());
 
             List<String> indexList = client.listIndexNames(outputJobInfo.getDatabaseName(), outputJobInfo.getTableName(), Short.MAX_VALUE);
@@ -195,8 +191,6 @@
             } else {
                 throw new HCatException(ErrorType.ERROR_SET_OUTPUT, e);
             }
-        } finally {
-            HCatUtil.closeHiveClientQuietly(client);
        }
     }