diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java
new file mode 100644
index 0000000..a6360d4
--- /dev/null
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/CheckJDOException.java
@@ -0,0 +1,31 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hive.ql.metadata;
+
+import javax.jdo.JDODataStoreException;
+
+/**
+ * Check if this is a javax.jdo.JDODataStoreException
+ */
+public class CheckJDOException {
+
+  public static boolean isJDODataStoreException(Exception e) {
+    return (e instanceof JDODataStoreException);
+  }
+
+}
diff --git ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
index b900627..e3efc81 100644
--- ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
+++ ql/src/java/org/apache/hadoop/hive/ql/metadata/Hive.java
@@ -115,6 +115,7 @@ import com.google.common.collect.Sets;
 
+
 /**
  * This class has functions that implement meta data/DDL operations using calls
  * to the metastore.
@@ -1732,31 +1733,29 @@ public Partition getPartition(Table tbl, Map<String, String> partSpec,
       if (tpart == null) {
         LOG.debug("creating partition for table " + tbl.getTableName()
             + " with partition spec : " + partSpec);
-        tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
+        try {
+          tpart = getMSC().appendPartition(tbl.getDbName(), tbl.getTableName(), pvals);
+        } catch (AlreadyExistsException aee) {
+          LOG.debug("Caught already exists exception, trying to alter partition instead");
+          tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(),
+              tbl.getTableName(), pvals, getUserName(), getGroupNames());
+          alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
+        } catch (Exception e) {
+          if (CheckJDOException.isJDODataStoreException(e)) {
+            // Using the utility method above so that JDODataStoreException doesn't
+            // have to be referenced here. This helps avoid adding a JDO dependency
+            // for HCatalog client use cases.
+            LOG.debug("Caught JDO exception, trying to alter partition instead");
+            tpart = getMSC().getPartitionWithAuthInfo(tbl.getDbName(),
+                tbl.getTableName(), pvals, getUserName(), getGroupNames());
+            alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
+          } else {
+            throw e;
+          }
+        }
       } else {
-        LOG.debug("altering partition for table " + tbl.getTableName()
-            + " with partition spec : " + partSpec);
-        if (inheritTableSpecs) {
-          tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
-          tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
-          tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
-          tpart.getSd().getSerdeInfo().setParameters(
-              tbl.getTTable().getSd().getSerdeInfo().getParameters());
-          tpart.getSd().setBucketCols(tbl.getBucketCols());
-          tpart.getSd().setNumBuckets(tbl.getNumBuckets());
-          tpart.getSd().setSortCols(tbl.getSortCols());
-        }
-        if (partPath == null || partPath.trim().equals("")) {
-          throw new HiveException("new partition path should not be null or empty.");
-        }
-        tpart.getSd().setLocation(partPath);
-        tpart.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, "true");
-        String fullName = tbl.getTableName();
-        if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
-          fullName = tbl.getDbName() + "." + tbl.getTableName();
-        }
-        alterPartition(fullName, new Partition(tbl, tpart));
+        alterPartitionSpec(tbl, partSpec, tpart, inheritTableSpecs, partPath);
       }
     }
     if (tpart == null) {
@@ -1769,6 +1768,35 @@ public Partition getPartition(Table tbl, Map<String, String> partSpec,
     return new Partition(tbl, tpart);
   }
 
+  private void alterPartitionSpec(Table tbl,
+      Map<String, String> partSpec,
+      org.apache.hadoop.hive.metastore.api.Partition tpart,
+      boolean inheritTableSpecs,
+      String partPath) throws HiveException, InvalidOperationException {
+    LOG.debug("altering partition for table " + tbl.getTableName()
+        + " with partition spec : " + partSpec);
+    if (inheritTableSpecs) {
+      tpart.getSd().setOutputFormat(tbl.getTTable().getSd().getOutputFormat());
+      tpart.getSd().setInputFormat(tbl.getTTable().getSd().getInputFormat());
+      tpart.getSd().getSerdeInfo().setSerializationLib(tbl.getSerializationLib());
+      tpart.getSd().getSerdeInfo().setParameters(
+          tbl.getTTable().getSd().getSerdeInfo().getParameters());
+      tpart.getSd().setBucketCols(tbl.getBucketCols());
+      tpart.getSd().setNumBuckets(tbl.getNumBuckets());
+      tpart.getSd().setSortCols(tbl.getSortCols());
+    }
+    if (partPath == null || partPath.trim().equals("")) {
+      throw new HiveException("new partition path should not be null or empty.");
+    }
+    tpart.getSd().setLocation(partPath);
+    tpart.getParameters().put(StatsSetupConst.STATS_GENERATED_VIA_STATS_TASK, "true");
+    String fullName = tbl.getTableName();
+    if (!org.apache.commons.lang.StringUtils.isEmpty(tbl.getDbName())) {
+      fullName = tbl.getDbName() + "." + tbl.getTableName();
+    }
+    alterPartition(fullName, new Partition(tbl, tpart));
+  }
+
   public boolean dropPartition(String tblName, List<String> part_vals,
       boolean deleteData) throws HiveException {
     String[] names = Utilities.getDbTableName(tblName);
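The getPartition() change above is a create-then-alter fallback: if appendPartition() loses a race with a concurrent writer (surfacing either as AlreadyExistsException or as a JDODataStoreException thrown by the datastore's unique-key violation), the loser fetches the partition the winner created and alters it instead of failing the query. The control flow in isolation looks like the following minimal, self-contained sketch; InMemoryStore and its create/get/alter methods are hypothetical stand-ins for the metastore client calls, not Hive APIs.

```java
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch of the create-then-alter fallback in the patch above.
// Only the control flow mirrors the change; all names are hypothetical.
public class CreateOrAlterSketch {

  static class PartitionExistsException extends Exception {}

  static class InMemoryStore {
    private final Map<String, String> partitions = new ConcurrentHashMap<>();

    // Fails if another writer created the partition first, like appendPartition.
    String create(String name, String location) throws PartitionExistsException {
      if (partitions.putIfAbsent(name, location) != null) {
        throw new PartitionExistsException();
      }
      return location;
    }

    String get(String name) { return partitions.get(name); }

    void alter(String name, String location) { partitions.put(name, location); }
  }

  // Create the partition; if a concurrent writer won the race,
  // fall back to altering the existing one instead of failing.
  static String createOrAlter(InMemoryStore store, String name, String location) {
    try {
      return store.create(name, location);
    } catch (PartitionExistsException e) {
      store.alter(name, location);  // the loser of the race updates in place
      return store.get(name);
    }
  }

  public static void main(String[] args) {
    InMemoryStore store = new InMemoryStore();
    System.out.println(createOrAlter(store, "ds=2014-01-01", "/warehouse/t/ds=2014-01-01"));
    // Second call takes the alter path rather than throwing.
    System.out.println(createOrAlter(store, "ds=2014-01-01", "/warehouse/t/ds=2014-01-01"));
  }
}
```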
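CheckJDOException itself exists only to fence the javax.jdo import into a single class, so that Hive.java never references JDODataStoreException directly. If even that compile-time dependency were unwanted, a name-based probe is one possible alternative; here is a sketch under that assumption (JdoExceptionProbe is hypothetical and not part of this patch), which also walks the cause chain since the JDO exception may arrive wrapped:

```java
// Hedged alternative to CheckJDOException.isJDODataStoreException: match the
// exception by class name so neither this helper nor its callers need
// javax.jdo on the compile-time classpath. Illustrative only; the patch
// above uses a direct instanceof check inside a separate class instead.
public final class JdoExceptionProbe {

  private static final String JDO_DATASTORE_EXCEPTION =
      "javax.jdo.JDODataStoreException";

  private JdoExceptionProbe() {}

  public static boolean isJdoDataStoreException(Throwable t) {
    for (Throwable cur = t; cur != null; cur = cur.getCause()) {
      if (JDO_DATASTORE_EXCEPTION.equals(cur.getClass().getName())) {
        return true;
      }
      // Guard against self-referential cause chains.
      if (cur.getCause() == cur) {
        break;
      }
    }
    return false;
  }
}
```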