diff --git ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
index b62df35..8abcefb 100644
--- ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
+++ ql/src/java/org/apache/hadoop/hive/ql/ErrorMsg.java
@@ -332,8 +332,8 @@
       + "fails to construct aggregation for the partition "),
   ANALYZE_TABLE_PARTIALSCAN_AUTOGATHER(10233, "Analyze partialscan is not allowed " +
       "if hive.stats.autogather is set to false"),
-  PARTITION_VALUE_NOT_CONTINUOUS(10234, "Parition values specifed are not continuous." +
-      " A subpartition value is specified without specififying the parent partition's value"),
+  PARTITION_VALUE_NOT_CONTINUOUS(10234, "Partition values specified are not continuous." +
+      " A subpartition value is specified without specifying the parent partition's value"),
   TABLES_INCOMPATIBLE_SCHEMAS(10235, "Tables have incompatible schemas and their partitions " +
       " cannot be exchanged."),
@@ -440,7 +440,7 @@
   CANNOT_DROP_INDEX(10317, "Error while dropping index"),
   INVALID_AST_TREE(10318, "Internal error : Invalid AST"),
   ERROR_SERIALIZE_METASTORE(10319, "Error while serializing the metastore objects"),
-  IO_ERROR(10320, "Error while peforming IO operation "),
+  IO_ERROR(10320, "Error while performing IO operation "),
   ERROR_SERIALIZE_METADATA(10321, "Error while serializing the metadata"),
   INVALID_LOAD_TABLE_FILE_WORK(10322, "Invalid Load Table Work or Load File Work"),
   CLASSPATH_ERROR(10323, "Classpath error"),
@@ -457,8 +457,8 @@
   MATERIALIZED_VIEW_DEF_EMPTY(10403, "Query for the materialized view rebuild could not be retrieved"),
   MERGE_PREDIACTE_REQUIRED(10404, "MERGE statement with both UPDATE and DELETE clauses " +
     "requires \"AND <boolean>\" on the 1st WHEN MATCHED clause of <{0}>", true),
-  MERGE_TOO_MANY_DELETE(10405, "MERGE statment can have at most 1 WHEN MATCHED ... DELETE clause: <{0}>", true),
-  MERGE_TOO_MANY_UPDATE(10406, "MERGE statment can have at most 1 WHEN MATCHED ... UPDATE clause: <{0}>", true),
+  MERGE_TOO_MANY_DELETE(10405, "MERGE statement can have at most 1 WHEN MATCHED ... DELETE clause: <{0}>", true),
+  MERGE_TOO_MANY_UPDATE(10406, "MERGE statement can have at most 1 WHEN MATCHED ... UPDATE clause: <{0}>", true),
   INVALID_JOIN_CONDITION(10407, "Complex condition not supported for (LEFT|RIGHT|FULL) OUTER JOIN"),
   //========================== 20000 range starts here ========================//
   SCRIPT_INIT_ERROR(20000, "Unable to initialize custom script."),
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
index be38b9a..6381a21 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ArchiveUtils.java
@@ -87,7 +87,7 @@ static public PartSpecInfo create(Table tbl, Map partSpec)
       }
       if (!itrPsKeys.next().toLowerCase().equals(
           fs.getName().toLowerCase())) {
-        throw new HiveException("Invalid partition specifiation: "
+        throw new HiveException("Invalid partition specification: "
             + partSpec);
       }
       prefixFields.add(fs);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
index 0ac9053..d44a2f7 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/DDLTask.java
@@ -990,7 +990,7 @@ private int alterIndex(Hive db, AlterIndexDesc alterIndex) throws HiveException
       break;
     default:
-      console.printError("Unsupported Alter commnad");
+      console.printError("Unsupported Alter command");
       return 1;
     }
@@ -1689,7 +1689,7 @@ private int unarchive(Hive db, AlterTableSimpleDesc simpleDesc)
     if (ret != 0) {
       throw new HiveException("Error while copying files from archive, return code=" + ret);
     } else {
-      console.printInfo("Succefully Copied " + copySource + " to " + copyDest);
+      console.printInfo("Successfully Copied " + copySource + " to " + copyDest);
     }
     console.printInfo("Moving " + tmpPath + " to " + intermediateExtractedDir);
@@ -2630,7 +2630,7 @@ public static void dumpLockInfo(DataOutputStream os, ShowLocksResponse rsp) thro
     os.write(separator);
     os.writeBytes("Transaction ID");
     os.write(separator);
-    os.writeBytes("Last Hearbeat");
+    os.writeBytes("Last Heartbeat");
     os.write(separator);
     os.writeBytes("Acquired At");
     os.write(separator);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
index 54d619c..8bc424c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/FileSinkOperator.java
@@ -490,7 +490,7 @@ private void dpSetup() {
     this.maxPartitions = dpCtx.getMaxPartitionsPerNode();
     assert numDynParts == dpColNames.size()
-        : "number of dynamic paritions should be the same as the size of DP mapping";
+        : "number of dynamic partitions should be the same as the size of DP mapping";
     if (dpColNames != null && dpColNames.size() > 0) {
       this.bDynParts = true;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
index 4eea6b9..9f8acc9 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/ReduceSinkOperator.java
@@ -322,7 +322,7 @@ public void process(Object row, int tag) throws HiveException {
       if (conf.getWriteType() == AcidUtils.Operation.UPDATE ||
           conf.getWriteType() == AcidUtils.Operation.DELETE) {
         assert rowInspector instanceof StructObjectInspector :
-            "Exptected rowInspector to be instance of StructObjectInspector but it is a " +
+            "Expected rowInspector to be instance of StructObjectInspector but it is a " +
                 rowInspector.getClass().getName();
         acidRowInspector = (StructObjectInspector)rowInspector;
         // The record identifier is always in the first column
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
index c274933..b4d35a4 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Registry.java
@@ -510,7 +510,7 @@ private void addFunction(String functionName, FunctionInfo function) {
     if (prev != null) {
       if (isBuiltInFunc(prev.getFunctionClass())) {
         throw new RuntimeException("Function " + functionName + " is hive builtin function, " +
-            "which cannot be overriden.");
+            "which cannot be overridden.");
       }
       prev.discarded();
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
index 7fad34f..cec7c1a 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/SkewJoinHandler.java
@@ -233,7 +233,7 @@ public void handleSkew(int tag) throws HiveException {
       // right now we assume that the group by is an ArrayList object. It may
       // change in future.
       if (!(dummyKey instanceof List)) {
-        throw new RuntimeException("Bug in handle skew key in a seperate job.");
+        throw new RuntimeException("Bug in handle skew key in a separate job.");
       }
       skewKeyInCurrentGroup = true;
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
index 2b1d1ce..e846513 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/Utilities.java
@@ -3360,7 +3360,7 @@ public static File createTempDir(String baseDir){
       }
     }
     throw new IllegalStateException("Failed to create a temp dir under "
-        + baseDir + " Giving up after " + MAX_ATTEMPS + " attemps");
+        + baseDir + " Giving up after " + MAX_ATTEMPS + " attempts");
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinEagerRowContainer.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinEagerRowContainer.java
index bb3c4be..4b55778 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinEagerRowContainer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinEagerRowContainer.java
@@ -161,7 +161,7 @@ public void write(MapJoinObjectSerDeContext context, ObjectOutputStream out)
       ++numRowsWritten;
     }
     if(numRows != rowCount()) {
-      throw new ConcurrentModificationException("Values was modifified while persisting");
+      throw new ConcurrentModificationException("Values was modified while persisting");
     }
     if(numRowsWritten != numRows) {
       throw new IllegalStateException("Expected to write " + numRows + " but wrote " + numRowsWritten);
diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
index 83a4612..9fd5611 100644
--- ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
+++ ql/src/java/org/apache/hadoop/hive/ql/exec/persistence/MapJoinTableContainerSerDe.java
@@ -312,7 +312,7 @@ private MapJoinPersistableTableContainer create(
           clazz.getDeclaredConstructor(Map.class);
       return constructor.newInstance(metaData);
     } catch (Exception e) {
-      String msg = "Error while attemping to create table container" +
+      String msg = "Error while attempting to create table container" +
           " of type: " + name + ", with metaData: " + metaData;
       throw new HiveException(msg, e);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
index 8f0c237..d4b487c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
+++ ql/src/java/org/apache/hadoop/hive/ql/io/orc/encoded/EncodedReaderImpl.java
@@ -644,7 +644,7 @@ public DiskRangeList readEncodedStream(long baseOffset, DiskRangeList start, lon
       LOG.error("Failed " + (isCompressed ? "" : "un") + " compressed read; cOffset " + cOffset
           + ", endCOffset " + endCOffset + ", streamOffset " + streamOffset
           + ", unlockUntilCOffset " + unlockUntilCOffset + "; ranges passed in "
-          + RecordReaderUtils.stringifyDiskRanges(start) + "; ranges passed to prepate "
+          + RecordReaderUtils.stringifyDiskRanges(start) + "; ranges passed to prepare "
           + RecordReaderUtils.stringifyDiskRanges(current)); // Don't log exception here.
       throw (ex instanceof IOException) ? (IOException)ex : new IOException(ex);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
index 867e445..4de96d1 100644
--- ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
+++ ql/src/java/org/apache/hadoop/hive/ql/lockmgr/DbTxnManager.java
@@ -452,7 +452,7 @@ public void heartbeat() throws LockException {
       Hive db;
       try {
         db = Hive.get(conf);
-        // Create a new threadlocal synchronized metastore client for use in hearbeater threads.
+        // Create a new threadlocal synchronized metastore client for use in heartbeater threads.
         // This makes the concurrent use of heartbeat thread safe, and won't cause transaction
         // abort due to a long metastore client call blocking the heartbeat call.
         heartbeaterClient = new SynchronizedMetaStoreClient(db.getMSC());
@@ -463,7 +463,7 @@ public void heartbeat() throws LockException {
       }
       // Increment the threadlocal metastore client count
       if (heartbeaterMSClientCount.incrementAndGet() >= heartbeaterThreadPoolSize) {
-        LOG.warn("The number of hearbeater metastore clients - + "
+        LOG.warn("The number of heartbeater metastore clients - + "
             + heartbeaterMSClientCount.get() + ", has exceeded the max limit - "
             + heartbeaterThreadPoolSize);
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
index 89de234..5102d81 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcCtx.java
@@ -236,7 +236,7 @@ public ConstantPropagateProcCtx(ConstantPropagateOption option) {
         }
       }
     }
-    LOG.debug("Offerring constants " + constants.keySet() + " to operator " + op.toString());
+    LOG.debug("Offering constants " + constants.keySet() + " to operator " + op.toString());
     return constants;
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
index 8d7b4ab..517ce31 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ConstantPropagateProcFactory.java
@@ -357,7 +357,7 @@ private static ExprNodeDesc foldExprShortcut(ExprNodeDesc desc, Map
       return new ExprNodeConstantDesc(o).setFoldedFromVal(constStr);
     } catch (HiveException e) {
       LOG.error("Evaluation function " + udf.getClass()
-          + " failed in Constant Propagatation Optimizer.");
+          + " failed in Constant Propagation Optimizer.");
       throw new RuntimeException(e);
     }
   }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
index adc1188..7d2a06c 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/GenMapRedUtils.java
@@ -557,7 +557,7 @@ public static void setMapWork(MapWork plan, ParseContext parseCtx, Set 1)
-      throw new RuntimeException("Ambigous column mapping");
+      throw new RuntimeException("Ambiguous column mapping");
     }
     return ctxLookingFor;
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java
index c8aa48c..6841503 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/correlation/QueryPlanTreeTransformation.java
@@ -229,7 +229,7 @@ protected static void applyCorrelation(
           handledRSs.add((ReduceSinkOperator)op);
           parentsOfMux.add(CorrelationUtilities.getSingleParent(op, true));
         } else {
-          throw new SemanticException("An slibing of ReduceSinkOperator is nethier a " +
+          throw new SemanticException("A sibling of ReduceSinkOperator is neither a " +
               "DemuxOperator nor a ReduceSinkOperator");
         }
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
index ccb75eb..4d3e74e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/listbucketingpruner/ListBucketingPrunerUtils.java
@@ -247,7 +247,7 @@ private static Boolean startComparisonInEqualNode(final List skewedCols,
     String constantValueInFilter = ((ExprNodeConstantDesc) right).getValue().toString();
     assert (skewedCols.contains(columnNameInFilter)) : "List bucketing pruner has a column name "
         + columnNameInFilter
-        + " which is not found in the partiton's skewed column list";
+        + " which is not found in the partition's skewed column list";
     int index = skewedCols.indexOf(columnNameInFilter);
     assert (index < cell.size()) : "GenericUDFOPEqual has a ExprNodeColumnDesc ("
         + columnNameInFilter + ") which is " + index + "th" + "skewed column. "
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
index 37baaf6..6dc1f82 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/physical/Vectorizer.java
@@ -1931,7 +1931,7 @@ private boolean validateAggregationIsPrimitive(VectorAggregateExpression vectorA
     } catch (Exception e) {
       // We should have already attempted to vectorize in validateAggregationDesc.
       if (LOG.isDebugEnabled()) {
-        LOG.debug("Vectorization of aggreation should have succeeded ", e);
+        LOG.debug("Vectorization of aggregation should have succeeded ", e);
       }
       return new Pair(false, false);
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
index 9d7307e..52f1688 100644
--- ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/optimizer/ppr/PartitionPruner.java
@@ -170,7 +170,7 @@ public static PrunedPartitionList prune(Table tab, ExprNodeDesc prunerExpr,
       throws SemanticException {
     if (LOG.isTraceEnabled()) {
-      LOG.trace("Started pruning partiton");
+      LOG.trace("Started pruning partition");
       LOG.trace("dbname = " + tab.getDbName());
       LOG.trace("tabname = " + tab.getTableName());
       LOG.trace("prune Expression = " + (prunerExpr == null ? "" : prunerExpr));
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
index f1f3bf9..5e86e87 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/CalcitePlanner.java
@@ -3231,7 +3231,7 @@ private RelNode genSelectLogicalPlan(QB qb, RelNode srcRel, RelNode starSrcRel)
       } else if (ParseUtils.containsTokenOfType(expr, HiveParser.TOK_FUNCTIONDI)
           && !(srcRel instanceof HiveAggregate)) {
         // Likely a malformed query eg, select hash(distinct c1) from t1;
-        throw new CalciteSemanticException("Distinct without an aggreggation.",
+        throw new CalciteSemanticException("Distinct without an aggregation.",
             UnsupportedFeature.Distinct_without_an_aggreggation);
       } else {
         // Case when this is an expression
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
index c7389a8..9b079a3 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/DDLSemanticAnalyzer.java
@@ -1644,7 +1644,7 @@ private void analyzeAlterTablePartMergeFiles(ASTNode ast,
       if (!((inputFormatClass.equals(RCFileInputFormat.class) ||
           (inputFormatClass.equals(OrcInputFormat.class))))) {
         throw new SemanticException(
-            "Only RCFile and ORCFile Formats are supportted right now.");
+            "Only RCFile and ORCFile Formats are supported right now.");
       }
       mergeDesc.setInputFormatClass(inputFormatClass);
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
index d55db0a..f8cb65d 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/SemanticAnalyzer.java
@@ -7094,7 +7094,7 @@ private void genAutoColumnStatsGatheringPipeline(QB qb, TableDesc table_desc,
     } catch (HiveException e) {
       throw new SemanticException(e.getMessage());
     }
-    LOG.info("Generate an operator pipleline to autogather column stats for table " + tableName
+    LOG.info("Generate an operator pipeline to autogather column stats for table " + tableName
         + " in query " + ctx.getCmd());
     ColumnStatsAutoGatherContext columnStatsAutoGatherContext = null;
     columnStatsAutoGatherContext = new ColumnStatsAutoGatherContext(this, conf, curr, table, partSpec, isInsertInto, ctx);
@@ -11725,7 +11725,7 @@ ASTNode analyzeCreateTable(
         }
       } catch (HiveException e) {
         // should not occur since second parameter to getTableWithQN is false
-        throw new IllegalStateException("Unxpected Exception thrown: " + e.getMessage(), e);
+        throw new IllegalStateException("Unexpected Exception thrown: " + e.getMessage(), e);
       }
     }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
index 8ce8ea3..e8b003e 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TaskCompiler.java
@@ -202,7 +202,7 @@ public void compile(final ParseContext pCtx, final List fetchLimit) {
         LOG.info("For FetchTask, LIMIT " + globalLimitCtx.getGlobalLimit() + " > " + fetchLimit
-            + ". Doesn't qualify limit optimiztion.");
+            + ". Doesn't qualify limit optimization.");
         globalLimitCtx.disableOpt();
       }
diff --git ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
index cd0b588..cdb9e1b 100644
--- ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
+++ ql/src/java/org/apache/hadoop/hive/ql/parse/TezCompiler.java
@@ -350,7 +350,7 @@ protected void generateTaskTree(List> rootTasks, Pa
     opRules.put(new RuleRegExp("No more walking on ReduceSink-MapJoin",
         MapJoinOperator.getOperatorName() + "%"),
         new ReduceSinkMapJoinProc());
-    opRules.put(new RuleRegExp("Recoginze a Sorted Merge Join operator to setup the right edge and"
+    opRules.put(new RuleRegExp("Recognize a Sorted Merge Join operator to setup the right edge and"
         + " stop traversing the DummyStore-MapJoin",
         CommonMergeJoinOperator.getOperatorName() + "%"),
         new MergeJoinProc());
diff --git ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
index 2ecb6f8..4430107 100644
--- ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
+++ ql/src/java/org/apache/hadoop/hive/ql/plan/FetchWork.java
@@ -218,7 +218,7 @@ public void setPartDir(ArrayList partDir) {
     if (partDir != null && partDir.size() > 1) {
       if (partDesc == null || partDir.size() != partDesc.size()) {
         throw new RuntimeException(
-            "Partiton Directory list size doesn't match Partition Descriptor list size");
+            "Partition Directory list size doesn't match Partition Descriptor list size");
       }
       // Construct a sorted Map of Partition Dir - Partition Descriptor; ordering is based on
diff --git ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/SettableConfigUpdater.java ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/SettableConfigUpdater.java
index f12cd51..3d8b0cf 100644
--- ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/SettableConfigUpdater.java
+++ ql/src/java/org/apache/hadoop/hive/ql/security/authorization/plugin/SettableConfigUpdater.java
@@ -47,7 +47,7 @@ public static void setHiveConfWhiteList(HiveConf hiveConf) throws HiveAuthzPlugi
     if(whiteListParamsStr == null || whiteListParamsStr.trim().isEmpty()) {
       throw new HiveAuthzPluginException("Configuration parameter "
           + ConfVars.HIVE_AUTHORIZATION_SQL_STD_AUTH_CONFIG_WHITELIST.varname
-          + " is not iniatialized.");
+          + " is not initialized.");
     }
     // append regexes that user wanted to add
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java
index 093f2a3..501b0b8 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFStringToMap.java
@@ -42,7 +42,7 @@
  */
 @Description(name = "str_to_map", value = "_FUNC_(text, delimiter1, delimiter2) - "
     + "Creates a map by parsing text ", extended = "Split text into key-value pairs"
-    + " using two delimiters. The first delimiter seperates pairs, and the"
+    + " using two delimiters. The first delimiter separates pairs, and the"
     + " second delimiter sperates key and value. If only one parameter is given, default"
     + " delimiters are used: ',' as delimiter1 and '=' as delimiter2.")
 public class GenericUDFStringToMap extends GenericUDF {
diff --git ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java
index 036d112..a95248f 100644
--- ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java
+++ ql/src/java/org/apache/hadoop/hive/ql/udf/generic/GenericUDFTrunc.java
@@ -126,7 +126,7 @@ public ObjectInspector initialize(ObjectInspector[] arguments) throws UDFArgumen
       return initializeNumber(arguments);
     } else {
       throw new UDFArgumentException(
-          "Only primitive type arguments are accepted, when arguments lenght is one, got "
+          "Only primitive type arguments are accepted, when arguments length is one, got "
           + arguments[1].getTypeName());
     }
   }
@@ -478,4 +478,4 @@ protected BigDecimal trunc(BigDecimal input, int scale) {
     return output;
   }
-}
\ No newline at end of file
+}