diff --git beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
index 23f3cf2..be1eab5 100644
--- beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
+++ beeline/src/java/org/apache/hive/beeline/HiveSchemaTool.java
@@ -432,7 +432,7 @@ private boolean checkMetaStorePartitionLocation(String locHeader)
       }
       pStmt.close();
     } catch (SQLException e) {
-      throw new HiveMetaException("Failed to get Partiton Location Info.", e);
+      throw new HiveMetaException("Failed to get Partition Location Info.", e);
     } finally {
       try {
diff --git beeline/src/test/org/apache/hive/beeline/ProxyAuthTest.java beeline/src/test/org/apache/hive/beeline/ProxyAuthTest.java
index 0a08389..68bae9e 100644
--- beeline/src/test/org/apache/hive/beeline/ProxyAuthTest.java
+++ beeline/src/test/org/apache/hive/beeline/ProxyAuthTest.java
@@ -195,7 +195,7 @@ public static void main(String[] args) throws Exception {
     try {
       url = "jdbc:hive2://" + host + ":" + port + "/default;auth=delegationToken";
       con = DriverManager.getConnection(url);
-      throw new Exception ("connection should have failed after token cancelation");
+      throw new Exception ("connection should have failed after token cancellation");
     } catch (SQLException e) {
       // Expected to fail due to canceled token
     }
diff --git common/src/java/org/apache/hadoop/hive/common/HiveInterruptUtils.java common/src/java/org/apache/hadoop/hive/common/HiveInterruptUtils.java
index 43f670e..244d746 100644
--- common/src/java/org/apache/hadoop/hive/common/HiveInterruptUtils.java
+++ common/src/java/org/apache/hadoop/hive/common/HiveInterruptUtils.java
@@ -64,7 +64,7 @@ public static void checkInterrupted() {
       } catch (InterruptedException e) {
         interrupt = e;
       }
-      throw new RuntimeException("Interuppted", interrupt);
+      throw new RuntimeException("Interrupted", interrupt);
     }
   }
 }
diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
index b809562..b9d5931 100644
--- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
+++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java
@@ -1017,7 +1017,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     HIVESCRIPT_ENV_BLACKLIST("hive.script.operator.env.blacklist",
         "hive.txn.valid.txns,hive.script.operator.env.blacklist",
         "Comma separated list of keys from the configuration file not to convert to environment " +
-        "variables when envoking the script operator"),
+        "variables when invoking the script operator"),
     HIVE_STRICT_CHECKS_LARGE_QUERY("hive.strict.checks.large.query", false,
         "Enabling strict large query checks disallows the following:\n" +
         "  Orderby without limit.\n" +
@@ -1054,7 +1054,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     HIVE_CBO_EXTENDED_COST_MODEL("hive.cbo.costmodel.extended", false, "Flag to control enabling the extended cost model based on" +
         "CPU, IO and cardinality. Otherwise, the cost model is based on cardinality."),
     HIVE_CBO_COST_MODEL_CPU("hive.cbo.costmodel.cpu", "0.000001", "Default cost of a comparison"),
-    HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of a transfering a byte over network;" +
+    HIVE_CBO_COST_MODEL_NET("hive.cbo.costmodel.network", "150.0", "Default cost of transferring a byte over network;" +
         " expressed as multiple of CPU cost"),
     HIVE_CBO_COST_MODEL_LFS_WRITE("hive.cbo.costmodel.local.fs.write", "4.0", "Default cost of writing a byte to local FS;" +
         " expressed as multiple of NETWORK cost"),
@@ -1246,9 +1246,9 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "Whether Hive is running in test mode. If yes, it turns on sampling and prefixes the output tablename.",
         false),
     HIVETESTMODEPREFIX("hive.test.mode.prefix", "test_",
-        "In test mode, specfies prefixes for the output table", false),
+        "In test mode, specifies prefixes for the output table", false),
     HIVETESTMODESAMPLEFREQ("hive.test.mode.samplefreq", 32,
-        "In test mode, specfies sampling frequency for table, which is not bucketed,\n" +
+        "In test mode, specifies sampling frequency for table, which is not bucketed,\n" +
         "For example, the following query:\n" +
         "  INSERT OVERWRITE TABLE dest SELECT col1 from src\n" +
         "would be converted to\n" +
@@ -1392,7 +1392,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
 
     HIVE_LAZYSIMPLE_EXTENDED_BOOLEAN_LITERAL("hive.lazysimple.extended_boolean_literal", false,
         "LazySimpleSerde uses this property to determine if it treats 'T', 't', 'F', 'f',\n" +
-        "'1', and '0' as extened, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
+        "'1', and '0' as extended, legal boolean literal, in addition to 'TRUE' and 'FALSE'.\n" +
         "The default is false, which means only 'TRUE' and 'FALSE' are treated as legal\n" +
         "boolean literal."),
 
@@ -2444,7 +2444,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "This needs to be set only if SPNEGO is to be used in authentication."),
     HIVE_SERVER2_PLAIN_LDAP_URL("hive.server2.authentication.ldap.url", null,
         "LDAP connection URL(s),\n" +
-        "this value could contain URLs to mutiple LDAP servers instances for HA,\n" +
+        "this value could contain URLs to multiple LDAP server instances for HA,\n" +
        "each LDAP URL is separated by a SPACE character. URLs are used in the \n" +
        " order specified until a connection is successful."),
     HIVE_SERVER2_PLAIN_LDAP_BASEDN("hive.server2.authentication.ldap.baseDN", null, "LDAP base DN"),
@@ -2697,9 +2697,9 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
         "This flag should be set to true to enable vectorizing using row deserialize.\n" +
         "The default value is false."),
     HIVE_VECTOR_ADAPTOR_USAGE_MODE("hive.vectorized.adaptor.usage.mode", "all",
         new StringSet("none", "chosen", "all"),
-        "Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a cooresponding vectorized class.\n" +
+        "Specifies the extent to which the VectorUDFAdaptor will be used for UDFs that do not have a corresponding vectorized class.\n" +
         "0. none   : disable any usage of VectorUDFAdaptor\n" +
-        "1. chosen : use VectorUDFAdaptor for a small set of UDFs that were choosen for good performance\n" +
+        "1. chosen : use VectorUDFAdaptor for a small set of UDFs that were chosen for good performance\n" +
         "2. all    : use VectorUDFAdaptor for all UDFs"
     ),
@@ -3051,7 +3051,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     LLAP_DAEMON_WAIT_QUEUE_COMPARATOR_CLASS_NAME(
         "hive.llap.daemon.wait.queue.comparator.class.name",
         "org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator",
-        "The priority comparator to use for LLAP scheduler prioroty queue. The built-in options\n" +
+        "The priority comparator to use for LLAP scheduler priority queue. The built-in options\n" +
         "are org.apache.hadoop.hive.llap.daemon.impl.comparator.ShortestJobFirstComparator and\n" +
         ".....FirstInFirstOutComparator", "llap.daemon.wait.queue.comparator.class.name"),
     LLAP_DAEMON_TASK_SCHEDULER_ENABLE_PREEMPTION(
@@ -3131,7 +3131,7 @@ private static void populateLlapDaemonVarsSet(Set<String> llapDaemonVarsSetLocal
     SPARK_RPC_SERVER_ADDRESS("hive.spark.client.rpc.server.address", "",
         "The server address of HiverServer2 host to be used for communication between Hive client and remote Spark driver. " +
         "Default is empty, which means the address will be determined in the same way as for hive.server2.thrift.bind.host." +
-        "This is only necessary if the host has mutiple network addresses and if a different network address other than " +
+        "This is only necessary if the host has multiple network addresses and if a different network address other than " +
         "hive.server2.thrift.bind.host is to be used."),
     SPARK_RPC_SERVER_PORT("hive.spark.client.rpc.server.port", "", "A list of port ranges which can be used by RPC server " +
         "with the format of 49152-49222,49228 and a random one is selected from the list. Default is empty, which randomly " +
diff --git common/src/java/org/apache/hive/http/StackServlet.java common/src/java/org/apache/hive/http/StackServlet.java
index 610b391..3345466 100644
--- common/src/java/org/apache/hive/http/StackServlet.java
+++ common/src/java/org/apache/hive/http/StackServlet.java
@@ -74,7 +74,7 @@ private synchronized void printThreadInfo(
       Thread.State state = info.getThreadState();
       stream.println("  State: " + state);
       stream.println("  Blocked count: " + info.getBlockedCount());
-      stream.println("  Wtaited count: " + info.getWaitedCount());
+      stream.println("  Waited count: " + info.getWaitedCount());
       if (contention) {
         stream.println("  Blocked time: " + info.getBlockedTime());
         stream.println("  Waited time: " + info.getWaitedTime());
diff --git hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
index 1ef4545..832d41b 100644
--- hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
+++ hbase-handler/src/java/org/apache/hadoop/hive/hbase/HiveHBaseTableInputFormat.java
@@ -275,7 +275,7 @@ private void setupKeyRange(Scan scan, List<IndexSearchCondition> conditions, boo
       objInspector = (PrimitiveObjectInspector)eval.initialize(null);
       writable = eval.evaluate(null);
     } catch (ClassCastException cce) {
-      throw new IOException("Currently only primitve types are supported. Found: " +
+      throw new IOException("Currently only primitive types are supported. Found: " +
           sc.getConstantDesc().getTypeString());
     } catch (HiveException e) {
       throw new IOException(e);
diff --git hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
index 494d01f..25fd439 100644
--- hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
+++ hcatalog/server-extensions/src/main/java/org/apache/hive/hcatalog/listener/DbNotificationListener.java
@@ -362,7 +362,7 @@ public void run() {
         try {
           Thread.sleep(sleepTime);
         } catch (InterruptedException e) {
-          LOG.info("Cleaner thread sleep interupted", e);
+          LOG.info("Cleaner thread sleep interrupted", e);
         }
       }
     }
diff --git hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/HeartbeatTimerTask.java hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/HeartbeatTimerTask.java
index 826d1ad..81f99de 100644
--- hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/HeartbeatTimerTask.java
+++ hcatalog/streaming/src/java/org/apache/hive/hcatalog/streaming/mutate/client/lock/HeartbeatTimerTask.java
@@ -69,7 +69,7 @@ public void run() {
   }
 
   private void failLock(Exception e) {
-    LOG.debug("Lock " + lockId + " failed, cancelling heartbeat and notifiying listener: " + listener, e);
+    LOG.debug("Lock " + lockId + " failed, cancelling heartbeat and notifying listener: " + listener, e);
     // Cancel the heartbeat
     cancel();
     listener.lockFailed(lockId, transactionId, Lock.asStrings(tableDescriptors), e);
@@ -80,4 +80,4 @@ public String toString() {
     return "HeartbeatTimerTask [lockId=" + lockId + ", transactionId=" + transactionId + "]";
   }
 
-}
\ No newline at end of file
+}
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
index 752e6ee..9109a3d 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapDaemon.java
@@ -126,7 +126,7 @@ public LlapDaemon(Configuration daemonConf, int numExecutors, long executorMemor
     Preconditions.checkArgument(localDirs != null && localDirs.length > 0,
         "Work dirs must be specified");
     Preconditions.checkArgument(shufflePort == 0 || (shufflePort > 1024 && shufflePort < 65536),
-        "Shuffle Port must be betwee 1024 and 65535, or 0 for automatic selection");
+        "Shuffle Port must be between 1024 and 65535, or 0 for automatic selection");
     int outputFormatServicePort = HiveConf.getIntVar(daemonConf, HiveConf.ConfVars.LLAP_DAEMON_OUTPUT_SERVICE_PORT);
     Preconditions.checkArgument(outputFormatServicePort == 0 ||
         (outputFormatServicePort > 1024 && outputFormatServicePort < 65536),
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
index eb7a8eb..3f3be25 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/daemon/impl/LlapProtocolServerImpl.java
@@ -299,7 +299,7 @@ public GetTokenResponseProto getDelegationToken(RpcController controller,
     if (isRestrictedToClusterUser && !clusterUser.equals(callingUser.getShortUserName())) {
       throw new ServiceException("Management protocol ACL is too permissive. The access has been"
           + " automatically restricted to " + clusterUser + "; " + callingUser.getShortUserName()
-          + " is denied acccess. Please set " + ConfVars.LLAP_VALIDATE_ACLS.varname + " to false,"
+          + " is denied access. Please set " + ConfVars.LLAP_VALIDATE_ACLS.varname + " to false,"
          + " or adjust " + ConfVars.LLAP_MANAGEMENT_ACL.varname + " and "
          + ConfVars.LLAP_MANAGEMENT_ACL_DENY.varname + " to a more restrictive ACL.");
     }
diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
index 9a3e221..0ed7ba8 100644
--- llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
+++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/ShuffleHandler.java
@@ -227,7 +227,7 @@ public void registerAttemptDirs(AttemptPathIdentifier identifier,
     MutableCounterLong shuffleOutputBytes;
     @Metric("# of failed shuffle outputs")
     MutableCounterInt shuffleOutputsFailed;
-    @Metric("# of succeeeded shuffle outputs")
+    @Metric("# of succeeded shuffle outputs")
     MutableCounterInt shuffleOutputsOK;
     @Metric("# of current shuffle connections")
     MutableGaugeInt shuffleConnections;
@@ -888,7 +888,7 @@ protected void verifyRequest(String appid, ChannelHandlerContext ctx,
           ShuffleHeader.DEFAULT_HTTP_HEADER_VERSION);
       if (LOG.isDebugEnabled()) {
         int len = reply.length();
-        LOG.debug("Fetcher request verfied. enc_str=" + enc_str + ";reply=" +
+        LOG.debug("Fetcher request verified. enc_str=" + enc_str + ";reply=" +
             reply.substring(len-len/2, len-1));
       }
     }
diff --git orc/src/java/org/apache/orc/impl/ConvertTreeReaderFactory.java orc/src/java/org/apache/orc/impl/ConvertTreeReaderFactory.java
index 5d5f991..7d41fcd 100644
--- orc/src/java/org/apache/orc/impl/ConvertTreeReaderFactory.java
+++ orc/src/java/org/apache/orc/impl/ConvertTreeReaderFactory.java
@@ -318,7 +318,7 @@ void skipRows(long items) throws IOException {
      */
    // Override this to use convertVector.
    public void setConvertVectorElement(int elementNum) throws IOException {
-      throw new RuntimeException("Expected this method to be overriden");
+      throw new RuntimeException("Expected this method to be overridden");
    }
 
    // Common code used by the conversion.
diff --git serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
index 5601734..8416762 100644
--- serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
+++ serde/src/java/org/apache/hadoop/hive/serde2/lazy/objectinspector/primitive/LazyPrimitiveObjectInspectorFactory.java
@@ -173,7 +173,7 @@ private LazyPrimitiveObjectInspectorFactory() {
       break;
     default:
       throw new RuntimeException(
-          "Primitve type " + typeInfo.getPrimitiveCategory() + " should not take parameters");
+          "Primitive type " + typeInfo.getPrimitiveCategory() + " should not take parameters");
     }
 
     AbstractPrimitiveLazyObjectInspector<?> prev =
diff --git shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
index 4d910d8..5279a64 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/thrift/TokenStoreDelegationTokenSecretManager.java
@@ -118,7 +118,7 @@ public TokenStoreDelegationTokenSecretManager(long delegationKeyUpdateInterval,
   public DelegationTokenIdentifier cancelToken(Token<DelegationTokenIdentifier> token,
       String canceller) throws IOException {
     DelegationTokenIdentifier id = getTokenIdentifier(token);
-    LOGGER.info("Token cancelation requested for identifier: "+id);
+    LOGGER.info("Token cancellation requested for identifier: "+id);
     this.tokenStore.removeToken(id);
     return id;
   }
diff --git shims/common/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java shims/common/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java
index 885ec56..6498ef0 100644
--- shims/common/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java
+++ shims/common/src/main/java/org/apache/hadoop/hive/thrift/ZooKeeperTokenStore.java
@@ -444,7 +444,7 @@ public void init(Object hmsHandler, ServerMode smode) {
         HiveDelegationTokenManager.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR_ALTERNATE, null);
     if (zkConnectString == null || zkConnectString.trim().isEmpty()) {
-      throw new IllegalArgumentException("Zookeeper connect string has to be specifed through "
+      throw new IllegalArgumentException("Zookeeper connect string has to be specified through "
          + "either " + HiveDelegationTokenManager.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR
          + " or " + HiveDelegationTokenManager.DELEGATION_TOKEN_STORE_ZK_CONNECT_STR_ALTERNATE