diff --git beeline/pom.xml beeline/pom.xml
index 4e5ad0795c..a5a1e42896 100644
--- beeline/pom.xml
+++ beeline/pom.xml
@@ -29,7 +29,7 @@
-    <powermock.version>1.6.6</powermock.version>
+    <powermock.version>2.0.2</powermock.version>
@@ -127,7 +127,7 @@
       <groupId>org.powermock</groupId>
-      <artifactId>powermock-api-mockito</artifactId>
+      <artifactId>powermock-api-mockito2</artifactId>
       <version>${powermock.version}</version>
       <scope>test</scope>
diff --git beeline/src/test/org/apache/hive/beeline/TestBufferedRows.java beeline/src/test/org/apache/hive/beeline/TestBufferedRows.java
index 9478ec5880..1add3c01b3 100644
--- beeline/src/test/org/apache/hive/beeline/TestBufferedRows.java
+++ beeline/src/test/org/apache/hive/beeline/TestBufferedRows.java
@@ -17,6 +17,7 @@
  */
 package org.apache.hive.beeline;
 
+import static org.mockito.ArgumentMatchers.anyInt;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.when;
 
@@ -26,7 +27,6 @@
 import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Matchers;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -99,7 +99,7 @@ public Boolean answer(InvocationOnMock invocation) {
       }
     });
 
-    when(mockResultSet.getObject(Matchers.anyInt())).thenAnswer(new Answer() {
+    when(mockResultSet.getObject(anyInt())).thenAnswer(new Answer() {
       @Override
       public String answer(InvocationOnMock invocation) {
         Object[] args = invocation.getArguments();
diff --git beeline/src/test/org/apache/hive/beeline/TestTableOutputFormat.java beeline/src/test/org/apache/hive/beeline/TestTableOutputFormat.java
index 0f557e8bc4..5b8407088f 100644
--- beeline/src/test/org/apache/hive/beeline/TestTableOutputFormat.java
+++ beeline/src/test/org/apache/hive/beeline/TestTableOutputFormat.java
@@ -15,13 +15,14 @@
  */
 package org.apache.hive.beeline;
 
+import static org.mockito.ArgumentMatchers.anyInt;
+
 import java.io.PrintStream;
 import java.sql.ResultSet;
 import java.sql.ResultSetMetaData;
 import java.sql.SQLException;
 import static org.junit.Assert.assertEquals;
 import org.junit.Test;
-import org.mockito.Matchers;
 import static org.mockito.Mockito.when;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
@@ -96,7 +97,7 @@ public Boolean answer(final InvocationOnMock invocation) {
       }
     });
 
-    when(mockResultSet.getObject(Matchers.anyInt())).thenAnswer(new Answer() {
+    when(mockResultSet.getObject(anyInt())).thenAnswer(new Answer() {
       @Override
       public String answer(final InvocationOnMock invocation) {
         Object[] args = invocation.getArguments();
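Note: the pattern in the two hunks above recurs throughout this patch. Mockito 2 deprecates org.mockito.Matchers in favor of org.mockito.ArgumentMatchers; only the import changes, the matcher calls stay the same. A minimal illustrative sketch of the migrated stubbing style (the mock setup here is hypothetical, not part of the patch):

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.sql.ResultSet;
import java.sql.SQLException;

public class ArgumentMatchersSketch {
  public static void main(String[] args) throws SQLException {
    ResultSet rs = mock(ResultSet.class);
    // Mockito 1.x spelled this Matchers.anyInt(); in 2.x the same matcher
    // comes from the ArgumentMatchers class instead.
    when(rs.getObject(anyInt())).thenReturn("value");
    System.out.println(rs.getObject(1)); // prints "value"
  }
}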
.getValidConfVar(eq(MetastoreConf.ConfVars.CONNECTION_DRIVER), same(hiveConf)); diff --git cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java index 42e3bc564a..5f219005c9 100644 --- cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java +++ cli/src/test/org/apache/hadoop/hive/cli/TestCliDriverMethods.java @@ -18,9 +18,9 @@ package org.apache.hadoop.hive.cli; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyString; -import static org.mockito.Matchers.eq; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.times; diff --git common/src/java/org/apache/hadoop/hive/conf/HiveConf.java common/src/java/org/apache/hadoop/hive/conf/HiveConf.java index 7b3acad511..a08dd03a46 100644 --- common/src/java/org/apache/hadoop/hive/conf/HiveConf.java +++ common/src/java/org/apache/hadoop/hive/conf/HiveConf.java @@ -2697,11 +2697,6 @@ private static void populateLlapDaemonVarsSet(Set llapDaemonVarsSetLocal "Truststore password when using a client-side certificate with TLS connectivity to ZooKeeper." + "Overrides any explicit value set via the zookeeper.ssl.trustStore.password " + "system property (note the camelCase)."), - HIVE_ZOOKEEPER_KILLQUERY_ENABLE("hive.zookeeper.killquery.enable", true, - "Whether enabled kill query coordination with zookeeper, " + - "when hive.server2.support.dynamic.service.discovery is enabled."), - HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE("hive.zookeeper.killquery.namespace", "killQueries", - "When kill query coordination is enabled, uses this namespace for registering queries to kill with zookeeper"), // Transactions HIVE_TXN_MANAGER("hive.txn.manager", diff --git common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java index 9b5748e724..15e74db366 100644 --- common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java +++ common/src/test/org/apache/hadoop/hive/common/TestFileUtils.java @@ -22,7 +22,7 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java index 74576975a4..edfcc65460 100644 --- druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java +++ druid-handler/src/test/org/apache/hadoop/hive/druid/serde/TestDruidSerDe.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hive.druid.serde; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyObject; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyObject; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java 
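Note: ArgumentMatchers.anyObject(), kept above, still compiles in Mockito 2 but is itself deprecated, so the plain any() is the forward-compatible spelling. A hypothetical sketch of the equivalent stubbing:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import java.util.List;

public class AnyObjectSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> list = mock(List.class);
    // any() matches any argument, including null, and replaces the
    // deprecated anyObject().
    when(list.contains(any())).thenReturn(true);
    System.out.println(list.contains("x")); // true
  }
}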
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java
index 91b90ed818..4222786d80 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestMutatorClient.java
@@ -20,7 +20,7 @@
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertThat;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.anyString;
+import static org.mockito.ArgumentMatchers.anyString;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
 
@@ -42,7 +42,7 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestMutatorClient {
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java
index c47cf4d7cf..4d30c253c6 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/TestTransaction.java
@@ -31,7 +31,7 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestTransaction {
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestHeartbeatTimerTask.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestHeartbeatTimerTask.java
index 1edec690b0..33e29cf924 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestHeartbeatTimerTask.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestHeartbeatTimerTask.java
@@ -33,7 +33,7 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestHeartbeatTimerTask {
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
index 0a46faf90d..156a37ebc7 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/client/lock/TestLock.java
@@ -24,18 +24,18 @@
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.eq;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyCollection;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.verifyNoMoreInteractions;
-import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.verifyNoInteractions;
 import static org.mockito.Mockito.when;
 
 import java.net.InetAddress;
-import java.util.Collection;
 import java.util.Collections;
 import java.util.List;
 import java.util.Set;
@@ -60,7 +60,7 @@
 import org.mockito.ArgumentCaptor;
 import org.mockito.Captor;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 import com.google.common.collect.ImmutableSet;
 
@@ -99,9 +99,10 @@ public void injectMocks() throws Exception {
     when(mockMetaStoreClient.lock(any(LockRequest.class))).thenReturn(mockLockResponse);
     when(mockLockResponse.getLockid()).thenReturn(LOCK_ID);
     when(mockLockResponse.getState()).thenReturn(ACQUIRED);
-    when(
-        mockHeartbeatFactory.newInstance(any(IMetaStoreClient.class), any(LockFailureListener.class), any(Long.class),
-            any(Collection.class), anyLong(), anyInt())).thenReturn(mockHeartbeat);
+    // Transaction IDs can also be null
+    when(mockHeartbeatFactory.newInstance(
+        any(IMetaStoreClient.class), any(LockFailureListener.class), any(), anyCollection(), any(Long.class), anyInt())
+    ).thenReturn(mockHeartbeat);
 
     readLock = new Lock(mockMetaStoreClient, mockHeartbeatFactory, configuration, mockListener, USER, SOURCES,
         Collections.<String> emptySet(), 3, 0);
@@ -138,7 +139,7 @@ public void testAcquireReadLockCheckHeartbeatCreated() throws Exception {
     configuration.set("hive.txn.timeout", "100s");
 
     readLock.acquire();
 
-    verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), any(Long.class), eq(SOURCES),
+    verify(mockHeartbeatFactory).newInstance(eq(mockMetaStoreClient), eq(mockListener), any(), eq(SOURCES),
         eq(LOCK_ID), eq(75));
   }
 
@@ -321,11 +322,11 @@ public void testHeartbeatFailsTxnAbortedException() throws Exception {
   @Test
   public void testHeartbeatContinuesTException() throws Exception {
     Throwable t = new TException();
-    doThrow(t).when(mockMetaStoreClient).heartbeat(0, LOCK_ID);
+    lenient().doThrow(t).when(mockMetaStoreClient).heartbeat(0, LOCK_ID);
     HeartbeatTimerTask task = new HeartbeatTimerTask(mockMetaStoreClient, mockListener, TRANSACTION_ID, SOURCES, LOCK_ID);
     task.run();
 
-    verifyZeroInteractions(mockListener);
+    verifyNoInteractions(mockListener);
   }
 
   private static Table createTable(String databaseName, String tableName) {
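Note: the lenient() added above addresses a behavior change, not just a rename. Mockito 2's MockitoJUnitRunner is strict by default and fails a test with UnnecessaryStubbingException when a stub is never exercised; lenient() opts a single stubbing out of that check. A minimal sketch of the pattern (names are illustrative):

import static org.mockito.ArgumentMatchers.anyInt;
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;

import java.util.List;

public class LenientStubbingSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> list = mock(List.class);
    // Under the strict MockitoJUnitRunner, a stub that a given test never
    // hits would fail the test with UnnecessaryStubbingException;
    // lenient() marks it as intentionally optional.
    lenient().when(list.get(anyInt())).thenReturn("default");
  }
}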
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java
index 335ecd2af8..4a7d358698 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMetaStorePartitionHelper.java
@@ -20,7 +20,7 @@
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertThat;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.verifyNoInteractions;
 import static org.mockito.Mockito.when;
 
 import java.io.IOException;
@@ -40,7 +40,7 @@
 import org.mockito.ArgumentCaptor;
 import org.mockito.Captor;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestMetaStorePartitionHelper {
@@ -95,7 +95,7 @@ public void injectMocks() throws Exception {
   public void getPathForUnpartitionedTable() throws Exception {
     Path path = helper.getPathForPartition(UNPARTITIONED_VALUES);
     assertThat(path, is(TABLE_PATH));
-    verifyZeroInteractions(mockClient);
+    verifyNoInteractions(mockClient);
   }
 
   @Test
@@ -107,7 +107,7 @@ public void getPathForPartitionedTable() throws Exception {
   @Test
   public void createOnUnpartitionTableDoesNothing() throws Exception {
     helper.createPartitionIfNotExists(UNPARTITIONED_VALUES);
-    verifyZeroInteractions(mockClient);
+    verifyNoInteractions(mockClient);
   }
 
   @Test
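Note: verifyNoInteractions is the direct replacement for the deprecated verifyZeroInteractions; the semantics are unchanged, it still asserts that nothing was ever called on the mock. A small illustrative usage, assuming a mocked collaborator:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verifyNoInteractions;

import java.util.List;

public class VerifyNoInteractionsSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> collaborator = mock(List.class);
    // Passes only if no method was ever invoked on the mock.
    verifyNoInteractions(collaborator);
  }
}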
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java
index fab56b35b9..297e67c40d 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorCoordinator.java
@@ -17,15 +17,16 @@
  */
 package org.apache.hive.hcatalog.streaming.mutate.worker;
 
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyList;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.eq;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyInt;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.ArgumentMatchers.anyLong;
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.Mockito.lenient;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
-import static org.mockito.Mockito.verifyZeroInteractions;
+import static org.mockito.Mockito.verifyNoMoreInteractions;
 import static org.mockito.Mockito.when;
 
 import java.util.Arrays;
@@ -41,7 +42,7 @@
 import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestMutatorCoordinator {
@@ -92,7 +93,7 @@ public void createCoordinator() throws Exception {
         mockMutator);
     when(mockPartitionHelper.getPathForPartition(any(List.class))).thenReturn(PATH_A);
     when(mockRecordInspector.extractRecordIdentifier(RECORD)).thenReturn(ROW__ID_INSERT);
-    when(mockSequenceValidator.isInSequence(any(RecordIdentifier.class))).thenReturn(true);
+    lenient().when(mockSequenceValidator.isInSequence(any(RecordIdentifier.class))).thenReturn(true);
     when(mockGroupingValidator.isInSequence(any(List.class), anyInt())).thenReturn(true);
 
     coordinator = new MutatorCoordinator(configuration, mockMutatorFactory, mockPartitionHelper, mockGroupingValidator,
@@ -247,7 +248,7 @@ public void closeNoRecords() throws Exception {
     coordinator.close();
 
     // No mutator created
-    verifyZeroInteractions(mockMutator);
+    verifyNoMoreInteractions(mockMutator);
   }
 
   @Test
diff --git hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java
index d2c89e53ad..5219532d3e 100644
--- hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java
+++ hcatalog/streaming/src/test/org/apache/hive/hcatalog/streaming/mutate/worker/TestMutatorImpl.java
@@ -19,8 +19,8 @@
 
 import static org.hamcrest.CoreMatchers.is;
 import static org.junit.Assert.assertThat;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.eq;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.eq;
 import static org.mockito.Mockito.never;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
@@ -40,7 +40,7 @@
 import org.mockito.ArgumentCaptor;
 import org.mockito.Captor;
 import org.mockito.Mock;
-import org.mockito.runners.MockitoJUnitRunner;
+import org.mockito.junit.MockitoJUnitRunner;
 
 @RunWith(MockitoJUnitRunner.class)
 public class TestMutatorImpl {
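Note: the coordinator test above swaps verifyZeroInteractions for verifyNoMoreInteractions rather than verifyNoInteractions. The difference matters: verifyNoMoreInteractions tolerates interactions that were already verified earlier in the test, while verifyNoInteractions fails on any interaction at all. A hypothetical illustration:

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;
import static org.mockito.Mockito.verifyNoMoreInteractions;

import java.util.List;

public class NoMoreInteractionsSketch {
  @SuppressWarnings("unchecked")
  public static void main(String[] args) {
    List<String> mock = mock(List.class);
    mock.clear();
    verify(mock).clear();           // the call is explicitly verified...
    verifyNoMoreInteractions(mock); // ...so this still passes;
                                    // verifyNoInteractions(mock) would fail here
  }
}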
diff --git hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java
index 05beccbcce..45ad767cf4 100644
--- hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java
+++ hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/ConcurrentJobRequestsTestBase.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
-import java.util.Map;
 import java.util.concurrent.atomic.AtomicInteger;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
@@ -37,6 +36,11 @@
 import org.mockito.Mockito;
 import org.mockito.stubbing.Answer;
 
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doNothing;
+import static org.mockito.Mockito.doReturn;
+
 /*
  * Base class for mocking job operations with concurrent requests.
  */
@@ -88,8 +92,8 @@ public JobRunnable ConcurrentJobsStatus(final int threadCount, AppConfig appConf
     StatusDelegator delegator = new StatusDelegator(appConfig);
     final StatusDelegator mockDelegator = Mockito.spy(delegator);
 
-    Mockito.doAnswer(answer).when(mockDelegator).getJobStatus(Mockito.any(String.class),
-        Mockito.any(String.class));
+    doAnswer(answer).when(mockDelegator).getJobStatus(any(String.class),
+        any(String.class));
 
     JobRunnable statusJobRunnable = new JobRunnable() {
       @Override
@@ -116,9 +120,9 @@ public JobRunnable ConcurrentListJobs(final int threadCount, AppConfig config,
     ListDelegator delegator = new ListDelegator(config);
     final ListDelegator mockDelegator = Mockito.spy(delegator);
 
-    Mockito.doAnswer(answer).when(mockDelegator).listJobs(Mockito.any(String.class),
-        Mockito.any(boolean.class), Mockito.any(String.class),
-        Mockito.any(int.class), Mockito.any(boolean.class));
+    doAnswer(answer).when(mockDelegator).listJobs(any(String.class),
+        any(boolean.class), any(String.class),
+        any(int.class), any(boolean.class));
 
     JobRunnable listJobRunnable = new JobRunnable() {
       @Override
@@ -149,18 +153,19 @@ public JobRunnable SubmitConcurrentJobs(final int threadCount, AppConfig config,
     TempletonControllerJob mockCtrl = Mockito.mock(TempletonControllerJob.class);
 
-    Mockito.doReturn(jobIdResponse).when(mockCtrl).getSubmittedId();
+    doReturn(jobIdResponse).when(mockCtrl).getSubmittedId();
 
-    Mockito.doReturn(mockCtrl).when(mockDelegator).getTempletonController();
+    doReturn(mockCtrl).when(mockDelegator).getTempletonController();
 
-    Mockito.doAnswer(responseAnswer).when(mockDelegator).runTempletonControllerJob(
-        Mockito.any(TempletonControllerJob.class), Mockito.any(List.class));
+    doAnswer(responseAnswer).when(mockDelegator).runTempletonControllerJob(
+        any(TempletonControllerJob.class), any(List.class));
 
-    Mockito.doAnswer(timeoutResponseAnswer).when(mockDelegator).killJob(
-        Mockito.any(String.class), Mockito.any(String.class));
+    doAnswer(timeoutResponseAnswer).when(mockDelegator).killJob(
+        any(String.class), any(String.class));
 
-    Mockito.doNothing().when(mockDelegator).registerJob(Mockito.any(String.class),
-        Mockito.any(String.class), Mockito.any(String.class), Mockito.any(Map.class));
+    // UserArgs Map can be null - thus use any()
+    doNothing().when(mockDelegator).registerJob(any(String.class),
+        any(String.class), any(String.class), any());
 
     JobRunnable submitJobRunnable = new JobRunnable() {
       @Override
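Note: the "UserArgs Map can be null" comment above points at a real behavior change. In Mockito 2, any(SomeClass.class) performs an instanceof check and therefore no longer matches null, while the untyped any() matches anything. A hypothetical sketch, assuming a registerJob-like method whose last argument may legitimately be null:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.ArgumentMatchers.anyString;
import static org.mockito.Mockito.doNothing;
import static org.mockito.Mockito.spy;

import java.util.Map;

public class NullArgumentSketch {
  // Hypothetical collaborator standing in for the spied delegator in the patch.
  static class Registrar {
    void register(String id, Map<String, Object> userArgs) { /* real work elided */ }
  }

  public static void main(String[] args) {
    Registrar registrar = spy(new Registrar());
    // any(Map.class) would NOT match a null Map in Mockito 2; any() does.
    // The do-style stubbing also avoids invoking the spy's real method.
    doNothing().when(registrar).register(anyString(), any());
    registrar.register("job42", null); // intercepted by the stub above
  }
}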
diff --git hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java
index 12a211af3f..4865d1f0d0 100644
--- hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java
+++ hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreads.java
@@ -18,10 +18,7 @@
  */
 package org.apache.hive.hcatalog.templeton;
 
-import java.io.IOException;
 import java.util.ArrayList;
-import java.util.concurrent.TimeoutException;
-import org.eclipse.jetty.http.HttpStatus;
 
 import org.junit.BeforeClass;
 import org.junit.Rule;
diff --git hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java
index 19d87b6126..d351a98a4c 100644
--- hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java
+++ hcatalog/webhcat/svr/src/test/java/org/apache/hive/hcatalog/templeton/TestConcurrentJobRequestsThreadsAndTimeout.java
@@ -21,7 +21,6 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.concurrent.TimeoutException;
-import org.eclipse.jetty.http.HttpStatus;
 
 import org.junit.BeforeClass;
 import org.junit.Rule;
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
index 4a64927bad..692d40d289 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/parse/TestScheduledReplicationScenarios.java
@@ -104,12 +104,12 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable {
             ScheduledQueryExecutionService.startScheduledQueryExecutorService(primary.hiveConf)) {
       int next = 0;
       ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next));
-      primary.run("create scheduled query s1_t1 every 10 minutes as repl dump " + primaryDbName);
-      primary.run("alter scheduled query s1_t1 execute");
+      primary.run("create scheduled query s1 every 10 minutes as repl dump " + primaryDbName);
+      primary.run("alter scheduled query s1 execute");
       Thread.sleep(6000);
-      replica.run("create scheduled query s2_t1 every 10 minutes as repl load " + primaryDbName + " INTO "
+      replica.run("create scheduled query s2 every 10 minutes as repl load " + primaryDbName + " INTO "
           + replicatedDbName);
-      replica.run("alter scheduled query s2_t1 execute");
+      replica.run("alter scheduled query s2 execute");
       Thread.sleep(20000);
       replica.run("use " + replicatedDbName)
               .run("show tables like 't1'")
@@ -123,9 +123,9 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable {
               .run("insert into t1 values(4)");
       next++;
       ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next));
-      primary.run("alter scheduled query s1_t1 execute");
+      primary.run("alter scheduled query s1 execute");
       Thread.sleep(20000);
-      replica.run("alter scheduled query s2_t1 execute");
+      replica.run("alter scheduled query s2 execute");
       Thread.sleep(20000);
       replica.run("use " + replicatedDbName)
              .run("show tables like 't1'")
@@ -139,9 +139,9 @@ public void testAcidTablesReplLoadBootstrapIncr() throws Throwable {
              .run("insert into t1 values(6)");
       next++;
       ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next));
-      primary.run("alter scheduled query s1_t1 execute");
+      primary.run("alter scheduled query s1 execute");
       Thread.sleep(30000);
-      replica.run("alter scheduled query s2_t1 execute");
+      replica.run("alter scheduled query s2 execute");
       Thread.sleep(30000);
       replica.run("use " + replicatedDbName)
              .run("show tables like 't1'")
@@ -152,8 +152,8 @@
 
     } finally {
-      primary.run("drop scheduled query s1_t1");
-      replica.run("drop scheduled query s2_t1");
+      primary.run("drop scheduled query s1");
+      replica.run("drop scheduled query s2");
     }
   }
 
@@ -163,46 +163,46 @@ public void testExternalTablesReplLoadBootstrapIncr() throws Throwable {
     String withClause = " WITH('" + HiveConf.ConfVars.REPL_EXTERNAL_TABLE_BASE_DIR.varname
         + "'='/replica_external_base')";
     primary.run("use " + primaryDbName)
-            .run("create external table t2 (id int)")
-            .run("insert into t2 values(1)")
-            .run("insert into t2 values(2)");
+            .run("create external table t1 (id int)")
+            .run("insert into t1 values(1)")
+            .run("insert into t1 values(2)");
     try (ScheduledQueryExecutionService schqS =
             ScheduledQueryExecutionService.startScheduledQueryExecutorService(primary.hiveConf)) {
       int next = 0;
       ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next));
-      primary.run("create scheduled query s1_t2 every 10 minutes as repl dump " + primaryDbName + withClause);
-      primary.run("alter scheduled query s1_t2 execute");
+      primary.run("create scheduled query s1 every 10 minutes as repl dump " + primaryDbName + withClause);
+      primary.run("alter scheduled query s1 execute");
       Thread.sleep(80000);
-      replica.run("create scheduled query s2_t2 every 10 minutes as repl load " + primaryDbName + " INTO "
+      replica.run("create scheduled query s2 every 10 minutes as repl load " + primaryDbName + " INTO "
          + replicatedDbName);
-      replica.run("alter scheduled query s2_t2 execute");
+      replica.run("alter scheduled query s2 execute");
       Thread.sleep(80000);
       replica.run("use " + replicatedDbName)
-              .run("show tables like 't2'")
-              .verifyResult("t2")
-              .run("select id from t2 order by id")
+              .run("show tables like 't1'")
+              .verifyResult("t1")
+              .run("select id from t1 order by id")
               .verifyResults(new String[]{"1", "2"});
 
       // First incremental, after bootstrap
       primary.run("use " + primaryDbName)
-              .run("insert into t2 values(3)")
-              .run("insert into t2 values(4)");
+              .run("insert into t1 values(3)")
+              .run("insert into t1 values(4)");
       next++;
       ReplDumpWork.injectNextDumpDirForTest(String.valueOf(next));
-      primary.run("alter scheduled query s1_t2 execute");
+      primary.run("alter scheduled query s1 execute");
       Thread.sleep(80000);
-      replica.run("alter scheduled query s2_t2 execute");
+      replica.run("alter scheduled query s2 execute");
       Thread.sleep(80000);
       replica.run("use " + replicatedDbName)
-              .run("show tables like 't2'")
-              .verifyResult("t2")
-              .run("select id from t2 order by id")
+              .run("show tables like 't1'")
+              .verifyResult("t1")
+              .run("select id from t1 order by id")
               .verifyResults(new String[]{"1", "2", "3", "4"});
     } finally {
-      primary.run("drop scheduled query s1_t2");
-      replica.run("drop scheduled query s2_t2");
+      primary.run("drop scheduled query s1");
+      replica.run("drop scheduled query s2");
     }
   }
 }
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
index 37bb6aded3..0db6eb74e8 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerCheckInvocation.java
@@ -23,7 +23,7 @@
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
-import static org.mockito.Matchers.any;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.reset;
 import static org.mockito.Mockito.verify;
 import static org.mockito.Mockito.when;
diff --git itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java
index cf120ea1f2..36ac85b730 100644
--- itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java
+++ itests/hive-unit/src/test/java/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHiveAuthorizerShowFilters.java
@@ -19,7 +19,7 @@
 package org.apache.hadoop.hive.ql.security.authorization.plugin;
 
 import static org.junit.Assert.assertEquals;
-import static org.mockito.Matchers.any;
+import static org.mockito.ArgumentMatchers.any;
 
 import java.util.ArrayList;
 import java.util.Arrays;
@@ -71,8 +71,11 @@
  * HiveAuthorizer.filterListCmdObjects, and stores the list argument in
  * filterArguments
  */
-  protected static class MockedHiveAuthorizerFactory implements HiveAuthorizerFactory {
-    protected abstract class AuthorizerWithFilterCmdImpl implements HiveAuthorizer {
+  public static class MockedHiveAuthorizerFactory implements HiveAuthorizerFactory {
+    /**
+     * Abstracts HiveAuthorizer interface for hive authorization plugins
+     */
+    public abstract class AuthorizerWithFilterCmdImpl implements HiveAuthorizer {
       @Override
       public List filterListCmdObjects(List listObjs, HiveAuthzContext context)
           throws HiveAuthzPluginException, HiveAccessControlException {
diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
index 45b22f9514..3973ec9270 100644
--- itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
+++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/BaseJdbcWithMiniLlap.java
@@ -40,7 +40,6 @@
 import java.util.UUID;
 
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.ql.ddl.process.kill.KillQueriesOperation;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
 import org.apache.hadoop.mapred.RecordReader;
@@ -785,8 +784,6 @@ public void run() {
     con2.close();
 
     assertNotNull("tExecute", tExecuteHolder.throwable);
-    assertEquals(HiveStatement.QUERY_CANCELLED_MESSAGE + " "+ KillQueriesOperation.KILL_QUERY_MESSAGE,
-        tExecuteHolder.throwable.getMessage());
     assertNull("tCancel", tKillHolder.throwable);
   }
 
diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
index 1aab03d08f..68a515ccbe 100644
--- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
+++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithMiniLlapArrow.java
@@ -31,7 +31,6 @@
 import org.apache.hadoop.hive.llap.FieldDesc;
 import org.apache.hadoop.hive.llap.LlapBaseInputFormat;
 import org.apache.hadoop.hive.llap.Row;
-import org.apache.hadoop.hive.ql.ddl.process.kill.KillQueriesOperation;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.mapred.InputSplit;
 import org.apache.hadoop.mapred.JobConf;
@@ -413,8 +412,6 @@ public void testKillQueryById() throws Exception {
     testKillQueryInternal(System.getProperty("user.name"), System.getProperty("user.name"), false,
         tExecuteHolder, tKillHolder);
     assertNotNull("tExecute", tExecuteHolder.throwable);
-    assertEquals(HiveStatement.QUERY_CANCELLED_MESSAGE + " "+ KillQueriesOperation.KILL_QUERY_MESSAGE,
-        tExecuteHolder.throwable.getMessage());
     assertNull("tCancel", tKillHolder.throwable);
   }
 
@@ -434,8 +431,6 @@ public void testKillQueryByTagAdmin() throws Exception {
     ExceptionHolder tKillHolder = new ExceptionHolder();
     testKillQueryInternal("user1", System.getProperty("user.name"), true, tExecuteHolder, tKillHolder);
     assertNotNull("tExecute", tExecuteHolder.throwable);
-    assertEquals(HiveStatement.QUERY_CANCELLED_MESSAGE + " "+ KillQueriesOperation.KILL_QUERY_MESSAGE,
-        tExecuteHolder.throwable.getMessage());
     assertNull("tCancel", tKillHolder.throwable);
   }
 
@@ -445,8 +440,6 @@ public void testKillQueryByTagOwner() throws Exception {
     ExceptionHolder tKillHolder = new ExceptionHolder();
     testKillQueryInternal("user1", "user1", true, tExecuteHolder, tKillHolder);
     assertNotNull("tExecute", tExecuteHolder.throwable);
-    assertEquals(HiveStatement.QUERY_CANCELLED_MESSAGE + " "+ KillQueriesOperation.KILL_QUERY_MESSAGE,
-        tExecuteHolder.throwable.getMessage());
     assertNull("tCancel", tKillHolder.throwable);
   }
 
diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithServiceDiscovery.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithServiceDiscovery.java
deleted file mode 100644
index 1621e7e52c..0000000000
--- itests/hive-unit/src/test/java/org/apache/hive/jdbc/TestJdbcWithServiceDiscovery.java
+++ /dev/null
@@ -1,326 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.jdbc;
-
-import org.apache.curator.test.TestingServer;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hive.conf.HiveConf;
-import org.apache.hadoop.hive.ql.ddl.process.kill.KillQueriesOperation;
-import org.apache.hadoop.hive.ql.exec.UDF;
-import org.apache.hadoop.hive.registry.impl.ZkRegistryBase;
-import org.apache.hive.jdbc.miniHS2.MiniHS2;
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.io.File;
-import java.net.URL;
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.UUID;
-import java.util.concurrent.TimeUnit;
-
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertNotNull;
-import static org.junit.Assert.assertNull;
-import static org.junit.Assert.assertTrue;
-
-/**
- * Test JDBC driver when two HS2 instance is running with service discovery enabled.
- */
-public class TestJdbcWithServiceDiscovery {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestJdbcWithServiceDiscovery.class);
-  private static final String TABLE_NAME = "testJdbcMinihs2Tbl";
-  private static final String DB_NAME = "testJdbcMinihs2";
-  private static final String REMOTE_ERROR_MESSAGE = "Unable to kill query locally or on remote servers.";
-
-  private static TestingServer zkServer;
-  private static MiniHS2 miniHS2server1;
-  private static MiniHS2 miniHS2server2;
-  private static String miniHS2directUrl1;
-  private static String miniHS2directUrl2;
-  private static Path kvDataFilePath;
-
-  @BeforeClass
-  public static void setup() throws Exception {
-    MiniHS2.cleanupLocalDir();
-    zkServer = new TestingServer();
-
-    // Create one MiniHS2 with Tez and one with Local FS only
-    HiveConf hiveConf1 = getTezConf();
-    HiveConf hiveConf2 = new HiveConf();
-
-    setSDConfigs(hiveConf1);
-    setSDConfigs(hiveConf2);
-
-    miniHS2server1 = new MiniHS2.Builder().withConf(hiveConf1).withMiniTez().build();
-    miniHS2server2 = new MiniHS2.Builder().withConf(hiveConf2).cleanupLocalDirOnStartup(false).build();
-
-    Class.forName(MiniHS2.getJdbcDriverName());
-    String instanceId1 = UUID.randomUUID().toString();
-    miniHS2server1.start(getConfOverlay(instanceId1));
-    miniHS2directUrl1 =
-        "jdbc:hive2://" + miniHS2server1.getHost() + ":" + miniHS2server1.getBinaryPort() + "/" + DB_NAME;
-    String instanceId2 = UUID.randomUUID().toString();
-    miniHS2server2.start(getConfOverlay(instanceId2));
-    miniHS2directUrl2 =
-        "jdbc:hive2://" + miniHS2server2.getHost() + ":" + miniHS2server2.getBinaryPort() + "/" + DB_NAME;
-
-    String dataFileDir = hiveConf1.get("test.data.files").replace('\\', '/').replace("c:", "");
-    kvDataFilePath = new Path(dataFileDir, "kv1.txt");
-
-    setupDb();
-  }
-
-  /**
-   * SleepMsUDF.
-   */
-  public static class SleepMsUDF extends UDF {
-    public Integer evaluate(int value, int ms) {
-      try {
-        Thread.sleep(ms);
-      } catch (InterruptedException e) {
-        // No-op
-      }
-      return value;
-    }
-  }
-
-  public static void setupDb() throws Exception {
-    Connection conDefault = DriverManager
-        .getConnection("jdbc:hive2://" + miniHS2server1.getHost() + ":" + miniHS2server1.getBinaryPort() + "/default",
-            System.getProperty("user.name"), "bar");
-    Statement stmt = conDefault.createStatement();
-    String tblName = DB_NAME + "." + TABLE_NAME;
-    stmt.execute("drop database if exists " + DB_NAME + " cascade");
-    stmt.execute("create database " + DB_NAME);
-    stmt.execute("use " + DB_NAME);
-    stmt.execute("create table " + tblName + " (int_col int, value string) ");
-    stmt.execute("load data local inpath '" + kvDataFilePath.toString() + "' into table " + tblName);
-    stmt.execute("grant select on table " + tblName + " to role public");
-
-    stmt.close();
-    conDefault.close();
-  }
-
-  @AfterClass
-  public static void afterTest() throws Exception {
-    if ((miniHS2server1 != null) && miniHS2server1.isStarted()) {
-      try {
-        miniHS2server1.stop();
-      } catch (Exception e) {
-        LOG.warn("Error why shutting down Hs2", e);
-      }
-    }
-    if ((miniHS2server2 != null) && miniHS2server2.isStarted()) {
-      try {
-        miniHS2server2.stop();
-      } catch (Exception e) {
-        LOG.warn("Error why shutting down Hs2", e);
-      }
-    }
-    if (zkServer != null) {
-      zkServer.close();
-      zkServer = null;
-    }
-    MiniHS2.cleanupLocalDir();
-  }
-
-  private static HiveConf getTezConf() throws Exception {
-    String confDir = "../../data/conf/tez/";
-    HiveConf.setHiveSiteLocation(new URL("file://" + new File(confDir).toURI().getPath() + "/hive-site.xml"));
-    System.out.println("Setting hive-site: " + HiveConf.getHiveSiteLocation());
-    HiveConf defaultConf = new HiveConf();
-    defaultConf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-    defaultConf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
-    defaultConf.addResource(new URL("file://" + new File(confDir).toURI().getPath() + "/tez-site.xml"));
-    return defaultConf;
-  }
-
-  private static void setSDConfigs(HiveConf conf) {
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY, false);
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY, true);
-    conf.setVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_QUORUM, zkServer.getConnectString());
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE, false);
-    conf.setTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_TIMEOUT, 2, TimeUnit.SECONDS);
-    conf.setTimeVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, 100, TimeUnit.MILLISECONDS);
-    conf.setIntVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES, 1);
-    conf.setBoolVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE, true);
-  }
-
-  private static Map getConfOverlay(final String instanceId) {
-    Map confOverlay = new HashMap<>();
-    confOverlay.put("hive.server2.zookeeper.publish.configs", "true");
-    confOverlay.put(ZkRegistryBase.UNIQUE_IDENTIFIER, instanceId);
-    return confOverlay;
-  }
-
-  private static class ExceptionHolder {
-    Throwable throwable;
-  }
-
-  private void executeQueryAndKill(Connection con1, Connection con2, ExceptionHolder tExecuteHolder,
-      ExceptionHolder tKillHolder) throws SQLException, InterruptedException {
-    final HiveStatement stmt = (HiveStatement) con1.createStatement();
-    final Statement stmt2 = con2.createStatement();
-    final StringBuffer stmtQueryId = new StringBuffer();
-
-    // Thread executing the query
-    Thread tExecute = new Thread(() -> {
-      try {
-        LOG.info("Executing waiting query.");
-        // The test table has 500 rows, so total query time should be ~ 500*500ms
-        stmt.executeAsync(
-            "select sleepMsUDF(t1.int_col, 10), t1.int_col, t2.int_col " + "from " + TABLE_NAME + " t1 join "
-                + TABLE_NAME + " t2 on t1.int_col = t2.int_col");
-        stmtQueryId.append(stmt.getQueryId());
-        stmt.getUpdateCount();
-      } catch (SQLException e) {
-        tExecuteHolder.throwable = e;
-      }
-    });
-
-    tExecute.start();
-
-    // wait for other thread to create the stmt handle
-    int count = 0;
-    while (count < 10) {
-      try {
-        Thread.sleep(2000);
-        String queryId;
-        if (stmtQueryId.length() != 0) {
-          queryId = stmtQueryId.toString();
-        } else {
-          count++;
-          continue;
-        }
-
-        LOG.info("Killing query: " + queryId);
-        stmt2.execute("kill query '" + queryId + "'");
-        stmt2.close();
-        break;
-      } catch (SQLException e) {
-        LOG.warn("Exception when kill query", e);
-        tKillHolder.throwable = e;
-        break;
-      }
-    }
-
-    tExecute.join();
-    try {
-      stmt.close();
-      con1.close();
-      con2.close();
-    } catch (Exception e) {
-      LOG.warn("Exception when close stmt and con", e);
-    }
-  }
-
-  @Test
-  public void testKillQueryWithSameServer() throws Exception {
-    Connection con1 = DriverManager.getConnection(miniHS2directUrl1, System.getProperty("user.name"), "bar");
-    Connection con2 = DriverManager.getConnection(miniHS2directUrl1, System.getProperty("user.name"), "bar");
-
-    Statement stmt = con1.createStatement();
-    stmt.execute("create temporary function sleepMsUDF as '" + SleepMsUDF.class.getName() + "'");
-    stmt.close();
-
-    ExceptionHolder tExecuteHolder = new ExceptionHolder();
-    ExceptionHolder tKillHolder = new ExceptionHolder();
-
-    executeQueryAndKill(con1, con2, tExecuteHolder, tKillHolder);
-
-    assertNotNull("tExecute", tExecuteHolder.throwable);
-    assertEquals("Query was cancelled. User invoked KILL QUERY", tExecuteHolder.throwable.getMessage());
-    assertNull("tCancel", tKillHolder.throwable);
-  }
-
-  @Test
-  public void testKillQueryWithDifferentServer() throws Exception {
-    Connection con1 = DriverManager.getConnection(miniHS2directUrl1, System.getProperty("user.name"), "bar");
-    Connection con2 = DriverManager.getConnection(miniHS2directUrl2, System.getProperty("user.name"), "bar");
-
-    Statement stmt = con1.createStatement();
-    stmt.execute("create temporary function sleepMsUDF as '" + SleepMsUDF.class.getName() + "'");
-    stmt.close();
-
-    ExceptionHolder tExecuteHolder = new ExceptionHolder();
-    ExceptionHolder tKillHolder = new ExceptionHolder();
-
-    executeQueryAndKill(con1, con2, tExecuteHolder, tKillHolder);
-
-    assertNotNull("tExecute", tExecuteHolder.throwable);
-    assertEquals(HiveStatement.QUERY_CANCELLED_MESSAGE + " " + KillQueriesOperation.KILL_QUERY_MESSAGE,
-        tExecuteHolder.throwable.getMessage());
-    assertNull("tCancel", tKillHolder.throwable);
-  }
-
-  @Test
-  public void testKillQueryWithDifferentServerZKTurnedOff() throws Exception {
-    Connection con1 = DriverManager.getConnection(miniHS2directUrl1, System.getProperty("user.name"), "bar");
-    Connection con2 = DriverManager.getConnection(miniHS2directUrl2, System.getProperty("user.name"), "bar");
-
-    Statement stmt = con1.createStatement();
-    stmt.execute("create temporary function sleepMsUDF as '" + SleepMsUDF.class.getName() + "'");
-    stmt.close();
-
-    stmt = con2.createStatement();
-    stmt.execute("set hive.zookeeper.killquery.enable = false");
-    stmt.close();
-
-    ExceptionHolder tExecuteHolder = new ExceptionHolder();
-    ExceptionHolder tKillHolder = new ExceptionHolder();
-
-    executeQueryAndKill(con1, con2, tExecuteHolder, tKillHolder);
-
-    assertNull("tExecute", tExecuteHolder.throwable);
-    assertNull("tCancel", tKillHolder.throwable);
-  }
-
-  @Test
-  public void testKillQueryWithRandomId() throws Exception {
-    Connection con1 = DriverManager.getConnection(miniHS2directUrl1, System.getProperty("user.name"), "bar");
-    ExceptionHolder tKillHolder = new ExceptionHolder();
-
-    Statement stmt = con1.createStatement();
-    String queryId = "randomId123";
-    try {
LOG.info("Killing query: " + queryId); - stmt.execute("kill query '" + queryId + "'"); - stmt.close(); - } catch (SQLException e) { - LOG.warn("Exception when kill query", e); - tKillHolder.throwable = e; - } - try { - con1.close(); - } catch (Exception e) { - LOG.warn("Exception when close stmt and con", e); - } - - assertNotNull("tCancel", tKillHolder.throwable); - assertTrue(tKillHolder.throwable.getMessage(), tKillHolder.throwable.getMessage().contains(REMOTE_ERROR_MESSAGE)); - } -} diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java index 4b1a101124..977fe43320 100644 --- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java +++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java @@ -18,37 +18,33 @@ package org.apache.hive.jdbc.authorization; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Mockito.verify; - -import java.sql.Connection; -import java.sql.DriverManager; -import java.sql.SQLException; -import java.sql.Statement; -import java.util.HashMap; - import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider; import org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; -import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; import org.apache.hive.jdbc.miniHS2.MiniHS2; import org.junit.AfterClass; import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; -import org.mockito.Matchers; import org.mockito.Mockito; + +import java.sql.Connection; +import java.sql.DriverManager; +import java.sql.Statement; +import java.util.HashMap; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertTrue; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.Mockito.verify; /** * Test context information that gets passed to authorization api */ @@ -103,8 +99,7 @@ public void testAuthzContextContentsCmdProcessorCmd() throws Exception { verifyContextContents("dfs -ls /", "-ls /"); } - private void verifyContextContents(final String cmd, String ctxCmd) throws Exception, - HiveAuthzPluginException, HiveAccessControlException { + private void verifyContextContents(final String cmd, String ctxCmd) throws Exception { Connection hs2Conn = getConnection("user1"); Statement stmt = hs2Conn.createStatement(); @@ -116,8 +111,7 @@ private void 
diff --git itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java
index 4b1a101124..977fe43320 100644
--- itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java
+++ itests/hive-unit/src/test/java/org/apache/hive/jdbc/authorization/TestHS2AuthzContext.java
@@ -18,37 +18,33 @@
 package org.apache.hive.jdbc.authorization;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
-import static org.mockito.Mockito.verify;
-
-import java.sql.Connection;
-import java.sql.DriverManager;
-import java.sql.SQLException;
-import java.sql.Statement;
-import java.util.HashMap;
-
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hadoop.hive.conf.HiveConf.ConfVars;
 import org.apache.hadoop.hive.ql.security.HiveAuthenticationProvider;
 import org.apache.hadoop.hive.ql.security.SessionStateUserAuthenticator;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAccessControlException;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizer;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthorizerFactory;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzPluginException;
+import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzSessionContext;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveMetastoreClientFactory;
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject;
-import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
 import org.apache.hive.jdbc.miniHS2.MiniHS2;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
-import org.mockito.Matchers;
 import org.mockito.Mockito;
+
+import java.sql.Connection;
+import java.sql.DriverManager;
+import java.sql.Statement;
+import java.util.HashMap;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.mockito.ArgumentMatchers.any;
+import static org.mockito.ArgumentMatchers.anyList;
+import static org.mockito.Mockito.verify;
 
 /**
  * Test context information that gets passed to authorization api
 */
@@ -103,8 +99,7 @@ public void testAuthzContextContentsCmdProcessorCmd() throws Exception {
     verifyContextContents("dfs -ls /", "-ls /");
   }
 
-  private void verifyContextContents(final String cmd, String ctxCmd) throws Exception,
-      HiveAuthzPluginException, HiveAccessControlException {
+  private void verifyContextContents(final String cmd, String ctxCmd) throws Exception {
     Connection hs2Conn = getConnection("user1");
     Statement stmt = hs2Conn.createStatement();
@@ -116,8 +111,7 @@ private void verifyContextContents(final String cmd, String ctxCmd) throws Excep
         .forClass(HiveAuthzContext.class);
 
     verify(mockedAuthorizer).checkPrivileges(any(HiveOperationType.class),
-        Matchers.anyListOf(HivePrivilegeObject.class),
-        Matchers.anyListOf(HivePrivilegeObject.class), contextCapturer.capture());
+        anyList(), any(), contextCapturer.capture());
 
     HiveAuthzContext context = contextCapturer.getValue();
diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
index 1ee3a501eb..a5784d4188 100644
--- itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
+++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/session/TestHiveSessionImpl.java
@@ -18,6 +18,9 @@
 package org.apache.hive.service.cli.session;
 
+import static org.mockito.ArgumentMatchers.eq;
+import static org.mockito.ArgumentMatchers.same;
+
 import org.apache.hadoop.hive.conf.HiveConf;
 import org.apache.hive.service.cli.HiveSQLException;
 import org.apache.hive.service.cli.OperationHandle;
@@ -27,7 +30,6 @@
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.Mockito;
-import static org.mockito.Matchers.*;
 
 import java.util.Arrays;
 import java.util.HashMap;
diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestMiniHS2StateWithNoZookeeper.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestMiniHS2StateWithNoZookeeper.java
index 0df3058359..99e681e5b2 100644
--- itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestMiniHS2StateWithNoZookeeper.java
+++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestMiniHS2StateWithNoZookeeper.java
@@ -50,14 +50,11 @@
   private static HiveConf hiveConf = null;
 
   @BeforeClass
-  public static void beforeTest() throws Exception {
-    MiniHS2.cleanupLocalDir();
+  public static void beforeTest() throws Exception {
     hiveConf = new HiveConf();
     hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY, true);
     hiveConf.setIntVar(ConfVars.HIVE_ZOOKEEPER_CONNECTION_MAX_RETRIES, 0);
    hiveConf.setTimeVar(ConfVars.HIVE_ZOOKEEPER_CONNECTION_BASESLEEPTIME, 0, TimeUnit.MILLISECONDS);
-    // Disable killquery, this way only HS2 start will fail, not the SessionManager service
-    hiveConf.setBoolVar(ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE, false);
     miniHS2 = new MiniHS2(hiveConf);
     Map confOverlay = new HashMap();
     try {
diff --git itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
index 901286775d..c073ace081 100644
--- itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
+++ itests/hive-unit/src/test/java/org/apache/hive/service/cli/thrift/TestThriftHttpCLIServiceFeatures.java
@@ -20,7 +20,7 @@
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
-import static org.mockito.Matchers.any;
+import static org.mockito.ArgumentMatchers.any;
 import static org.mockito.Mockito.verify;
 
 import java.io.IOException;
@@ -45,7 +45,6 @@
 import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext;
 import org.apache.hive.jdbc.HttpBasicAuthInterceptor;
 import org.apache.hive.service.auth.HiveAuthConstants;
-import org.apache.hive.service.auth.HiveAuthConstants.AuthTypes;
 import org.apache.hive.service.rpc.thrift.TCLIService;
 import org.apache.hive.service.rpc.thrift.TExecuteStatementReq;
 import org.apache.hive.service.rpc.thrift.TOpenSessionReq;
diff --git itests/hive-unit/src/test/java/org/apache/hive/service/server/TestKillQueryZookeeperManager.java itests/hive-unit/src/test/java/org/apache/hive/service/server/TestKillQueryZookeeperManager.java
deleted file mode 100644
index d9997a9c49..0000000000
--- itests/hive-unit/src/test/java/org/apache/hive/service/server/TestKillQueryZookeeperManager.java
+++ /dev/null
@@ -1,211 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hive.service.server;
-
-import org.apache.curator.framework.CuratorFramework;
-import org.apache.curator.framework.CuratorFrameworkFactory;
-import org.apache.curator.retry.RetryOneTime;
-import org.apache.curator.test.TestingServer;
-import org.apache.curator.utils.CloseableUtils;
-import org.apache.zookeeper.KeeperException;
-import org.junit.After;
-import org.junit.Assert;
-import org.junit.Before;
-import org.junit.Test;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import java.util.concurrent.ExecutorService;
-import java.util.concurrent.Executors;
-import java.util.concurrent.Future;
-import java.util.concurrent.TimeUnit;
-
-/**
- * Tests for {@link KillQueryZookeeperManager}.
- */
-public class TestKillQueryZookeeperManager {
-
-  private static final Logger LOG = LoggerFactory.getLogger(TestKillQueryZookeeperManager.class);
-  private static final String BARRIER_ROOT_PATH = "/killqueries";
-  private static final String QUERYID = "QUERY1";
-  private static final String SERVER1 = "localhost:1234";
-  private static final String SERVER2 = "localhost:1235";
-  private static final String USER = "user";
-  private static final int TIMEOUT = 1000;
-
-  TestingServer server;
-
-  @Before
-  public void setupZookeeper() throws Exception {
-    server = new TestingServer();
-  }
-
-  @After
-  public void shutdown() {
-    if (server != null) {
-      CloseableUtils.closeQuietly(server);
-    }
-  }
-
-  private CuratorFramework getClient() {
-    return CuratorFrameworkFactory.builder().connectString(server.getConnectString()).sessionTimeoutMs(TIMEOUT * 100)
-        .connectionTimeoutMs(TIMEOUT).retryPolicy(new RetryOneTime(1)).build();
-  }
-
-  @Test
-  public void testBarrierServerCrash() throws Exception {
-    try (CuratorFramework client = getClient()) {
-      client.start();
-      client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH);
-      final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier =
-          new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH);
-      barrier.setBarrier(QUERYID, SERVER1, USER, true);
-
-      final ExecutorService service = Executors.newSingleThreadExecutor();
-      Future future = service.submit(() -> {
-        Thread.sleep(TIMEOUT / 2);
-        server.stop();
-        return null;
-      });
-
-      barrier.waitOnBarrier(1, TIMEOUT, TIMEOUT * 2, TimeUnit.MILLISECONDS);
-      future.get();
-      Assert.fail();
-    } catch (KeeperException.ConnectionLossException expected) {
-      // expected
-    }
-  }
-
-  @Test
-  public void testNoBarrier() throws Exception {
-    try (CuratorFramework client = getClient()) {
-      client.start();
-      client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH);
-      IllegalStateException result = null;
-      final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier =
-          new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH);
-      try {
-        barrier.confirmProgress(SERVER1);
-      } catch (IllegalStateException e) {
-        result = e;
-      }
-      Assert.assertNotNull(result);
-      Assert.assertEquals("Barrier is not initialised", result.getMessage());
-    }
-  }
-
-  @Test
-  public void testNo() throws Exception {
-    try (CuratorFramework client = getClient()) {
-      client.start();
-      client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH);
-      final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier =
-          new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH);
-      barrier.setBarrier(QUERYID, SERVER1, USER, true);
-
-      ExecutorService service = Executors.newSingleThreadExecutor();
-      service.submit(() -> {
-        Thread.sleep(TIMEOUT / 2);
-        barrier.confirmNo(SERVER2);
-        return null;
-      });
-
-      Assert.assertFalse(barrier.waitOnBarrier(1, TIMEOUT, TIMEOUT * 2, TimeUnit.MILLISECONDS));
-    }
-  }
-
-  @Test
-  public void testDone() throws Exception {
-    try (CuratorFramework client = getClient()) {
-      client.start();
-      client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH);
-      final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier =
-          new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH);
-      barrier.setBarrier(QUERYID, SERVER1, USER, true);
-
-      ExecutorService service = Executors.newSingleThreadExecutor();
-      service.submit(() -> {
Thread.sleep(TIMEOUT / 2); - try { - barrier.confirmProgress(SERVER2); - Thread.sleep(TIMEOUT / 2); - barrier.confirmDone(SERVER2); - } catch (Exception e) { - LOG.error("Confirmation error", e); - } - return null; - }); - - Assert.assertTrue(barrier.waitOnBarrier(1, TIMEOUT, TIMEOUT, TimeUnit.MILLISECONDS)); - } - } - - @Test - public void testFailed() throws Exception { - try (CuratorFramework client = getClient()) { - client.start(); - client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH); - final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier = - new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH); - barrier.setBarrier(QUERYID, SERVER1, USER, true); - - ExecutorService service = Executors.newSingleThreadExecutor(); - service.submit(() -> { - Thread.sleep(TIMEOUT / 2); - barrier.confirmProgress(SERVER2); - Thread.sleep(TIMEOUT / 2); - barrier.confirmFailed(SERVER2); - return null; - }); - - Assert.assertFalse(barrier.waitOnBarrier(1, TIMEOUT, TIMEOUT * 2, TimeUnit.MILLISECONDS)); - } - } - - @Test - public void testConfirmTimeout() throws Exception { - try (CuratorFramework client = getClient()) { - client.start(); - client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH); - final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier = - new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH); - barrier.setBarrier(QUERYID, SERVER1, USER, true); - - Assert.assertFalse(barrier.waitOnBarrier(1, TIMEOUT, TIMEOUT * 2, TimeUnit.MILLISECONDS)); - } - } - - @Test - public void testKillTimeout() throws Exception { - try (CuratorFramework client = getClient()) { - client.start(); - client.create().creatingParentContainersIfNeeded().forPath(BARRIER_ROOT_PATH); - final KillQueryZookeeperManager.KillQueryZookeeperBarrier barrier = - new KillQueryZookeeperManager.KillQueryZookeeperBarrier(client, BARRIER_ROOT_PATH); - barrier.setBarrier(QUERYID, SERVER1, USER, true); - ExecutorService service = Executors.newSingleThreadExecutor(); - service.submit(() -> { - Thread.sleep(TIMEOUT / 2); - barrier.confirmProgress(SERVER2); - // server died - return null; - }); - Assert.assertFalse(barrier.waitOnBarrier(1, TIMEOUT, TIMEOUT * 2, TimeUnit.MILLISECONDS)); - } - } -} diff --git itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java index 7f25c74e6c..eb8008635d 100644 --- itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java +++ itests/util/src/main/java/org/apache/hive/jdbc/miniHS2/MiniHS2.java @@ -115,10 +115,6 @@ public Builder withMiniMR() { this.miniClusterType = MiniClusterType.MR; return this; } - public Builder withMiniTez() { - this.miniClusterType = MiniClusterType.TEZ; - return this; - } public Builder withMiniKdc(String serverPrincipal, String serverKeytab) { this.useMiniKdc = true; diff --git jdbc-handler/pom.xml jdbc-handler/pom.xml index 268f38ef11..b2d9ec8eb9 100644 --- jdbc-handler/pom.xml +++ jdbc-handler/pom.xml @@ -115,7 +115,7 @@ org.powermock - powermock-api-mockito + powermock-api-mockito2 ${powermock.version} test diff --git jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestJdbcInputFormat.java jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestJdbcInputFormat.java index cde97d6473..07102170e2 100644 --- jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestJdbcInputFormat.java +++ 
jdbc-handler/src/test/java/org/apache/hive/storage/jdbc/TestJdbcInputFormat.java @@ -39,8 +39,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.when; @RunWith(PowerMockRunner.class) diff --git jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java index 56f1bd5d63..7f0d8dcdf4 100644 --- jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java +++ jdbc/src/java/org/apache/hive/jdbc/HiveConnection.java @@ -541,15 +541,13 @@ private CloseableHttpClient getHttpClient(Boolean useSsl) throws SQLException { public boolean retryRequest(final HttpResponse response, final int executionCount, final HttpContext context) { int statusCode = response.getStatusLine().getStatusCode(); - boolean sentCredentials = context.getAttribute(Utils.HIVE_SERVER2_SENT_CREDENTIALS) != null && - context.getAttribute(Utils.HIVE_SERVER2_SENT_CREDENTIALS).equals(Utils.HIVE_SERVER2_CONST_TRUE); - boolean ret = statusCode == 401 && executionCount <= 1 && !sentCredentials; + boolean ret = statusCode == 401 && executionCount <= 1; // Set the context attribute to true which will be interpreted by the request // interceptor if (ret) { context.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, - Utils.HIVE_SERVER2_CONST_TRUE); + Utils.HIVE_SERVER2_RETRY_TRUE); } return ret; } diff --git jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java index 543bf8c327..db965e7a22 100644 --- jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java +++ jdbc/src/java/org/apache/hive/jdbc/HiveStatement.java @@ -63,7 +63,6 @@ public class HiveStatement implements java.sql.Statement { public static final Logger LOG = LoggerFactory.getLogger(HiveStatement.class.getName()); - public static final String QUERY_CANCELLED_MESSAGE = "Query was cancelled."; private static final int DEFAULT_FETCH_SIZE = HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE.defaultIntVal; @@ -395,9 +394,9 @@ TGetOperationStatusResp waitForOperationToComplete() throws SQLException { // 01000 -> warning String errMsg = statusResp.getErrorMessage(); if (errMsg != null && !errMsg.isEmpty()) { - throw new SQLException(QUERY_CANCELLED_MESSAGE + " " + errMsg, "01000"); + throw new SQLException("Query was cancelled. " + errMsg, "01000"); } else { - throw new SQLException(QUERY_CANCELLED_MESSAGE, "01000"); + throw new SQLException("Query was cancelled", "01000"); } case TIMEDOUT_STATE: throw new SQLTimeoutException("Query timed out after " + queryTimeout + " seconds"); diff --git jdbc/src/java/org/apache/hive/jdbc/HttpRequestInterceptorBase.java jdbc/src/java/org/apache/hive/jdbc/HttpRequestInterceptorBase.java index 1e6ddebfeb..bb1abd302d 100644 --- jdbc/src/java/org/apache/hive/jdbc/HttpRequestInterceptorBase.java +++ jdbc/src/java/org/apache/hive/jdbc/HttpRequestInterceptorBase.java @@ -70,14 +70,11 @@ public void process(HttpRequest httpRequest, HttpContext httpContext) Utils.needToSendCredentials(cookieStore, cookieName, isSSL)))) || (httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY) != null && httpContext.getAttribute(Utils.HIVE_SERVER2_RETRY_KEY). 
- equals(Utils.HIVE_SERVER2_CONST_TRUE)))) { + equals(Utils.HIVE_SERVER2_RETRY_TRUE)))) { addHttpAuthHeader(httpRequest, httpContext); - httpContext.setAttribute(Utils.HIVE_SERVER2_SENT_CREDENTIALS, Utils.HIVE_SERVER2_CONST_TRUE); - } else { - httpContext.setAttribute(Utils.HIVE_SERVER2_SENT_CREDENTIALS, Utils.HIVE_SERVER2_CONST_FALSE); } if (isCookieEnabled) { - httpContext.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, Utils.HIVE_SERVER2_CONST_FALSE); + httpContext.setAttribute(Utils.HIVE_SERVER2_RETRY_KEY, Utils.HIVE_SERVER2_RETRY_FALSE); } // Insert the additional http headers if (additionalHeaders != null) { diff --git jdbc/src/java/org/apache/hive/jdbc/Utils.java jdbc/src/java/org/apache/hive/jdbc/Utils.java index eb7c0c71bc..6cb6853077 100644 --- jdbc/src/java/org/apache/hive/jdbc/Utils.java +++ jdbc/src/java/org/apache/hive/jdbc/Utils.java @@ -64,14 +64,10 @@ private static final String URI_HIVE_PREFIX = "hive2:"; - // This value is set to true by the setServiceUnavailableRetryStrategy() when the server returns 401. - // This value is used only when cookie is sent for authorization. In case the cookie is expired, - // client will send the actual credentials in the next connection request. - // If credentials are sent in the first request it self, then no need to retry. + // This value is set to true by the setServiceUnavailableRetryStrategy() when the server returns 401 static final String HIVE_SERVER2_RETRY_KEY = "hive.server2.retryserver"; - static final String HIVE_SERVER2_SENT_CREDENTIALS = "hive.server2.sentCredentials"; - static final String HIVE_SERVER2_CONST_TRUE = "true"; - static final String HIVE_SERVER2_CONST_FALSE = "false"; + static final String HIVE_SERVER2_RETRY_TRUE = "true"; + static final String HIVE_SERVER2_RETRY_FALSE = "false"; public static class JdbcConnectionParams { // Note on client side parameter naming convention: diff --git jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java index b641395980..394c89cd06 100644 --- jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java +++ jdbc/src/test/org/apache/hive/jdbc/TestHivePreparedStatement.java @@ -18,7 +18,7 @@ package org.apache.hive.jdbc; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import static org.mockito.Mockito.times; diff --git llap-client/pom.xml llap-client/pom.xml index 84e87ec36d..df25bf6709 100644 --- llap-client/pom.xml +++ llap-client/pom.xml @@ -164,7 +164,7 @@ org.powermock - powermock-api-mockito + powermock-api-mockito2 ${powermock.version} test diff --git llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java index fb1bcfedc5..4de03f232d 100644 --- llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java +++ llap-server/src/java/org/apache/hadoop/hive/llap/shufflehandler/IndexCache.java @@ -42,11 +42,10 @@ private final LinkedBlockingQueue queue = new LinkedBlockingQueue(); - public static final String INDEX_CACHE_MB = "llap.shuffle.indexcache.mb"; public IndexCache(Configuration conf) { this.conf = conf; - totalMemoryAllowed = conf.getInt(INDEX_CACHE_MB, 10) * 1024 * 1024; + totalMemoryAllowed = 10 * 1024 * 1024; LOG.info("IndexCache created with max memory = " + totalMemoryAllowed); } diff --git 
llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java index fbe58ff919..d290c483aa 100644 --- llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java +++ llap-server/src/test/org/apache/hadoop/hive/llap/cache/TestLowLevelLrfuCachePolicy.java @@ -24,7 +24,7 @@ import static org.junit.Assert.assertNull; import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.anyLong; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; diff --git llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java index 068aad4f19..33c5439985 100644 --- llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java +++ llap-server/src/test/org/apache/hadoop/hive/llap/daemon/impl/comparator/TestAMReporter.java @@ -40,8 +40,8 @@ import java.util.concurrent.atomic.AtomicReference; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyInt; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git llap-server/src/test/org/apache/hadoop/hive/llap/io/encoded/TestVectorDeserializeOrcWriter.java llap-server/src/test/org/apache/hadoop/hive/llap/io/encoded/TestVectorDeserializeOrcWriter.java index 647538eb84..7a920e68f9 100644 --- llap-server/src/test/org/apache/hadoop/hive/llap/io/encoded/TestVectorDeserializeOrcWriter.java +++ llap-server/src/test/org/apache/hadoop/hive/llap/io/encoded/TestVectorDeserializeOrcWriter.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.hive.llap.io.encoded; +import java.lang.reflect.Field; import java.util.ArrayList; import java.util.Queue; import java.util.concurrent.ConcurrentLinkedQueue; @@ -35,11 +36,10 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNotEquals; +import static org.junit.Assert.fail; import static org.mockito.Mockito.CALLS_REAL_METHODS; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.withSettings; -import static org.mockito.internal.util.reflection.Whitebox.getInternalState; -import static org.mockito.internal.util.reflection.Whitebox.setInternalState; /** * Unit tests for VectorDeserializeOrcWriter. 
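Note: Mockito 2 dropped the internal org.mockito.internal.util.reflection.Whitebox helper (it was never public API), so the hunk below swaps the old getInternalState/setInternalState calls for hand-rolled java.lang.reflect look-ups that walk up the class hierarchy. A minimal, self-contained sketch of the same pattern; the Base/Child classes and the "hidden" field are hypothetical, not from this patch:

import java.lang.reflect.Field;

public final class ReflectionSketch {
  static class Base { private int hidden = 42; }
  static class Child extends Base { }

  // Search the class and its ancestors for the named field, mirroring what the
  // test helpers below do in place of Whitebox.
  static Field findField(Class<?> type, String name) throws NoSuchFieldException {
    for (Class<?> c = type; c != null; c = c.getSuperclass()) {
      try {
        Field f = c.getDeclaredField(name);
        f.setAccessible(true);
        return f;
      } catch (NoSuchFieldException ignored) {
        // not declared on this class; keep climbing the hierarchy
      }
    }
    throw new NoSuchFieldException(name);
  }

  public static void main(String[] args) throws Exception {
    Field f = findField(Child.class, "hidden"); // resolved on Base, not Child
    System.out.println(f.get(new Child()));     // prints 42
  }
}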
@@ -48,13 +48,43 @@ private static final int TEST_NUM_COLS = 2; + private static Field reflectField(Class classToReflect, String fieldNameValueToFetch) { + try { + Field reflectField = null; + Class classForReflect = classToReflect; + do { + try { + reflectField = classForReflect.getDeclaredField(fieldNameValueToFetch); + } catch (NoSuchFieldException e) { + classForReflect = classForReflect.getSuperclass(); + } + } while (reflectField == null && classForReflect != null); + reflectField.setAccessible(true); + return reflectField; + } catch (Exception e) { + fail("Failed to reflect " + fieldNameValueToFetch + " from " + classToReflect); + } + return null; + } + + private static void reflectSetValue(Object objToReflect, String fieldNameToSet, Object valueToSet) { + try { + Field reflectField = reflectField(objToReflect.getClass(), fieldNameToSet); + reflectField.set(objToReflect, valueToSet); + } catch (Exception e) { + fail("Failed to reflectively set " + fieldNameToSet + "=" + valueToSet); + } + } + @Test public void testConcurrencyIssueWhileWriting() throws Exception { //Setup//////////////////////////////////////////////////////////////////////////////////////// EncodedDataConsumer consumer = createBlankEncodedDataConsumer(); + Field cvbPoolField = EncodedDataConsumer.class.getDeclaredField("cvbPool"); + cvbPoolField.setAccessible(true); FixedSizedObjectPool cvbPool = (FixedSizedObjectPool) - getInternalState(consumer, "cvbPool"); + cvbPoolField.get(consumer); ColumnVectorBatch cvb = new ColumnVectorBatch(TEST_NUM_COLS); VectorizedRowBatch vrb = new VectorizedRowBatch(TEST_NUM_COLS); @@ -104,12 +134,13 @@ private static void createTestVectors(ColumnVectorBatch cvb, VectorizedRowBatch private static VectorDeserializeOrcWriter createOrcWriter( Queue writeOpQueue, VectorizedRowBatch vrb) { VectorDeserializeOrcWriter orcWriter = mock(VectorDeserializeOrcWriter.class, - withSettings().defaultAnswer(CALLS_REAL_METHODS)); - setInternalState(orcWriter, "sourceBatch", vrb); - setInternalState(orcWriter, "destinationBatch", vrb); - setInternalState(orcWriter, "currentBatches", new ArrayList()); - setInternalState(orcWriter, "queue", writeOpQueue); - setInternalState(orcWriter, "isAsync", true); + withSettings().defaultAnswer(CALLS_REAL_METHODS)); + + reflectSetValue(orcWriter, "sourceBatch", vrb); + reflectSetValue(orcWriter, "destinationBatch", vrb); + reflectSetValue(orcWriter, "currentBatches", new ArrayList()); + reflectSetValue(orcWriter, "queue", writeOpQueue); + reflectSetValue(orcWriter, "isAsync", true); return orcWriter; } diff --git llap-server/src/test/org/apache/hadoop/hive/llap/shufflehandler/TestIndexCache.java llap-server/src/test/org/apache/hadoop/hive/llap/shufflehandler/TestIndexCache.java deleted file mode 100644 index 851e9c0b69..0000000000 --- llap-server/src/test/org/apache/hadoop/hive/llap/shufflehandler/TestIndexCache.java +++ /dev/null @@ -1,336 +0,0 @@ -/** - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License.
You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hadoop.hive.llap.shufflehandler; - -import java.io.DataOutputStream; -import java.io.FileNotFoundException; -import java.io.IOException; -import java.util.Random; -import java.util.zip.CRC32; -import java.util.zip.CheckedOutputStream; - -import org.apache.hadoop.conf.Configuration; -import org.apache.hadoop.fs.ChecksumException; -import org.apache.hadoop.fs.FileStatus; -import org.apache.hadoop.fs.FileSystem; -import org.apache.hadoop.fs.Path; -import org.apache.hadoop.fs.FSDataOutputStream; -import org.apache.hadoop.security.UserGroupInformation; - -import org.apache.tez.runtime.library.common.Constants; -import org.apache.tez.runtime.library.common.sort.impl.TezIndexRecord; -import org.junit.Before; -import org.junit.Test; - -import static org.apache.hadoop.hive.llap.shufflehandler.IndexCache.INDEX_CACHE_MB; - -import static org.junit.Assert.*; - -public class TestIndexCache { - private Configuration conf; - private FileSystem fs; - private Path p; - - @Before - public void setUp() throws IOException { - conf = new Configuration(); - fs = FileSystem.getLocal(conf).getRaw(); - p = new Path(System.getProperty("test.build.data", "/tmp"), - "cache").makeQualified(fs.getUri(), fs.getWorkingDirectory()); - } - - @Test - public void testLRCPolicy() throws Exception { - Random r = new Random(); - long seed = r.nextLong(); - r.setSeed(seed); - System.out.println("seed: " + seed); - fs.delete(p, true); - conf.setInt(INDEX_CACHE_MB, 1); - final int partsPerMap = 1000; - final int bytesPerFile = partsPerMap * 24; - IndexCache cache = new IndexCache(conf); - - // fill cache - int totalsize = bytesPerFile; - for (; totalsize < 1024 * 1024; totalsize += bytesPerFile) { - Path f = new Path(p, Integer.toString(totalsize, 36)); - writeFile(fs, f, totalsize, partsPerMap); - TezIndexRecord rec = cache.getIndexInformation( - Integer.toString(totalsize, 36), r.nextInt(partsPerMap), f, - UserGroupInformation.getCurrentUser().getShortUserName()); - checkRecord(rec, totalsize); - } - - // delete files, ensure cache retains all elem - for (FileStatus stat : fs.listStatus(p)) { - fs.delete(stat.getPath(),true); - } - for (int i = bytesPerFile; i < 1024 * 1024; i += bytesPerFile) { - Path f = new Path(p, Integer.toString(i, 36)); - TezIndexRecord rec = cache.getIndexInformation(Integer.toString(i, 36), - r.nextInt(partsPerMap), f, - UserGroupInformation.getCurrentUser().getShortUserName()); - checkRecord(rec, i); - } - - // push oldest (bytesPerFile) out of cache - Path f = new Path(p, Integer.toString(totalsize, 36)); - writeFile(fs, f, totalsize, partsPerMap); - cache.getIndexInformation(Integer.toString(totalsize, 36), - r.nextInt(partsPerMap), f, - UserGroupInformation.getCurrentUser().getShortUserName()); - fs.delete(f, false); - - // oldest fails to read, or error - boolean fnf = false; - try { - cache.getIndexInformation(Integer.toString(bytesPerFile, 36), - r.nextInt(partsPerMap), new Path(p, Integer.toString(bytesPerFile)), - UserGroupInformation.getCurrentUser().getShortUserName()); - } catch (IOException e) { - if (e.getCause() == null || - !(e.getCause() instanceof 
FileNotFoundException)) { - throw e; - } - else { - fnf = true; - } - } - if (!fnf) - fail("Failed to push out last entry"); - // should find all the other entries - for (int i = bytesPerFile << 1; i < 1024 * 1024; i += bytesPerFile) { - TezIndexRecord rec = cache.getIndexInformation(Integer.toString(i, 36), - r.nextInt(partsPerMap), new Path(p, Integer.toString(i, 36)), - UserGroupInformation.getCurrentUser().getShortUserName()); - checkRecord(rec, i); - } - TezIndexRecord rec = cache.getIndexInformation(Integer.toString(totalsize, 36), - r.nextInt(partsPerMap), f, - UserGroupInformation.getCurrentUser().getShortUserName()); - - checkRecord(rec, totalsize); - } - - @Test - public void testBadIndex() throws Exception { - final int parts = 30; - fs.delete(p, true); - conf.setInt(INDEX_CACHE_MB, 1); - IndexCache cache = new IndexCache(conf); - - Path f = new Path(p, "badindex"); - FSDataOutputStream out = fs.create(f, false); - CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32()); - DataOutputStream dout = new DataOutputStream(iout); - for (int i = 0; i < parts; ++i) { - for (int j = 0; j < Constants.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) { - if (0 == (i % 3)) { - dout.writeLong(i); - } else { - out.writeLong(i); - } - } - } - out.writeLong(iout.getChecksum().getValue()); - dout.close(); - try { - cache.getIndexInformation("badindex", 7, f, - UserGroupInformation.getCurrentUser().getShortUserName()); - fail("Did not detect bad checksum"); - } catch (IOException e) { - if (!(e.getCause() instanceof ChecksumException)) { - throw e; - } - } - } - - @Test - public void testInvalidReduceNumberOrLength() throws Exception { - fs.delete(p, true); - conf.setInt(INDEX_CACHE_MB, 1); - final int partsPerMap = 1000; - final int bytesPerFile = partsPerMap * 24; - IndexCache cache = new IndexCache(conf); - - // fill cache - Path feq = new Path(p, "invalidReduceOrPartsPerMap"); - writeFile(fs, feq, bytesPerFile, partsPerMap); - - // Number of reducers should always be less than partsPerMap as reducer - // numbers start from 0 and there cannot be more reducer than parts - - try { - // Number of reducers equal to partsPerMap - cache.getIndexInformation("reduceEqualPartsPerMap", - partsPerMap, // reduce number == partsPerMap - feq, UserGroupInformation.getCurrentUser().getShortUserName()); - fail("Number of reducers equal to partsPerMap did not fail"); - } catch (Exception e) { - if (!(e instanceof IOException)) { - throw e; - } - } - - try { - // Number of reducers more than partsPerMap - cache.getIndexInformation( - "reduceMorePartsPerMap", - partsPerMap + 1, // reduce number > partsPerMap - feq, UserGroupInformation.getCurrentUser().getShortUserName()); - fail("Number of reducers more than partsPerMap did not fail"); - } catch (Exception e) { - if (!(e instanceof IOException)) { - throw e; - } - } - } - - @Test - public void testRemoveMap() throws Exception { - // This test case use two thread to call getIndexInformation and - // removeMap concurrently, in order to construct race condition. - // This test case may not repeatable. But on my macbook this test - // fails with probability of 100% on code before MAPREDUCE-2541, - // so it is repeatable in practice. 
- fs.delete(p, true); - conf.setInt(INDEX_CACHE_MB, 10); - // Make a big file so removeMapThread almost surely runs faster than - // getInfoThread - final int partsPerMap = 100000; - final int bytesPerFile = partsPerMap * 24; - final IndexCache cache = new IndexCache(conf); - - final Path big = new Path(p, "bigIndex"); - final String user = - UserGroupInformation.getCurrentUser().getShortUserName(); - writeFile(fs, big, bytesPerFile, partsPerMap); - - // run multiple times - for (int i = 0; i < 20; ++i) { - Thread getInfoThread = new Thread() { - @Override - public void run() { - try { - cache.getIndexInformation("bigIndex", partsPerMap, big, user); - } catch (Exception e) { - // should not be here - } - } - }; - Thread removeMapThread = new Thread() { - @Override - public void run() { - cache.removeMap("bigIndex"); - } - }; - if (i%2==0) { - getInfoThread.start(); - removeMapThread.start(); - } else { - removeMapThread.start(); - getInfoThread.start(); - } - getInfoThread.join(); - removeMapThread.join(); - assertEquals(true, cache.checkTotalMemoryUsed()); - } - } - - @Test - public void testCreateRace() throws Exception { - fs.delete(p, true); - conf.setInt(INDEX_CACHE_MB, 1); - final int partsPerMap = 1000; - final int bytesPerFile = partsPerMap * 24; - final IndexCache cache = new IndexCache(conf); - - final Path racy = new Path(p, "racyIndex"); - final String user = - UserGroupInformation.getCurrentUser().getShortUserName(); - writeFile(fs, racy, bytesPerFile, partsPerMap); - - // run multiple instances - Thread[] getInfoThreads = new Thread[50]; - for (int i = 0; i < 50; i++) { - getInfoThreads[i] = new Thread() { - @Override - public void run() { - try { - cache.getIndexInformation("racyIndex", partsPerMap, racy, user); - cache.removeMap("racyIndex"); - } catch (Exception e) { - // should not be here - } - } - }; - } - - for (int i = 0; i < 50; i++) { - getInfoThreads[i].start(); - } - - final Thread mainTestThread = Thread.currentThread(); - - Thread timeoutThread = new Thread() { - @Override - public void run() { - try { - Thread.sleep(15000); - mainTestThread.interrupt(); - } catch (InterruptedException ie) { - // we are done; - } - } - }; - - for (int i = 0; i < 50; i++) { - try { - getInfoThreads[i].join(); - } catch (InterruptedException ie) { - // we haven't finished in time. Potential deadlock/race. - fail("Unexpectedly long delay during concurrent cache entry creations"); - } - } - // stop the timeoutThread. If we get interrupted before stopping, there - // must be something wrong, although it wasn't a deadlock. No need to - // catch and swallow. 
- timeoutThread.interrupt(); - } - - private static void checkRecord(TezIndexRecord rec, long fill) { - assertEquals(fill, rec.getStartOffset()); - assertEquals(fill, rec.getRawLength()); - assertEquals(fill, rec.getPartLength()); - } - - private static void writeFile(FileSystem fs, Path f, long fill, int parts) - throws IOException { - FSDataOutputStream out = fs.create(f, false); - CheckedOutputStream iout = new CheckedOutputStream(out, new CRC32()); - DataOutputStream dout = new DataOutputStream(iout); - for (int i = 0; i < parts; ++i) { - for (int j = 0; j < Constants.MAP_OUTPUT_INDEX_RECORD_LENGTH / 8; ++j) { - dout.writeLong(fill); - } - } - out.writeLong(iout.getChecksum().getValue()); - dout.close(); - } -} diff --git llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java index 5efe7c677c..9b4ac271f0 100644 --- llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java +++ llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskCommunicator.java @@ -17,9 +17,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertNull; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; -import static org.mockito.Mockito.RETURNS_DEEP_STUBS; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; diff --git llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java index 46007559cd..707012399b 100644 --- llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java +++ llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/TestLlapTaskSchedulerService.java @@ -18,8 +18,8 @@ import static org.junit.Assert.assertFalse; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.eq; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; diff --git llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestBlacklistingLlapMetricsListener.java llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestBlacklistingLlapMetricsListener.java index dec75865c9..9216c6ea43 100644 --- llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestBlacklistingLlapMetricsListener.java +++ llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestBlacklistingLlapMetricsListener.java @@ -40,8 +40,9 @@ import java.util.Map; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyLong; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyLong; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.never; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.times; @@ -72,6 +73,9 @@ @Mock private LlapServiceInstanceSet mockInstanceSet; + @Mock + private LlapServiceInstance mockLlapServiceInstance; + @Before public void 
setUp() throws Exception { initMocks(this); @@ -80,9 +84,10 @@ public void setUp() throws Exception { when(mockRegistry.getInstances()).thenReturn(mockInstanceSet); when(mockRegistry.lockForConfigChange(anyLong(), anyLong())).thenReturn( new ConfigChangeLockResult(true, Long.MIN_VALUE)); + when(mockRegistry.getInstances().getInstance(anyString())).thenReturn(mockLlapServiceInstance); when(mockClientFactory.create(any(LlapServiceInstance.class))).thenReturn(mockClient); when(mockClient.setCapacity( - any(RpcController.class), + any(), any(SetCapacityRequestProto.class))).thenReturn(TEST_RESPONSE); listener = new BlacklistingLlapMetricsListener(); @@ -97,7 +102,7 @@ public void testBlacklist() throws ServiceException { // Then ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(String.class); - verify(mockClient, times(1)).setCapacity(any(RpcController.class), any(SetCapacityRequestProto.class)); + verify(mockClient, times(1)).setCapacity(any(), any(SetCapacityRequestProto.class)); verify(mockInstanceSet, times(1)).getInstance(argumentCaptor.capture()); assertEquals("3", argumentCaptor.getValue()); } @@ -158,7 +163,7 @@ public void testCheckTime() throws Exception { listener.newClusterMetrics(data); ArgumentCaptor argumentCaptor = ArgumentCaptor.forClass(String.class); - verify(mockClient, times(1)).setCapacity(any(RpcController.class), any(SetCapacityRequestProto.class)); + verify(mockClient, times(1)).setCapacity(any(), any(SetCapacityRequestProto.class)); verify(mockInstanceSet, times(1)).getInstance(argumentCaptor.capture()); assertEquals("3", argumentCaptor.getValue()); } diff --git llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapMetricsCollector.java llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapMetricsCollector.java index f212ac6638..00e72b52ba 100644 --- llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapMetricsCollector.java +++ llap-tez/src/test/org/apache/hadoop/hive/llap/tezplugins/metrics/TestLlapMetricsCollector.java @@ -15,7 +15,6 @@ package org.apache.hadoop.hive.llap.tezplugins.metrics; import com.google.common.collect.Lists; -import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; @@ -38,7 +37,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.anyLong; +import static org.mockito.ArgumentMatchers.anyLong; import static org.mockito.Mockito.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; @@ -82,7 +81,7 @@ public void setUp() throws ServiceException { .thenReturn(MockListener.class.getName()); when(mockClientFactory.create(any(LlapServiceInstance.class))).thenReturn(mockClient); when(mockClient.getDaemonMetrics( - any(RpcController.class), + any(), // can be NULL any(LlapDaemonProtocolProtos.GetDaemonMetricsRequestProto.class))).thenReturn(TEST_RESPONSE); collector = new LlapMetricsCollector(mockConf, mockExecutor, mockClientFactory); } @@ -93,6 +92,8 @@ public void testAddService() { LlapServiceInstance mockService = mock(LlapServiceInstance.class); when(mockService.getWorkerIdentity()).thenReturn(TEST_IDENTITY_1); + assertNotNull(collector); + // When collector.onCreate(mockService, TEST_SEQ_VERSION); collector.collectMetrics(); diff --git pom.xml pom.xml index ae81eea3c4..486ef24d26 100644 --- pom.xml +++ pom.xml @@
-193,8 +193,8 @@ 2.5.0 2.3 1.5.9 - 1.10.19 - 1.7.4 + 3.3.3 + 2.0.2 2.0.0-M5 4.1.48.Final 3.10.5.Final diff --git ql/pom.xml ql/pom.xml index 864ec49c5e..a31ad6fa71 100644 --- ql/pom.xml +++ ql/pom.xml @@ -29,7 +29,7 @@ .. - 1.6.6 + 2.0.2 0.9.10 @@ -762,7 +762,7 @@ org.powermock - powermock-api-mockito + powermock-api-mockito2 ${powermock.version} test diff --git ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/KillQueriesOperation.java ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/KillQueriesOperation.java index 26c7fb8b8f..afde1a4762 100644 --- ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/KillQueriesOperation.java +++ ql/src/java/org/apache/hadoop/hive/ql/ddl/process/kill/KillQueriesOperation.java @@ -31,13 +31,11 @@ public KillQueriesOperation(DDLOperationContext context, KillQueriesDesc desc) { super(context, desc); } - public static final String KILL_QUERY_MESSAGE = "User invoked KILL QUERY"; - @Override public int execute() throws HiveException { SessionState sessionState = SessionState.get(); for (String queryId : desc.getQueryIds()) { - sessionState.getKillQuery().killQuery(queryId, KILL_QUERY_MESSAGE, context.getDb().getConf()); + sessionState.getKillQuery().killQuery(queryId, "User invoked KILL QUERY", context.getDb().getConf()); } LOG.info("kill query called ({})", desc.getQueryIds()); return 0; diff --git ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java index 5b3fa8fed2..8becef1cd3 100644 --- ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java +++ ql/src/java/org/apache/hadoop/hive/ql/exec/tez/TezSessionState.java @@ -842,11 +842,8 @@ private void addJarLRByClassName(String className, final Map clazz, final Map lrMap) throws IOException, LoginException { - String jarPath = Utilities.jarFinderGetJar(clazz); - if (jarPath == null) { - throw new IOException("Can't find jar for: " + clazz); - } - final File jar = new File(jarPath); + final File jar = + new File(Utilities.jarFinderGetJar(clazz)); final String localJarPath = jar.toURI().toURL().toExternalForm(); final LocalResource jarLr = createJarLocalResource(localJarPath); lrMap.put(DagUtils.getBaseName(jarLr), jarLr); diff --git ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java index 5921044fea..7566f8ea59 100644 --- ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java +++ ql/src/test/org/apache/hadoop/hive/ql/TestCompileLock.java @@ -57,7 +57,7 @@ import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.eq; +import static org.mockito.ArgumentMatchers.eq; /** * Class for testing HS2 compile lock behavior (serializable, parallel unbounded, parallel bounded). 
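Note: most of the remaining hunks are the mechanical half of the version bump pinned in the root pom above (mockito 1.10.19 -> 3.3.3, powermock 1.7.4 -> 2.0.2): static matcher imports move from org.mockito.Matchers, deprecated since Mockito 2, to org.mockito.ArgumentMatchers. The stubbing syntax itself is unchanged. A minimal sketch; the Service interface is hypothetical, not from this patch:

import static org.mockito.ArgumentMatchers.anyInt; // was org.mockito.Matchers.anyInt in 1.x
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class MatcherRenameSketch {
  interface Service { String lookup(int id); } // hypothetical collaborator

  public static void main(String[] args) {
    Service service = mock(Service.class);
    when(service.lookup(anyInt())).thenReturn("stubbed"); // matcher usage is identical
    System.out.println(service.lookup(7)); // prints "stubbed"
  }
}

In most files the migration is therefore a pure import rewrite, which is why the hunks below touch only import blocks and the occasional fully-qualified Mockito call.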
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetInputSummary.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetInputSummary.java index 5f55cebb98..160892c7dc 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetInputSummary.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestGetInputSummary.java @@ -19,7 +19,7 @@ package org.apache.hadoop.hive.ql.exec; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java index 3e45016450..7821f40a82 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckCreatePartitionsInBatches.java @@ -17,16 +17,6 @@ */ package org.apache.hadoop.hive.ql.exec; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.CheckResult.PartitionResult; @@ -54,6 +44,22 @@ import org.mockito.ArgumentCaptor; import org.mockito.Mockito; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.junit.Assert.assertFalse; +import static org.junit.Assert.assertTrue; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; + public class TestMsckCreatePartitionsInBatches { private static HiveConf hiveConf; private static Msck msck; @@ -146,14 +152,14 @@ private void cleanUpTableQuietly(String catName, String dbName, String tableName public void testNumberOfCreatePartitionCalls() throws Exception { // create 10 dummy partitions Set partsNotInMs = createPartsNotInMs(10); - IMetaStoreClient spyDb = Mockito.spy(db); + IMetaStoreClient spyDb = spy(db); // batch size of 5 and decaying factor of 2 msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 5, 2, 0); // there should be 2 calls to create partitions with each batch size of 5 ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); - Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); + verify(spyDb, times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); // confirm the batch sizes were 5, 5 in the two calls to create partitions List> apds = argParts.getAllValues(); int retryAttempt = 1; @@ -175,14 +181,14 @@ public void testNumberOfCreatePartitionCalls() throws Exception { public void 
testUnevenNumberOfCreatePartitionCalls() throws Exception { // create 9 dummy partitions Set partsNotInMs = createPartsNotInMs(9); - IMetaStoreClient spyDb = Mockito.spy(db); + IMetaStoreClient spyDb = spy(db); // batch size of 5 and decaying factor of 2 msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 5, 2, 0); // there should be 2 calls to create partitions with batch sizes of 5, 4 ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); - Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); + verify(spyDb, times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); // confirm the batch sizes were 5, 4 in the two calls to create partitions List> apds = argParts.getAllValues(); int retryAttempt = 1; @@ -204,7 +210,7 @@ public void testUnevenNumberOfCreatePartitionCalls() throws Exception { public void testEqualNumberOfPartitions() throws Exception { // create 13 dummy partitions Set partsNotInMs = createPartsNotInMs(13); - IMetaStoreClient spyDb = Mockito.spy(db); + IMetaStoreClient spyDb = spy(db); // batch size of 13 and decaying factor of 2 msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 13, 2, 0); // there should be 1 call to create partitions with batch sizes of 13 @@ -212,7 +218,7 @@ public void testEqualNumberOfPartitions() throws Exception { ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); // there should be 1 call to create partitions with batch sizes of 13 - Mockito.verify(spyDb, Mockito.times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), + verify(spyDb, times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); Assert.assertEquals("Unexpected number of batch size", 13, argParts.getValue().size()); @@ -230,17 +236,16 @@ public void testEqualNumberOfPartitions() throws Exception { public void testSmallNumberOfPartitions() throws Exception { // create 10 dummy partitions Set partsNotInMs = createPartsNotInMs(10); - IMetaStoreClient spyDb = Mockito.spy(db); + IMetaStoreClient spyDb = spy(db); // batch size of 20 and decaying factor of 2 msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 20, 2, 0); // there should be 1 call to create partitions with batch sizes of 10 - Mockito.verify(spyDb, Mockito.times(1)).add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), - Mockito.anyBoolean()); + verify(spyDb, times(1)).add_partitions(Mockito.anyObject(), anyBoolean(), anyBoolean()); ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); // there should be 1 call to create partitions with batch sizes of 10 - Mockito.verify(spyDb, Mockito.times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), + verify(spyDb, times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); Assert.assertEquals("Unexpected number of batch size", 10, argParts.getValue().size()); @@ 
-258,20 +263,19 @@ public void testSmallNumberOfPartitions() throws Exception { public void testBatchingWhenException() throws Exception { // create 13 dummy partitions Set partsNotInMs = createPartsNotInMs(23); - IMetaStoreClient spyDb = Mockito.spy(db); + IMetaStoreClient spyDb = spy(db); // first call to createPartitions should throw exception - Mockito.doThrow(HiveException.class).doCallRealMethod().doCallRealMethod().when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), - Mockito.anyBoolean()); + doThrow(MetaException.class).doCallRealMethod().doCallRealMethod().when(spyDb) + .add_partitions(any(), anyBoolean(), anyBoolean()); // test with a batch size of 30 and decaying factor of 2 msck.createPartitionsInBatches(spyDb, repairOutput, partsNotInMs, table, 30, 2, 0); // confirm the batch sizes were 23, 15, 8 in the three calls to create partitions ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); // there should be 3 calls to create partitions with batch sizes of 23, 15, 8 - Mockito.verify(spyDb, Mockito.times(3)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), + verify(spyDb, times(3)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); List> apds = argParts.getAllValues(); int retryAttempt = 1; @@ -298,9 +302,9 @@ public void testBatchingWhenException() throws Exception { @Test public void testRetriesExhaustedBatchSize() throws Exception { Set partsNotInMs = createPartsNotInMs(17); - IMetaStoreClient spyDb = Mockito.spy(db); - Mockito.doThrow(HiveException.class).when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean()); + IMetaStoreClient spyDb = spy(db); + doThrow(MetaException.class).when(spyDb) + .add_partitions(any(), anyBoolean(), anyBoolean()); // batch size of 5 and decaying factor of 2 Exception ex = null; try { @@ -313,9 +317,9 @@ public void testRetriesExhaustedBatchSize() throws Exception { // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 - Mockito.verify(spyDb, Mockito.times(5)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), + verify(spyDb, times(5)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); List> apds = argParts.getAllValues(); int retryAttempt = 1; @@ -345,9 +349,9 @@ public void testRetriesExhaustedBatchSize() throws Exception { @Test public void testMaxRetriesReached() throws Exception { Set partsNotInMs = createPartsNotInMs(17); - IMetaStoreClient spyDb = Mockito.spy(db); - Mockito.doThrow(HiveException.class).when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean()); + IMetaStoreClient spyDb = spy(db); + doThrow(MetaException.class).when(spyDb) + .add_partitions(any(), anyBoolean(), anyBoolean()); // batch size of 5 and decaying factor of 2 Exception ex = null; try { @@ -359,8 +363,8 @@ public void testMaxRetriesReached() throws Exception { 
Assert.assertTrue("Unexpected class of exception thrown", ex instanceof RetryUtilities.RetryException); ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); - Mockito.verify(spyDb, Mockito.times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); + verify(spyDb, times(2)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); List> apds = argParts.getAllValues(); int retryAttempt = 1; Assert.assertEquals( @@ -381,9 +385,9 @@ public void testMaxRetriesReached() throws Exception { @Test public void testOneMaxRetries() throws Exception { Set partsNotInMs = createPartsNotInMs(17); - IMetaStoreClient spyDb = Mockito.spy(db); - Mockito.doThrow(HiveException.class).when(spyDb) - .add_partitions(Mockito.anyObject(), Mockito.anyBoolean(), Mockito.anyBoolean()); + IMetaStoreClient spyDb = spy(db); + doThrow(MetaException.class).when(spyDb) + .add_partitions(any(), anyBoolean(), anyBoolean()); // batch size of 5 and decaying factor of 2 Exception ex = null; try { @@ -396,9 +400,9 @@ public void testOneMaxRetries() throws Exception { // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 ArgumentCaptor ifNotExistsArg = ArgumentCaptor.forClass(Boolean.class); ArgumentCaptor needResultsArg = ArgumentCaptor.forClass(Boolean.class); - ArgumentCaptor> argParts = ArgumentCaptor.forClass((Class) List.class); + ArgumentCaptor> argParts = ArgumentCaptor.forClass(List.class); // there should be 5 calls to create partitions with batch sizes of 17, 15, 7, 3, 1 - Mockito.verify(spyDb, Mockito.times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), + verify(spyDb, times(1)).add_partitions(argParts.capture(), ifNotExistsArg.capture(), needResultsArg.capture()); List> apds = argParts.getAllValues(); int retryAttempt = 1; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java index 1ec46364f1..8be31128a1 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestMsckDropPartitionsInBatches.java @@ -17,15 +17,6 @@ */ package org.apache.hadoop.hive.ql.exec; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.fail; - -import java.util.ArrayList; -import java.util.Arrays; -import java.util.HashSet; -import java.util.List; -import java.util.Set; - import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.conf.HiveConf.ConfVars; import org.apache.hadoop.hive.metastore.CheckResult.PartitionResult; @@ -34,7 +25,7 @@ import org.apache.hadoop.hive.metastore.Msck; import org.apache.hadoop.hive.metastore.PartitionDropOptions; import org.apache.hadoop.hive.metastore.api.FieldSchema; -import org.apache.hadoop.hive.metastore.api.MetastoreException; +import org.apache.hadoop.hive.metastore.api.MetaException; import org.apache.hadoop.hive.metastore.api.SerDeInfo; import org.apache.hadoop.hive.metastore.api.StorageDescriptor; import org.apache.hadoop.hive.metastore.api.Table; @@ -50,7 +41,22 @@ import org.junit.BeforeClass; import org.junit.Test; import org.mockito.ArgumentCaptor; -import org.mockito.Mockito; + +import java.util.ArrayList; +import 
java.util.Arrays; +import java.util.HashSet; +import java.util.List; +import java.util.Set; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.fail; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyList; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.Mockito.doThrow; +import static org.mockito.Mockito.spy; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; /** * Unit test for function dropPartitionsInBatches in DDLTask. @@ -166,7 +172,7 @@ private int findMSB(int n) { private void runDropPartitions(int partCount, int batchSize, int maxRetries, int exceptionStatus) throws Exception { - IMetaStoreClient spyDb = Mockito.spy(db); + IMetaStoreClient spyDb = spy(db); // create partCount dummy partitions Set partsNotInFs = dropPartsNotInFs(partCount); @@ -204,10 +210,9 @@ private void runDropPartitions(int partCount, int batchSize, int maxRetries, int expectedCallCount++; // only first call throws exception - Mockito.doThrow(MetastoreException.class).doCallRealMethod().doCallRealMethod().when(spyDb) - .dropPartitions(Mockito.eq(table.getCatName()), Mockito.eq(table.getDbName()), - Mockito.eq(table.getTableName()), - Mockito.any(List.class), Mockito.any(PartitionDropOptions.class)); + doThrow(MetaException.class).doCallRealMethod().doCallRealMethod().when(spyDb) + .dropPartitions(eq(table.getCatName()), eq(table.getDbName()), + eq(table.getTableName()), anyList(), any(PartitionDropOptions.class)); } expectedBatchSizes = new int[expectedCallCount]; @@ -242,9 +247,9 @@ private void runDropPartitions(int partCount, int batchSize, int maxRetries, int expectedBatchSizes[i] = Integer.min(partCount, actualBatchSize); } // all calls fail - Mockito.doThrow(MetastoreException.class).when(spyDb) - .dropPartitions(Mockito.eq(table.getCatName()), Mockito.eq(table.getDbName()), Mockito.eq(table.getTableName()), - Mockito.any(List.class), Mockito.any(PartitionDropOptions.class)); + doThrow(MetaException.class).when(spyDb) + .dropPartitions(eq(table.getCatName()), eq(table.getDbName()), eq(table.getTableName()), + anyList(), any(PartitionDropOptions.class)); Exception ex = null; try { @@ -260,9 +265,9 @@ private void runDropPartitions(int partCount, int batchSize, int maxRetries, int // there should be expectedCallCount calls to drop partitions with each batch size of // actualBatchSize ArgumentCaptor argument = ArgumentCaptor.forClass(List.class); - Mockito.verify(spyDb, Mockito.times(expectedCallCount)) - .dropPartitions(Mockito.eq(table.getCatName()), Mockito.eq(table.getDbName()), Mockito.eq(table.getTableName()), - argument.capture(), Mockito.any(PartitionDropOptions.class)); + verify(spyDb, times(expectedCallCount)) + .dropPartitions(eq(table.getCatName()), eq(table.getDbName()), eq(table.getTableName()), + argument.capture(), any(PartitionDropOptions.class)); // confirm the batch sizes were as expected List droppedParts = argument.getAllValues(); @@ -326,7 +331,7 @@ public void testSmallNumberOfPartitions() throws Exception { /** * Tests the number of calls to dropPartitions and the respective batch sizes when first call to - * dropPartitions throws MetastoreException. The batch size should be reduced once by the + * dropPartitions throws MetaException. The batch size should be reduced once by the * decayingFactor 2, iow after batch size is halved. 
 * * @throws Exception @@ -338,7 +343,7 @@ public void testBatchingWhenException() throws Exception { /** * Tests the retries exhausted case when Hive.DropPartitions method call always keep throwing - * MetastoreException. The batch sizes should exponentially decreased based on the decaying factor and + * MetaException. The batch sizes should decrease exponentially based on the decaying factor and * ultimately give up when it reaches 0. * * @throws Exception diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java index 18b5f270d8..b6a6bab6cb 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/TestUtilities.java @@ -26,7 +26,7 @@ import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; import static org.apache.hadoop.hive.ql.exec.Utilities.getFileExtension; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; @@ -35,7 +35,6 @@ import java.io.File; import java.io.IOException; -import java.io.Serializable; import java.util.ArrayList; import java.util.LinkedHashMap; import java.util.List; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java index 9973e9a733..ebdaa3da83 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/repl/TestReplDumpTask.java @@ -41,11 +41,10 @@ import org.slf4j.Logger; import org.slf4j.LoggerFactory; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.same; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.mock; import static org.powermock.api.mockito.PowerMockito.mockStatic; -import static org.powermock.api.mockito.PowerMockito.verifyStatic; import static org.powermock.api.mockito.PowerMockito.when; import static org.powermock.api.mockito.PowerMockito.whenNew; import static org.apache.hadoop.hive.ql.exec.repl.ReplExternalTables.Writer; @@ -149,7 +148,6 @@ public void removeDBPropertyToPreventRenameWhenBootstrapDumpOfTableFails() throw try { task.bootStrapDump(new Path("mock"), null, mock(Path.class), hive); } finally { - verifyStatic(); Utils.resetDbBootstrapDumpState(same(hive), eq("default"), eq(dbRandomKey)); } } diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java index bf11362e92..881c9f49c0 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/repl/bootstrap/AddDependencyToLeavesTest.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.Serializable; import java.util.Arrays; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java index bde6f6d5cf..98c08e4ea3 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java +++
diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java index bde6f6d5cf..98c08e4ea3 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkTask.java @@ -17,7 +17,7 @@ */ package org.apache.hadoop.hive.ql.exec.spark; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.atLeastOnce; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkUtilities.java ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkUtilities.java index 3af0006474..78ff6ecc2e 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkUtilities.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/spark/TestSparkUtilities.java @@ -42,7 +42,7 @@ import java.util.concurrent.atomic.AtomicReference; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.eq; import static org.mockito.Mockito.mock; @@ -65,7 +65,7 @@ public void testGetSparkSessionUsingMultipleThreadsWithTheSameSession() throws H SparkSessionManager mockSessionManager = mock(SparkSessionManager.class); doAnswer(invocationOnMock -> { - SparkSession sparkSession = invocationOnMock.getArgumentAt(0, SparkSession.class); + SparkSession sparkSession = invocationOnMock.getArgument(0, SparkSession.class); if (sparkSession == null) { return mock(SparkSession.class); } else { @@ -76,7 +76,7 @@ public void testGetSparkSessionUsingMultipleThreadsWithTheSameSession() throws H SessionState mockSessionState = mock(SessionState.class); when(mockSessionState.getConf()).thenReturn(mockConf); doAnswer(invocationOnMock -> { - activeSparkSession.set(invocationOnMock.getArgumentAt(0, SparkSession.class)); + activeSparkSession.set(invocationOnMock.getArgument(0, SparkSession.class)); return null; }).when(mockSessionState).setSparkSession(any(SparkSession.class)); doAnswer(invocationOnMock -> @@ -137,7 +137,7 @@ public void testCreateMoveTaskDoesntCreateCascadeTempDirs() throws Exception { }).when(fsOp).getConf(); doAnswer(invocationOnMock -> { - assertEquals(expectedPathRef.value, invocationOnMock.getArgumentAt(0, Path.class)); + assertEquals(expectedPathRef.value, invocationOnMock.getArgument(0, Path.class)); return null; }).when(fileSinkDesc).setDirName(any(Path.class)); diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java index 6f52d65b13..cdcac4581b 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestTezTask.java @@ -17,22 +17,6 @@ */ package org.apache.hadoop.hive.ql.exec.tez; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertNotNull; -import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.*; -import static org.mockito.Mockito.*; - -import org.apache.hadoop.yarn.api.records.URL; -import org.apache.hive.common.util.Ref; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.LinkedHashMap; -import java.util.List; -import java.util.Map; -import java.util.UUID; -import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.fs.FileSystem; import org.apache.hadoop.fs.Path; import org.apache.hadoop.hive.common.metrics.common.Metrics; @@ -54,6 +38,8 @@ import org.apache.hadoop.mapred.JobConf; import org.apache.hadoop.yarn.api.records.LocalResource; import
org.apache.hadoop.yarn.api.records.Resource; +import org.apache.hadoop.yarn.api.records.URL; +import org.apache.hive.common.util.Ref; import org.apache.tez.client.TezClient; import org.apache.tez.dag.api.DAG; import org.apache.tez.dag.api.Edge; @@ -69,6 +55,28 @@ import org.mockito.invocation.InvocationOnMock; import org.mockito.stubbing.Answer; +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.LinkedHashMap; +import java.util.List; +import java.util.Map; +import java.util.UUID; + +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNotNull; +import static org.junit.Assert.assertTrue; +import static org.mockito.Mockito.any; +import static org.mockito.Mockito.anyMap; +import static org.mockito.Mockito.anyString; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.eq; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.never; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + public class TestTezTask { DagUtils utils; @@ -90,11 +98,11 @@ public void setUp() throws Exception { utils = mock(DagUtils.class); fs = mock(FileSystem.class); path = mock(Path.class); - when(path.getFileSystem(any(Configuration.class))).thenReturn(fs); - when(utils.getTezDir(any(Path.class))).thenReturn(path); + when(path.getFileSystem(any())).thenReturn(fs); + when(utils.getTezDir(any())).thenReturn(path); when( - utils.createVertex(any(JobConf.class), any(BaseWork.class), any(Path.class), - any(TezWork.class), any(Map.class))).thenAnswer( + utils.createVertex(any(), any(BaseWork.class), any(Path.class), + any(TezWork.class), anyMap())).thenAnswer( new Answer() { @Override @@ -105,7 +113,7 @@ public Vertex answer(InvocationOnMock invocation) throws Throwable { } }); - when(utils.createEdge(any(JobConf.class), any(Vertex.class), any(Vertex.class), + when(utils.createEdge(any(), any(Vertex.class), any(Vertex.class), any(TezEdgeProperty.class), any(BaseWork.class), any(TezWork.class))) .thenAnswer(new Answer() { @Override @@ -226,7 +234,7 @@ public void testSubmit() throws Exception { @Test public void testClose() throws HiveException { task.close(work, 0, null); - verify(op, times(4)).jobClose(any(Configuration.class), eq(true)); + verify(op, times(4)).jobClose(any(), eq(true)); } @Test @@ -236,13 +244,13 @@ public void testExistingSessionGetsStorageHandlerResources() throws Exception { LocalResource res = createResource(inputOutputJars[0]); final Map resources = Collections.singletonMap(jarFilePath, res); - when(utils.localizeTempFiles(anyString(), any(Configuration.class), eq(inputOutputJars), + when(utils.localizeTempFiles(anyString(), any(), eq(inputOutputJars), any(String[].class))).thenReturn(resources); when(sessionState.isOpen()).thenReturn(true); when(sessionState.isOpening()).thenReturn(false); task.ensureSessionHasResources(sessionState, inputOutputJars); // TODO: ideally we should have a test for session itself. 
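The TestTezTask hunks above, and several below (including the verify(...) line that follows, and the rowsOptions stub further down whose new comment notes the HiveConf argument can be null), relax any(Foo.class) to a bare any(). The distinction is behavioral in Mockito 2: any(Foo.class) checks the runtime type and no longer matches null, while any() matches everything, null included. A small sketch with a hypothetical Service interface:

```java
import org.apache.hadoop.conf.Configuration;

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

public class AnyMatcherSketch {

  // Hypothetical interface; stands in for the mocked collaborators above.
  interface Service {
    String describe(Configuration conf);
  }

  public static void main(String[] args) {
    Service service = mock(Service.class);
    when(service.describe(any())).thenReturn("matched");

    System.out.println(service.describe(new Configuration())); // matched
    System.out.println(service.describe(null));                // matched
    // Had the stub used any(Configuration.class), the null call above would
    // not match in Mockito 2 and would fall back to the default null return.
  }
}
```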
- verify(sessionState).ensureLocalResources(any(Configuration.class), eq(inputOutputJars)); + verify(sessionState).ensureLocalResources(any(), eq(inputOutputJars)); } private static LocalResource createResource(String url) { diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestUtils.java ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestUtils.java index d0425e5572..d9aa03244c 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestUtils.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestUtils.java @@ -50,7 +50,7 @@ import java.util.Set; import static org.junit.Assert.assertFalse; -import static org.mockito.Matchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.when; import static org.mockito.MockitoAnnotations.initMocks; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java index 4659ecb97b..3e7256ec7b 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/TestWorkloadManager.java @@ -27,7 +27,7 @@ import static org.junit.Assert.assertSame; import static org.junit.Assert.assertTrue; import static org.junit.Assert.fail; -import static org.mockito.Matchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyBoolean; import static org.mockito.Mockito.doNothing; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/tez/monitoring/TestTezProgressMonitor.java ql/src/test/org/apache/hadoop/hive/ql/exec/tez/monitoring/TestTezProgressMonitor.java index 3338ca4e4b..ec52f3856d 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/tez/monitoring/TestTezProgressMonitor.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/tez/monitoring/TestTezProgressMonitor.java @@ -23,28 +23,25 @@ import org.apache.tez.dag.api.client.DAGClient; import org.apache.tez.dag.api.client.DAGStatus; import org.apache.tez.dag.api.client.Progress; -import org.apache.tez.dag.api.client.StatusGetOpts; import org.apache.tez.dag.api.client.VertexStatus; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Matchers; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; import java.util.Map; -import java.util.Set; import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.hasItems; import static org.hamcrest.CoreMatchers.sameInstance; import static org.hamcrest.core.Is.is; import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.anySet; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.isNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isNull; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.verifyNoMoreInteractions; import static org.mockito.Mockito.when; @@ -89,15 +86,15 @@ private Progress setup(Progress progressMock, int total, int succeeded, int fail @Test public void setupInternalStateOnObjectCreation() throws IOException, TezException { when(dagStatus.getState()).thenReturn(DAGStatus.State.RUNNING); - when(dagClient.getVertexStatus(eq(MAPPER), anySet())).thenReturn(succeeded); - 
when(dagClient.getVertexStatus(eq(REDUCER), anySet())).thenReturn(running); + when(dagClient.getVertexStatus(eq(MAPPER), any())).thenReturn(succeeded); + when(dagClient.getVertexStatus(eq(REDUCER), any())).thenReturn(running); TezProgressMonitor monitor = new TezProgressMonitor(dagClient, dagStatus, new ArrayList(), progressMap(), console, Long.MAX_VALUE); - verify(dagClient).getVertexStatus(eq(MAPPER), isNull(Set.class)); - verify(dagClient).getVertexStatus(eq(REDUCER), isNull(Set.class)); + verify(dagClient).getVertexStatus(eq(MAPPER), isNull()); + verify(dagClient).getVertexStatus(eq(REDUCER), isNull()); verifyNoMoreInteractions(dagClient); assertThat(monitor.vertexStatusMap.keySet(), hasItems(MAPPER, REDUCER)); diff --git ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java index f482e3be6e..b8d4256c36 100644 --- ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java +++ ql/src/test/org/apache/hadoop/hive/ql/exec/util/DAGTraversalTest.java @@ -20,7 +20,7 @@ import org.apache.hadoop.hive.ql.exec.Task; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.io.Serializable; import java.util.Arrays; diff --git ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java index 51e7215183..263c68acb8 100644 --- ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java +++ ql/src/test/org/apache/hadoop/hive/ql/hooks/TestQueryHooks.java @@ -27,10 +27,10 @@ import org.junit.Test; import org.mockito.ArgumentMatcher; -import static org.mockito.Matchers.any; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.argThat; -import static org.mockito.Matchers.eq; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.argThat; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; @@ -151,7 +151,7 @@ private Driver createDriver() throws IllegalAccessException, ClassNotFoundExcept return driver; } - private static final class QueryLifeTimeHookContextMatcher extends ArgumentMatcher { + private static final class QueryLifeTimeHookContextMatcher implements ArgumentMatcher { private final String command; @@ -160,8 +160,8 @@ private QueryLifeTimeHookContextMatcher(String command) { } @Override - public boolean matches(Object o) { - return ((QueryLifeTimeHookContext) o).getCommand().equals(this.command); + public boolean matches(QueryLifeTimeHookContext queryLifeTimeHookContext) { + return queryLifeTimeHookContext.getCommand().equals(this.command); } } } diff --git ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java index 49ae637765..dc281d2c59 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/TestAcidInputFormat.java @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class TestAcidInputFormat { diff --git 
ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java index 89fff4719b..2658bb675d 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/TestHiveBinarySearchRecordReader.java @@ -18,13 +18,12 @@ package org.apache.hadoop.hive.ql.io; -import static org.mockito.Matchers.anyObject; +import static org.mockito.ArgumentMatchers.anyObject; import static org.mockito.Mockito.inOrder; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import java.io.ByteArrayOutputStream; import java.io.IOException; import java.util.LinkedHashMap; diff --git ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java index 17ceedefd9..f63c40a7b5 100644 --- ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java +++ ql/src/test/org/apache/hadoop/hive/ql/io/orc/TestOrcRawRecordMerger.java @@ -75,6 +75,8 @@ import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertNull; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.when; public class TestOrcRawRecordMerger { @@ -169,19 +171,19 @@ private Reader createMockReader() throws IOException { setRow(row4, OrcRecordUpdater.INSERT_OPERATION, 40, 50, 60, 130, "fourth"); OrcStruct row5 = new OrcStruct(OrcRecordUpdater.FIELDS); setRow(row5, OrcRecordUpdater.INSERT_OPERATION, 40, 50, 61, 140, "fifth"); - Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class), Mockito.any(HiveConf.class))) + when(reader.rowsOptions(any(Reader.Options.class), any(HiveConf.class))) .thenReturn(recordReader); - Mockito.when(recordReader.hasNext()). + when(recordReader.hasNext()). thenReturn(true, true, true, true, true, false); - Mockito.when(recordReader.getProgress()).thenReturn(1.0f); + when(recordReader.getProgress()).thenReturn(1.0f); - Mockito.when(recordReader.next(null)).thenReturn(row1); - Mockito.when(recordReader.next(row1)).thenReturn(row2); - Mockito.when(recordReader.next(row2)).thenReturn(row3); - Mockito.when(recordReader.next(row3)).thenReturn(row4); - Mockito.when(recordReader.next(row4)).thenReturn(row5); + when(recordReader.next(null)).thenReturn(row1); + when(recordReader.next(row1)).thenReturn(row2); + when(recordReader.next(row2)).thenReturn(row3); + when(recordReader.next(row3)).thenReturn(row4); + when(recordReader.next(row4)).thenReturn(row5); return reader; } @@ -275,16 +277,17 @@ private Reader createMockOriginalReader() throws IOException { OrcStruct row4 = createOriginalRow("fourth"); OrcStruct row5 = createOriginalRow("fifth"); - Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class), Mockito.any(HiveConf.class))) + // HiveConf argument of rowsOptions can be null + when(reader.rowsOptions(any(Reader.Options.class), any())) .thenReturn(recordReader); - Mockito.when(recordReader.hasNext()). + when(recordReader.hasNext()). 
thenReturn(true, true, true, true, true, false); - Mockito.when(recordReader.getRowNumber()).thenReturn(0L, 1L, 2L, 3L, 4L); - Mockito.when(recordReader.next(null)).thenReturn(row1); - Mockito.when(recordReader.next(row1)).thenReturn(row2); - Mockito.when(recordReader.next(row2)).thenReturn(row3); - Mockito.when(recordReader.next(row3)).thenReturn(row4); - Mockito.when(recordReader.next(row4)).thenReturn(row5); + when(recordReader.getRowNumber()).thenReturn(0L, 1L, 2L, 3L, 4L); + when(recordReader.next(null)).thenReturn(row1); + when(recordReader.next(row1)).thenReturn(row2); + when(recordReader.next(row2)).thenReturn(row3); + when(recordReader.next(row3)).thenReturn(row4); + when(recordReader.next(row4)).thenReturn(row5); return reader; } @@ -415,8 +418,8 @@ public void testNewBase() throws Exception { typeBuilder.setKind(OrcProto.Type.Kind.STRING); types.add(typeBuilder.build()); - Mockito.when(reader.getTypes()).thenReturn(types); - Mockito.when(reader.rowsOptions(Mockito.any(Reader.Options.class), Mockito.any(HiveConf.class))) + when(reader.getTypes()).thenReturn(types); + when(reader.rowsOptions(any(Reader.Options.class), any())) .thenReturn(recordReader); OrcStruct row1 = new OrcStruct(OrcRecordUpdater.FIELDS); @@ -430,22 +433,22 @@ public void testNewBase() throws Exception { OrcStruct row5 = new OrcStruct(OrcRecordUpdater.FIELDS); setRow(row5, OrcRecordUpdater.INSERT_OPERATION, 40, 50, 61, 140, "fifth"); - Mockito.when(recordReader.hasNext()). + when(recordReader.hasNext()). thenReturn(true, true, true, true, true, false); - Mockito.when(recordReader.getProgress()).thenReturn(1.0f); + when(recordReader.getProgress()).thenReturn(1.0f); - Mockito.when(recordReader.next(null)).thenReturn(row1, row4); - Mockito.when(recordReader.next(row1)).thenReturn(row2); - Mockito.when(recordReader.next(row2)).thenReturn(row3); - Mockito.when(recordReader.next(row3)).thenReturn(row5); + when(recordReader.next(null)).thenReturn(row1, row4); + when(recordReader.next(row1)).thenReturn(row2); + when(recordReader.next(row2)).thenReturn(row3); + when(recordReader.next(row3)).thenReturn(row5); - Mockito.when(reader.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME)) + when(reader.hasMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME)) .thenReturn(true); - Mockito.when(reader.getMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME)) + when(reader.getMetadataValue(OrcRecordUpdater.ACID_KEY_INDEX_NAME)) .thenReturn(ByteBuffer.wrap("10,20,30;40,50,60;40,50,61" .getBytes("UTF-8"))); - Mockito.when(reader.getStripes()) + when(reader.getStripes()) .thenReturn(createStripes(2, 2, 1)); OrcRawRecordMerger merger = new OrcRawRecordMerger(conf, false, reader, diff --git ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java index 7a3ba3e5ca..62e9bc4f44 100644 --- ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java +++ ql/src/test/org/apache/hadoop/hive/ql/lockmgr/TestDummyTxnManager.java @@ -39,7 +39,7 @@ import org.junit.runner.RunWith; import org.mockito.ArgumentCaptor; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.lang.reflect.Field; import java.util.ArrayList; @@ -114,7 +114,7 @@ public void testSingleReadTable() throws Exception { LockException lEx = new LockException(ErrorMsg.LOCK_ACQUIRE_CANCELLED.getMsg()); when(mockLockManager.lock(anyListOf(HiveLockObj.class), eq(false), eq(driverState))).thenReturn(expectedLocks); 
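The TestDummyTxnManager hunk just below wraps a stub in lenient(). This follows from the runner change made throughout the patch: org.mockito.junit.MockitoJUnitRunner is strict and fails a test class with UnnecessaryStubbingException when a stubbing is never exercised, and lenient() exempts an individual stub from that check. A minimal sketch (Collaborator is hypothetical):

```java
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner;

import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;

@RunWith(MockitoJUnitRunner.class)
public class LenientStubbingSketch {

  interface Collaborator {
    String greet();
  }

  @Test
  public void unusedStubbingDoesNotFailTheClass() {
    Collaborator c = mock(Collaborator.class);
    // Under the Mockito 2 runner, an ordinary stub that no code path ever
    // calls makes the whole class fail with UnnecessaryStubbingException;
    // lenient() opts this one stub out of that strictness check.
    lenient().when(c.greet()).thenReturn("hello");
  }
}
```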
when(mockLockManager.lock(anyListOf(HiveLockObj.class), eq(false), eq(driverInterrupted))).thenThrow(lEx); - doNothing().when(mockLockManager).setContext(any(HiveLockManagerCtx.class)); + lenient().doNothing().when(mockLockManager).setContext(any(HiveLockManagerCtx.class)); doNothing().when(mockLockManager).close(); ArgumentCaptor lockObjsCaptor = ArgumentCaptor.forClass(List.class); diff --git ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePointLookupOptimizerRule.java ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePointLookupOptimizerRule.java index 2b37c5436f..ef878f0239 100644 --- ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePointLookupOptimizerRule.java +++ ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHivePointLookupOptimizerRule.java @@ -18,8 +18,6 @@ package org.apache.hadoop.hive.ql.optimizer.calcite.rules; -import static org.junit.Assert.assertEquals; - import org.apache.calcite.jdbc.JavaTypeFactoryImpl; import org.apache.calcite.plan.RelOptCluster; import org.apache.calcite.plan.RelOptSchema; @@ -38,10 +36,13 @@ import org.junit.Before; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.Matchers; import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; + +import static org.junit.Assert.assertEquals; +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.lenient; @RunWith(MockitoJUnitRunner.class) public class TestHivePointLookupOptimizerRule { @@ -74,9 +75,9 @@ public void before() { RexBuilder rexBuilder = new RexBuilder(typeFactory); final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder); RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class); - Mockito.doReturn(rowTypeMock).when(tableMock).getRowType(); - Mockito.doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any()); - Mockito.doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD(); + doReturn(rowTypeMock).when(tableMock).getRowType(); + doReturn(tableMock).when(schemaMock).getTableForMember(any()); + lenient().doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD(); builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock); diff --git ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java index 183f1279ad..7dc1277fa4 100644 --- ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java +++ ql/src/test/org/apache/hadoop/hive/ql/optimizer/calcite/rules/TestHiveReduceExpressionsWithStatsRule.java @@ -47,7 +47,7 @@ import org.mockito.Matchers; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import com.google.common.collect.Lists; diff --git ql/src/test/org/apache/hadoop/hive/ql/optimizer/signature/TestRelSignature.java ql/src/test/org/apache/hadoop/hive/ql/optimizer/signature/TestRelSignature.java index f8a54ca702..7532bfdb06 100644 --- ql/src/test/org/apache/hadoop/hive/ql/optimizer/signature/TestRelSignature.java +++ ql/src/test/org/apache/hadoop/hive/ql/optimizer/signature/TestRelSignature.java @@ -20,6 +20,8 @@ import static org.junit.Assert.assertEquals; import static 
org.junit.Assert.assertNotEquals; +import static org.mockito.Mockito.doReturn; +import static org.mockito.Mockito.lenient; import org.apache.calcite.jdbc.JavaTypeFactoryImpl; import org.apache.calcite.plan.RelOptCluster; @@ -50,8 +52,7 @@ import org.junit.runner.RunWith; import org.mockito.Matchers; import org.mockito.Mock; -import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class TestRelSignature { @@ -84,9 +85,9 @@ public void before() { RexBuilder rexBuilder = new RexBuilder(typeFactory); final RelOptCluster optCluster = RelOptCluster.create(planner, rexBuilder); RelDataType rowTypeMock = typeFactory.createStructType(MyRecord.class); - Mockito.doReturn(rowTypeMock).when(tableMock).getRowType(); - Mockito.doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any()); - Mockito.doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD(); + doReturn(rowTypeMock).when(tableMock).getRowType(); + doReturn(tableMock).when(schemaMock).getTableForMember(Matchers.any()); + lenient().doReturn(hiveTableMDMock).when(tableMock).getHiveTableMD(); builder = HiveRelFactories.HIVE_BUILDER.create(optCluster, schemaMock); } diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/repl/TestCopyUtils.java ql/src/test/org/apache/hadoop/hive/ql/parse/repl/TestCopyUtils.java index 610af09515..535bc6dc32 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/repl/TestCopyUtils.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/repl/TestCopyUtils.java @@ -38,10 +38,10 @@ import java.util.List; import static org.junit.Assert.assertFalse; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyListOf; -import static org.mockito.Matchers.eq; -import static org.mockito.Matchers.same; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyListOf; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.same; import static org.mockito.Mockito.doReturn; import static org.mockito.Mockito.mock; import static org.powermock.api.mockito.PowerMockito.mockStatic; diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/TestHiveWrapper.java ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/TestHiveWrapper.java index cb3b5cc98a..f0ead1d521 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/TestHiveWrapper.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/repl/dump/TestHiveWrapper.java @@ -26,7 +26,7 @@ import org.mockito.InOrder; import org.mockito.Mock; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; @RunWith(MockitoJUnitRunner.class) public class TestHiveWrapper { diff --git ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java index 3c7ef1dee8..faba6e4caa 100644 --- ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java +++ ql/src/test/org/apache/hadoop/hive/ql/parse/repl/load/message/TestPrimaryToReplicaResourceFunction.java @@ -48,7 +48,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static 
org.mockito.Mockito.mock; import static org.powermock.api.mockito.PowerMockito.mockStatic; import static org.powermock.api.mockito.PowerMockito.when; @@ -87,7 +87,7 @@ public void createDestinationPath() throws IOException, SemanticException, URISy when(mockFs.getScheme()).thenReturn("hdfs"); when(mockFs.getUri()).thenReturn(new URI("hdfs", "somehost:9000", null, null, null)); mockStatic(System.class); - when(System.nanoTime()).thenReturn(Long.MAX_VALUE); +// when(System.nanoTime()).thenReturn(Long.MAX_VALUE); when(functionObj.getFunctionName()).thenReturn("someFunctionName"); mockStatic(ReplCopyTask.class); Task mock = mock(Task.class); @@ -100,6 +100,6 @@ public void createDestinationPath() throws IOException, SemanticException, URISy assertThat(resourceUri.getUri(), is(equalTo( "hdfs://somehost:9000/someBasePath/withADir/replicadbname/somefunctionname/" + String - .valueOf(Long.MAX_VALUE) + "/ab.jar"))); + .valueOf(0L) + "/ab.jar"))); } } \ No newline at end of file diff --git ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java index e6576d18cf..2bd5c8aa3d 100644 --- ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java +++ ql/src/test/org/apache/hadoop/hive/ql/security/authorization/plugin/TestHivePrivilegeObjectOwnerNameAndType.java @@ -36,7 +36,7 @@ import java.util.List; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.reset; import static org.mockito.Mockito.verify; diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDAFEvaluator.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDAFEvaluator.java index d3df170395..0bc1975fd0 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDAFEvaluator.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDAFEvaluator.java @@ -31,7 +31,7 @@ import org.junit.runner.RunWith; import org.mockito.Answers; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import java.util.Collections; import java.util.List; diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFExtractUnion.java ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFExtractUnion.java index 3adc5ca349..0fcb341a3f 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFExtractUnion.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/generic/TestGenericUDFExtractUnion.java @@ -37,7 +37,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import com.google.common.collect.ImmutableList; diff --git ql/src/test/org/apache/hadoop/hive/ql/udf/ptf/TestBoundaryCache.java ql/src/test/org/apache/hadoop/hive/ql/udf/ptf/TestBoundaryCache.java index 714c51badc..5bd1ca4f68 100644 --- ql/src/test/org/apache/hadoop/hive/ql/udf/ptf/TestBoundaryCache.java +++ ql/src/test/org/apache/hadoop/hive/ql/udf/ptf/TestBoundaryCache.java @@ -51,7 +51,7 @@ import static org.apache.hadoop.hive.ql.parse.WindowingSpec.Direction.PRECEDING; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertTrue; -import static org.mockito.Matchers.any; +import static 
org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doAnswer; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.spy; @@ -227,7 +227,7 @@ private void runTest(WindowingSpec.Direction startDirection, int startAmount, AtomicInteger readCounter) throws Exception { PTFPartition partitionMock = mock(PTFPartition.class); doAnswer(invocationOnMock -> { - int idx = invocationOnMock.getArgumentAt(0, Integer.class); + int idx = invocationOnMock.getArgument(0, Integer.class); return partition.get(idx); }).when(partitionMock).getAt(any(Integer.class)); doAnswer(invocationOnMock -> { @@ -242,18 +242,18 @@ private void runTest(WindowingSpec.Direction startDirection, int startAmount, ValueBoundaryScanner scannerSpy = spy(scan); doAnswer(invocationOnMock -> { readCounter.incrementAndGet(); - List row = invocationOnMock.getArgumentAt(0, List.class); + List row = invocationOnMock.getArgument(0, List.class); return row.get(orderByCol); }).when(scannerSpy).computeValue(any(Object.class)); doAnswer(invocationOnMock -> { - IntWritable v1 = invocationOnMock.getArgumentAt(0, IntWritable.class); - IntWritable v2 = invocationOnMock.getArgumentAt(1, IntWritable.class); + IntWritable v1 = invocationOnMock.getArgument(0, IntWritable.class); + IntWritable v2 = invocationOnMock.getArgument(1, IntWritable.class); return (v1 != null && v2 != null) ? v1.get() == v2.get() : v1 == null && v2 == null; }).when(scannerSpy).isEqual(any(Object.class), any(Object.class)); doAnswer(invocationOnMock -> { - IntWritable v1 = invocationOnMock.getArgumentAt(0, IntWritable.class); - IntWritable v2 = invocationOnMock.getArgumentAt(1, IntWritable.class); - Integer amt = invocationOnMock.getArgumentAt(2, Integer.class); + IntWritable v1 = invocationOnMock.getArgument(0, IntWritable.class); + IntWritable v2 = invocationOnMock.getArgument(1, IntWritable.class); + Integer amt = invocationOnMock.getArgument(2, Integer.class); return (v1 != null && v2 != null) ? (v1.get() - v2.get()) > amt : v1 != null || v2 != null; }).when(scannerSpy).isDistanceGreater(any(Object.class), any(Object.class), any(Integer.class)); diff --git service/pom.xml service/pom.xml index 911e93a624..04c9a299c7 100644 --- service/pom.xml +++ service/pom.xml @@ -307,7 +307,7 @@ org.powermock - powermock-api-mockito + powermock-api-mockito2 ${powermock.version} test diff --git service/src/java/org/apache/hive/service/auth/PlainSaslHelper.java service/src/java/org/apache/hive/service/auth/PlainSaslHelper.java index fd1236413e..f2ba6bedbc 100644 --- service/src/java/org/apache/hive/service/auth/PlainSaslHelper.java +++ service/src/java/org/apache/hive/service/auth/PlainSaslHelper.java @@ -158,12 +158,7 @@ public void handle(Callback[] callbacks) throws IOException, UnsupportedCallback } PasswdAuthenticationProvider provider = AuthenticationProviderFactory.getAuthenticationProvider(authMethod); - try { - provider.Authenticate(username, password); - } catch (Exception e) { - LOG.error("Login attempt is failed for user : " + username + ". 
Error Messsage : " + e.getMessage()); - throw e; - } + provider.Authenticate(username, password); if (ac != null) { ac.setAuthorized(true); } diff --git service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java index 9c7ee54ed2..9e497545b5 100644 --- service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java +++ service/src/java/org/apache/hive/service/cli/session/HiveSessionImpl.java @@ -76,7 +76,6 @@ import org.apache.hive.service.cli.operation.OperationManager; import org.apache.hive.service.rpc.thrift.TProtocolVersion; import org.apache.hive.service.server.KillQueryImpl; -import org.apache.hive.service.server.KillQueryZookeeperManager; import org.apache.hive.service.server.ThreadWithGarbageCleanup; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -168,11 +167,7 @@ public void open(Map sessionConfMap) throws HiveSQLException { } catch (Exception e) { throw new HiveSQLException(e); } - KillQueryZookeeperManager killQueryZookeeperManager = null; - if (sessionManager != null) { - killQueryZookeeperManager = sessionManager.getKillQueryZookeeperManager(); - } - sessionState.setKillQuery(new KillQueryImpl(operationManager, killQueryZookeeperManager)); + sessionState.setKillQuery(new KillQueryImpl(operationManager)); SessionState.start(sessionState); try { sessionState.loadAuxJars(); diff --git service/src/java/org/apache/hive/service/cli/session/SessionManager.java service/src/java/org/apache/hive/service/cli/session/SessionManager.java index 57031f4350..277519cba5 100644 --- service/src/java/org/apache/hive/service/cli/session/SessionManager.java +++ service/src/java/org/apache/hive/service/cli/session/SessionManager.java @@ -54,7 +54,6 @@ import org.apache.hive.service.rpc.thrift.TOpenSessionReq; import org.apache.hive.service.rpc.thrift.TProtocolVersion; import org.apache.hive.service.server.HiveServer2; -import org.apache.hive.service.server.KillQueryZookeeperManager; import org.apache.hive.service.server.ThreadFactoryWithGarbageCleanup; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -86,7 +85,6 @@ private int ipAddressLimit; private int userIpAddressLimit; private final OperationManager operationManager = new OperationManager(); - private KillQueryZookeeperManager killQueryZookeeperManager; private ThreadPoolExecutor backgroundOperationPool; private boolean isOperationLogEnabled; private File operationLogRootDir; @@ -116,12 +114,6 @@ public synchronized void init(HiveConf hiveConf) { } createBackgroundOperationPool(); addService(operationManager); - if (hiveConf.getBoolVar(ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY) && - !hiveConf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE) && - hiveConf.getBoolVar(ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE)) { - killQueryZookeeperManager = new KillQueryZookeeperManager(operationManager, hiveServer2); - addService(killQueryZookeeperManager); - } initSessionImplClassName(); Metrics metrics = MetricsFactory.getInstance(); if(metrics != null){ @@ -633,10 +625,6 @@ public OperationManager getOperationManager() { return operationManager; } - public KillQueryZookeeperManager getKillQueryZookeeperManager() { - return killQueryZookeeperManager; - } - private static ThreadLocal threadLocalIpAddress = new ThreadLocal(); public static void setIpAddress(String ipAddress) { diff --git service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java 
service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java index 13cefbe2e6..3938147fef 100644 --- service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftCLIService.java @@ -319,10 +319,8 @@ private TStatus unsecureTokenErrorStatus() { public TOpenSessionResp OpenSession(TOpenSessionReq req) throws TException { LOG.info("Client protocol version: " + req.getClient_protocol()); TOpenSessionResp resp = new TOpenSessionResp(); - String userName = null; try { - userName = getUserName(req); - final SessionHandle sessionHandle = getSessionHandle(req, resp, userName); + final SessionHandle sessionHandle = getSessionHandle(req, resp); final int fetchSize = hiveConf.getIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_RESULTSET_DEFAULT_FETCH_SIZE); @@ -336,9 +334,7 @@ public TOpenSessionResp OpenSession(TOpenSessionReq req) throws TException { if (context != null) { context.setSessionHandle(sessionHandle); } - LOG.info("Login attempt is successful for user : " + userName); } catch (Exception e) { - LOG.error("Login attempt is failed for user : " + userName + ". Error Messsage :" + e.getMessage()); LOG.warn("Error opening session: ", e); resp.setStatus(HiveSQLException.toTStatus(e)); } @@ -466,8 +462,9 @@ private String getShortName(String userName) throws IOException { * @throws LoginException * @throws IOException */ - SessionHandle getSessionHandle(TOpenSessionReq req, TOpenSessionResp res, String userName) + SessionHandle getSessionHandle(TOpenSessionReq req, TOpenSessionResp res) throws HiveSQLException, LoginException, IOException { + String userName = getUserName(req); String ipAddress = getIpAddress(); TProtocolVersion protocol = getMinVersion(CLIService.SERVER_VERSION, req.getClient_protocol()); diff --git service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java index 2ccbb618ab..421aa5a4c8 100644 --- service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java +++ service/src/java/org/apache/hive/service/cli/thrift/ThriftHttpServlet.java @@ -253,13 +253,6 @@ protected void doPost(HttpServletRequest request, HttpServletResponse response) response.setStatus(HttpServletResponse.SC_UNAUTHORIZED); if(isKerberosAuthMode(authType)) { response.addHeader(HttpAuthUtils.WWW_AUTHENTICATE, HttpAuthUtils.NEGOTIATE); - } else { - try { - LOG.error("Login attempt is failed for user : " + - getUsername(request, authType) + ". Error Messsage :" + e.getMessage()); - } catch (Exception ex) { - // Ignore Exception - } } response.getWriter().println("Authentication Error: " + e.getMessage()); } @@ -518,15 +511,6 @@ public String run() throws HttpAuthenticationException { } } catch (GSSException e) { - if (gssContext != null) { - try { - LOG.error("Login attempt is failed for user : " + - getPrincipalWithoutRealmAndHost(gssContext.getSrcName().toString()) + - ". 
Error Messsage :" + e.getMessage()); - } catch (Exception ex) { - // Ignore Exception - } - } throw new HttpAuthenticationException("Kerberos authentication failed: ", e); } finally { diff --git service/src/java/org/apache/hive/service/server/HiveServer2.java service/src/java/org/apache/hive/service/server/HiveServer2.java index 42b7e59e9f..3e7d127195 100644 --- service/src/java/org/apache/hive/service/server/HiveServer2.java +++ service/src/java/org/apache/hive/service/server/HiveServer2.java @@ -645,7 +645,7 @@ public boolean isDeregisteredWithZooKeeper() { return false; } - public String getServerInstanceURI() throws Exception { + private String getServerInstanceURI() throws Exception { if ((thriftCLIService == null) || (thriftCLIService.getServerIPAddress() == null)) { throw new Exception("Unable to get the server address; it hasn't been initialized yet."); } diff --git service/src/java/org/apache/hive/service/server/KillQueryImpl.java service/src/java/org/apache/hive/service/server/KillQueryImpl.java index e15cd1f8a0..883e32bd2e 100644 --- service/src/java/org/apache/hive/service/server/KillQueryImpl.java +++ service/src/java/org/apache/hive/service/server/KillQueryImpl.java @@ -22,10 +22,10 @@ import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hive.conf.HiveConf; import org.apache.hadoop.hive.ql.QueryState; -import org.apache.hadoop.hive.ql.ddl.process.kill.KillQueriesOperation; import org.apache.hadoop.hive.ql.metadata.HiveException; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveAuthzContext; import org.apache.hadoop.hive.ql.security.authorization.plugin.HiveOperationType; +import org.apache.hadoop.hive.ql.security.authorization.plugin.HivePrivilegeObject; import org.apache.hadoop.hive.ql.session.KillQuery; import org.apache.hadoop.hive.ql.session.SessionState; import org.apache.hadoop.yarn.api.ApplicationClientProtocol; @@ -55,19 +55,14 @@ private final static Logger LOG = LoggerFactory.getLogger(KillQueryImpl.class); private final OperationManager operationManager; - private final KillQueryZookeeperManager killQueryZookeeperManager; + private enum TagOrId {TAG, ID, UNKNOWN}; - private enum TagOrId {TAG, ID, UNKNOWN} - - - public KillQueryImpl(OperationManager operationManager, KillQueryZookeeperManager killQueryZookeeperManager) { + public KillQueryImpl(OperationManager operationManager) { this.operationManager = operationManager; - this.killQueryZookeeperManager = killQueryZookeeperManager; } - public static Set getChildYarnJobs(Configuration conf, String tag, String doAs, boolean doAsAdmin) - throws IOException, YarnException { - Set childYarnJobs = new HashSet<>(); + public static Set getChildYarnJobs(Configuration conf, String tag) throws IOException, YarnException { + Set childYarnJobs = new HashSet(); GetApplicationsRequest gar = GetApplicationsRequest.newInstance(); gar.setScope(ApplicationsRequestScope.OWN); gar.setApplicationTags(Collections.singleton(tag)); @@ -75,13 +70,10 @@ public KillQueryImpl(OperationManager operationManager, KillQueryZookeeperManage ApplicationClientProtocol proxy = ClientRMProxy.createRMProxy(conf, ApplicationClientProtocol.class); GetApplicationsResponse apps = proxy.getApplications(gar); List appsList = apps.getApplicationList(); - for (ApplicationReport appReport : appsList) { - if (doAsAdmin) { + for(ApplicationReport appReport : appsList) { + if (isAdmin() || appReport.getApplicationTags().contains(QueryState.USERID_TAG + "=" + SessionState.get() + .getUserName())) { 
childYarnJobs.add(appReport.getApplicationId()); - } else if (StringUtils.isNotBlank(doAs)) { - if (appReport.getApplicationTags().contains(QueryState.USERID_TAG + "=" + doAs)) { - childYarnJobs.add(appReport.getApplicationId()); - } } } @@ -94,13 +86,13 @@ public KillQueryImpl(OperationManager operationManager, KillQueryZookeeperManage return childYarnJobs; } - public static void killChildYarnJobs(Configuration conf, String tag, String doAs, boolean doAsAdmin) { + public static void killChildYarnJobs(Configuration conf, String tag) { try { if (tag == null) { return; } LOG.info("Killing yarn jobs using query tag:" + tag); - Set childYarnJobs = getChildYarnJobs(conf, tag, doAs, doAsAdmin); + Set childYarnJobs = getChildYarnJobs(conf, tag); if (!childYarnJobs.isEmpty()) { YarnClient yarnClient = YarnClient.createYarnClient(); yarnClient.init(conf); @@ -110,7 +102,7 @@ public static void killChildYarnJobs(Configuration conf, String tag, String doAs } } } catch (IOException | YarnException ye) { - LOG.warn("Exception occurred while killing child job({})", tag, ye); + LOG.warn("Exception occurred while killing child job({})", ye); } } @@ -118,117 +110,76 @@ private static boolean isAdmin() { boolean isAdmin = false; if (SessionState.get().getAuthorizerV2() != null) { try { - SessionState.get().getAuthorizerV2() - .checkPrivileges(HiveOperationType.KILL_QUERY, new ArrayList<>(), - new ArrayList<>(), new HiveAuthzContext.Builder().build()); + SessionState.get().getAuthorizerV2().checkPrivileges(HiveOperationType.KILL_QUERY, + new ArrayList(), new ArrayList(), + new HiveAuthzContext.Builder().build()); isAdmin = true; } catch (Exception e) { - LOG.warn("Error while checking privileges", e); } } return isAdmin; } - private boolean cancelOperation(Operation operation, String doAs, boolean doAsAdmin, String errMsg) - throws HiveSQLException { - if (doAsAdmin || (!StringUtils.isBlank(doAs) && operation.getParentSession().getUserName().equals(doAs))) { + private boolean cancelOperation(Operation operation, boolean isAdmin, String errMsg) throws + HiveSQLException { + if (isAdmin || operation.getParentSession().getUserName().equals(SessionState.get() + .getAuthenticator().getUserName())) { OperationHandle handle = operation.getHandle(); operationManager.cancelOperation(handle, errMsg); return true; + } else { + return false; } - return false; - } - - public boolean isLocalQuery(String queryIdOrTag) { - TagOrId tagOrId = TagOrId.UNKNOWN; - if (operationManager.getOperationByQueryId(queryIdOrTag) != null) { - tagOrId = TagOrId.ID; - } else if (!operationManager.getOperationsByQueryTag(queryIdOrTag).isEmpty()) { - tagOrId = TagOrId.TAG; - } - return tagOrId != TagOrId.UNKNOWN; } @Override public void killQuery(String queryIdOrTag, String errMsg, HiveConf conf) throws HiveException { - killQuery(queryIdOrTag, errMsg, conf, false, SessionState.get().getUserName(), isAdmin()); - } - - public void killLocalQuery(String queryIdOrTag, HiveConf conf, String doAs, boolean doAsAdmin) - throws HiveException { - killQuery(queryIdOrTag, null, conf, true, doAs, doAsAdmin); - } - - private void killQuery(String queryIdOrTag, String errMsg, HiveConf conf, boolean onlyLocal, String doAs, - boolean doAsAdmin) throws HiveException { - errMsg = StringUtils.defaultString(errMsg, KillQueriesOperation.KILL_QUERY_MESSAGE); - TagOrId tagOrId = TagOrId.UNKNOWN; - Set operationsToKill = new HashSet<>(); - if (operationManager.getOperationByQueryId(queryIdOrTag) != null) { - 
operationsToKill.add(operationManager.getOperationByQueryId(queryIdOrTag)); - tagOrId = TagOrId.ID; - LOG.debug("Query found with id: {}", queryIdOrTag); - } else { - operationsToKill.addAll(operationManager.getOperationsByQueryTag(queryIdOrTag)); - if (!operationsToKill.isEmpty()) { - tagOrId = TagOrId.TAG; - LOG.debug("Query found with tag: {}", queryIdOrTag); - } - } - if (!operationsToKill.isEmpty()){ - killOperations(queryIdOrTag, errMsg, conf, tagOrId, operationsToKill, doAs, doAsAdmin); - } else { - LOG.debug("Query not found with tag/id: {}", queryIdOrTag); - if (!onlyLocal && killQueryZookeeperManager != null && - conf.getBoolVar(HiveConf.ConfVars.HIVE_ZOOKEEPER_KILLQUERY_ENABLE)) { - try { - LOG.debug("Killing query with zookeeper coordination: " + queryIdOrTag); - killQueryZookeeperManager - .killQuery(queryIdOrTag, SessionState.get().getAuthenticator().getUserName(), isAdmin()); - } catch (IOException e) { - LOG.error("Kill query failed for queryId: " + queryIdOrTag, e); - throw new HiveException("Unable to kill query locally or on remote servers.", e); - } + try { + TagOrId tagOrId = TagOrId.UNKNOWN; + Set operationsToKill = new HashSet(); + if (operationManager.getOperationByQueryId(queryIdOrTag) != null) { + operationsToKill.add(operationManager.getOperationByQueryId(queryIdOrTag)); + tagOrId = TagOrId.ID; } else { - LOG.warn("Unable to kill query with id {}", queryIdOrTag); + operationsToKill.addAll(operationManager.getOperationsByQueryTag(queryIdOrTag)); + if (!operationsToKill.isEmpty()) { + tagOrId = TagOrId.TAG; + } } - } - } - - private void killOperations(String queryIdOrTag, String errMsg, HiveConf conf, TagOrId tagOrId, - Set operationsToKill, String doAs, boolean doAsAdmin) throws HiveException { - try { - switch (tagOrId) { - case ID: - Operation operation = operationsToKill.iterator().next(); - boolean canceled = cancelOperation(operation, doAs, doAsAdmin, errMsg); - if (canceled) { - String queryTag = operation.getQueryTag(); - if (queryTag == null) { - queryTag = queryIdOrTag; + if (operationsToKill.isEmpty()) { + LOG.info("Query not found: " + queryIdOrTag); + } + boolean admin = isAdmin(); + switch(tagOrId) { + case ID: + Operation operation = operationsToKill.iterator().next(); + boolean canceled = cancelOperation(operation, admin, errMsg); + if (canceled) { + String queryTag = operation.getQueryTag(); + if (queryTag == null) { + queryTag = queryIdOrTag; + } + killChildYarnJobs(conf, queryTag); + } else { + // no privilege to cancel + throw new HiveSQLException("No privilege to kill query id"); } - killChildYarnJobs(conf, queryTag, doAs, doAsAdmin); - } else { - // no privilege to cancel - throw new HiveSQLException("No privilege to kill query id"); - } - break; - case TAG: - int numCanceled = 0; - for (Operation operationToKill : operationsToKill) { - if (cancelOperation(operationToKill, doAs, doAsAdmin, errMsg)) { - numCanceled++; + break; + case TAG: + int numCanceled = 0; + for (Operation operationToKill : operationsToKill) { + if (cancelOperation(operationToKill, admin, errMsg)) { + numCanceled++; + } } - } - if (numCanceled == 0) { - throw new HiveSQLException("No privilege to kill query tag"); - } else { - killChildYarnJobs(conf, queryIdOrTag, doAs, doAsAdmin); - } - break; - case UNKNOWN: - default: - break; + killChildYarnJobs(conf, queryIdOrTag); + if (numCanceled == 0) { + throw new HiveSQLException("No privilege to kill query tag"); + } + break; + case UNKNOWN: + killChildYarnJobs(conf, queryIdOrTag); + break; } } catch (HiveSQLException e) { 
LOG.error("Kill query failed for query " + queryIdOrTag, e); diff --git service/src/java/org/apache/hive/service/server/KillQueryZookeeperManager.java service/src/java/org/apache/hive/service/server/KillQueryZookeeperManager.java deleted file mode 100644 index 396364bf79..0000000000 --- service/src/java/org/apache/hive/service/server/KillQueryZookeeperManager.java +++ /dev/null @@ -1,525 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.hive.service.server; - -import com.google.common.base.Preconditions; -import com.google.common.collect.Lists; -import org.apache.commons.lang3.StringUtils; -import org.apache.curator.framework.CuratorFramework; -import org.apache.curator.framework.api.ACLProvider; -import org.apache.curator.framework.recipes.cache.PathChildrenCache; -import org.apache.curator.framework.recipes.cache.PathChildrenCacheEvent; -import org.apache.curator.framework.recipes.cache.PathChildrenCacheListener; -import org.apache.curator.framework.state.ConnectionState; -import org.apache.curator.framework.state.ConnectionStateListener; -import org.apache.curator.utils.CloseableUtils; -import org.apache.curator.utils.PathUtils; -import org.apache.curator.utils.ZKPaths; -import org.apache.hadoop.hive.conf.HiveConf; -import org.apache.hadoop.hive.registry.impl.ZookeeperUtils; -import org.apache.hive.service.AbstractService; -import org.apache.hive.service.ServiceException; -import org.apache.hive.service.cli.operation.OperationManager; -import org.apache.zookeeper.CreateMode; -import org.apache.zookeeper.KeeperException; -import org.apache.zookeeper.WatchedEvent; -import org.apache.zookeeper.Watcher; -import org.apache.zookeeper.ZooDefs; -import org.apache.zookeeper.data.ACL; -import org.slf4j.Logger; -import org.slf4j.LoggerFactory; - -import java.io.IOException; -import java.nio.charset.StandardCharsets; -import java.util.ArrayList; -import java.util.List; -import java.util.Optional; -import java.util.UUID; -import java.util.concurrent.TimeUnit; - -/** - * Kill query coordination service. - * When service discovery is enabled a local kill query can request a kill - * on every other HS2 server with the queryId or queryTag and wait for confirmation on denial. - * The communication is done through Zookeeper. 
- */ -public class KillQueryZookeeperManager extends AbstractService { - - private static final Logger LOG = LoggerFactory.getLogger(KillQueryZookeeperManager.class); - private static final String SASL_LOGIN_CONTEXT_NAME = "KillQueryZooKeeperClient"; - public static final int MAX_WAIT_ON_CONFIRMATION_SECONDS = 30; - public static final int MAX_WAIT_ON_KILL_SECONDS = 180; - - private CuratorFramework zooKeeperClient; - private String zkPrincipal, zkKeytab, zkNameSpace; - private final KillQueryImpl localKillQueryImpl; - private final HiveServer2 hiveServer2; - private HiveConf conf; - - // Path cache to watch queries to kill - private PathChildrenCache killQueryListener = null; - - public KillQueryZookeeperManager(OperationManager operationManager, HiveServer2 hiveServer2) { - super(KillQueryZookeeperManager.class.getSimpleName()); - this.hiveServer2 = hiveServer2; - localKillQueryImpl = new KillQueryImpl(operationManager, this); - } - - @Override - public synchronized void init(HiveConf conf) { - this.conf = conf; - zkNameSpace = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE); - Preconditions.checkArgument(!StringUtils.isBlank(zkNameSpace), - HiveConf.ConfVars.HIVE_ZOOKEEPER_KILLQUERY_NAMESPACE.varname + " cannot be null or empty"); - this.zkPrincipal = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_PRINCIPAL); - this.zkKeytab = HiveConf.getVar(conf, HiveConf.ConfVars.HIVE_SERVER2_KERBEROS_KEYTAB); - this.zooKeeperClient = conf.getZKConfig().getNewZookeeperClient(getACLProviderForZKPath("/" + zkNameSpace)); - this.zooKeeperClient.getConnectionStateListenable().addListener(new ZkConnectionStateListener()); - - super.init(conf); - } - - @Override - public synchronized void start() { - super.start(); - if (zooKeeperClient == null) { - throw new ServiceException("Failed start zookeeperClient in KillQueryZookeeperManager"); - } - try { - ZookeeperUtils.setupZookeeperAuth(this.getHiveConf(), SASL_LOGIN_CONTEXT_NAME, zkPrincipal, zkKeytab); - zooKeeperClient.start(); - try { - zooKeeperClient.create().creatingParentsIfNeeded().withMode(CreateMode.PERSISTENT).forPath("/" + zkNameSpace); - if (ZookeeperUtils.isKerberosEnabled(conf)) { - zooKeeperClient.setACL().withACL(createSecureAcls()).forPath("/" + zkNameSpace); - } - LOG.info("Created the root namespace: " + zkNameSpace + " on ZooKeeper"); - } catch (KeeperException e) { - if (e.code() != KeeperException.Code.NODEEXISTS) { - LOG.error("Unable to create namespace: " + zkNameSpace + " on ZooKeeper", e); - throw e; - } - } - // Create a path cache and start to listen for every kill query request from other servers. - killQueryListener = new PathChildrenCache(zooKeeperClient, "/" + zkNameSpace, false); - killQueryListener.start(PathChildrenCache.StartMode.NORMAL); - startListeningForQueries(); - // Init closeable utils in case register is not called (see HIVE-13322) - CloseableUtils.class.getName(); - } catch (Exception e) { - throw new RuntimeException("Failed start zookeeperClient in KillQueryZookeeperManager", e); - } - LOG.info("KillQueryZookeeperManager service started."); - } - - private ACLProvider getACLProviderForZKPath(String zkPath) { - final boolean isSecure = ZookeeperUtils.isKerberosEnabled(conf); - return new ACLProvider() { - @Override - public List getDefaultAcl() { - // We always return something from getAclForPath so this should not happen. 
LOG.warn("getDefaultAcl was called"); - return Lists.newArrayList(ZooDefs.Ids.OPEN_ACL_UNSAFE); - } - - @Override - public List<ACL> getAclForPath(String path) { - if (!isSecure || path == null || !path.contains(zkPath)) { - // No security or the path is below the user path - full access. - return Lists.newArrayList(ZooDefs.Ids.OPEN_ACL_UNSAFE); - } - return createSecureAcls(); - } - }; - } - - private static List<ACL> createSecureAcls() { - // Read all to the world - List<ACL> nodeAcls = new ArrayList<>(ZooDefs.Ids.READ_ACL_UNSAFE); - // Create/Delete/Write/Admin to creator - nodeAcls.addAll(ZooDefs.Ids.CREATOR_ALL_ACL); - return nodeAcls; - } - - private void startListeningForQueries() { - PathChildrenCacheListener listener = (client, pathChildrenCacheEvent) -> { - if (pathChildrenCacheEvent.getType() == PathChildrenCacheEvent.Type.CHILD_ADDED) { - KillQueryZookeeperBarrier barrier = new KillQueryZookeeperBarrier(zooKeeperClient, "/" + zkNameSpace, - ZKPaths.getNodeFromPath(pathChildrenCacheEvent.getData().getPath())); - Optional<KillQueryZookeeperData> data = barrier.getKillQueryData(); - if (!data.isPresent()) { - return; - } - KillQueryZookeeperData killQuery = data.get(); - LOG.debug("Kill query request with id {}", killQuery.getQueryId()); - if (getServerHost().equals(killQuery.getRequestingServer())) { - // The listener was called for the server that posted the request - return; - } - if (localKillQueryImpl.isLocalQuery(killQuery.getQueryId())) { - LOG.info("Killing query with id {}", killQuery.getQueryId()); - barrier.confirmProgress(getServerHost()); - try { - localKillQueryImpl - .killLocalQuery(killQuery.getQueryId(), conf, killQuery.getDoAs(), killQuery.isDoAsAdmin()); - barrier.confirmDone(getServerHost()); - } catch (Exception e) { - LOG.error("Unable to kill local query", e); - barrier.confirmFailed(getServerHost()); - } - } else { - LOG.debug("Confirm unknown kill query request with id {}", killQuery.getQueryId()); - barrier.confirmNo(getServerHost()); - } - } - }; - LOG.info("Start to listen for kill query requests."); - killQueryListener.getListenable().addListener(listener); - } - - @Override - public synchronized void stop() { - super.stop(); - LOG.info("Stopping KillQueryZookeeperManager service."); - CloseableUtils.closeQuietly(killQueryListener); - CloseableUtils.closeQuietly(zooKeeperClient); - } - - private List<String> getAllServerUrls() { - List<String> serverHosts = new ArrayList<>(); - if (conf.getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_SUPPORT_DYNAMIC_SERVICE_DISCOVERY) && !conf - .getBoolVar(HiveConf.ConfVars.HIVE_SERVER2_ACTIVE_PASSIVE_HA_ENABLE)) { - String zooKeeperNamespace = conf.getVar(HiveConf.ConfVars.HIVE_SERVER2_ZOOKEEPER_NAMESPACE); - try { - serverHosts.addAll(zooKeeperClient.getChildren().forPath("/" + zooKeeperNamespace)); - } catch (Exception e) { - LOG.error("Unable to get available server hosts", e); - } - } - return serverHosts; - } - - private String getServerHost() { - if (hiveServer2 == null) { - return ""; - } - try { - return removeDelimiter(hiveServer2.getServerInstanceURI()); - } catch (Exception e) { - LOG.error("Unable to determine the server host", e); - return ""; - } - } - - // for debugging - private static class ZkConnectionStateListener implements ConnectionStateListener { - @Override - public void stateChanged(final CuratorFramework curatorFramework, final ConnectionState connectionState) { - LOG.info("Connection state change notification received. State: {}", connectionState); - } - } - - /** - * Post a kill query request on Zookeeper for the other HS2 instances.
If service discovery is not enabled or - * there is no other server registered, it does nothing. Otherwise it posts the kill query request on Zookeeper - * and waits for the other instances to confirm the kill or deny it. - * - * @param queryIdOrTag queryId or tag to kill - * @param doAs user requesting the kill - * @param doAsAdmin admin user requesting the kill (with KILLQUERY privilege) - * @throws IOException If the kill query failed - */ - public void killQuery(String queryIdOrTag, String doAs, boolean doAsAdmin) throws IOException { - List<String> serverHosts = getAllServerUrls(); - if (serverHosts.size() < 2) { - return; - } - KillQueryZookeeperBarrier barrier = new KillQueryZookeeperBarrier(zooKeeperClient, "/" + zkNameSpace); - boolean result; - try { - barrier.setBarrier(queryIdOrTag, hiveServer2.getServerInstanceURI(), doAs, doAsAdmin); - LOG.info("Created kill query barrier in path: {} for queryId: {}", barrier.getBarrierPath(), queryIdOrTag); - result = barrier.waitOnBarrier(serverHosts.size() - 1, MAX_WAIT_ON_CONFIRMATION_SECONDS, - MAX_WAIT_ON_KILL_SECONDS, TimeUnit.SECONDS); - - } catch (Exception e) { - LOG.error("Unable to create Barrier on Zookeeper for KillQuery", e); - throw new IOException(e); - } - if (!result) { - throw new IOException("Unable to kill query on remote servers"); - } - } - - /** - * Data to post to Zookeeper for a kill query request. The fields will be serialized with a ':' delimiter. - * In requestingServer every ':' will be removed. Other fields cannot contain any ':'. - */ - public static class KillQueryZookeeperData { - private String queryId; - private String requestingServer; - private String doAs; - private boolean doAsAdmin; - - public KillQueryZookeeperData(String queryId, String requestingServer, String doAs, boolean doAsAdmin) { - if (!StringUtils.equals(queryId, removeDelimiter(queryId))) { - throw new IllegalArgumentException("QueryId cannot contain any ':' character."); - } - this.queryId = queryId; - this.requestingServer = removeDelimiter(requestingServer); - if (!StringUtils.equals(doAs, removeDelimiter(doAs))) { - throw new IllegalArgumentException("doAs cannot contain any ':' character."); - } - this.doAs = doAs; - this.doAsAdmin = doAsAdmin; - } - - public KillQueryZookeeperData(String data) { - if (data == null) { - return; - } - - String[] elem = data.split(":"); - queryId = elem[0]; - requestingServer = elem[1]; - doAs = elem[2]; - doAsAdmin = Boolean.parseBoolean(elem[3]); - } - - @Override - public String toString() { - return queryId + ":" + requestingServer + ":" + doAs + ":" + doAsAdmin; - } - - public String getQueryId() { - return queryId; - } - - public String getRequestingServer() { - return requestingServer; - } - - public String getDoAs() { - return doAs; - } - - public boolean isDoAsAdmin() { - return doAsAdmin; - } - } - - /** - * Zookeeper Barrier for the KillQuery Operation. - * It posts a kill query request on Zookeeper and waits until the given number of service instances respond. - * Implementation is based on org.apache.curator.framework.recipes.barriers.DistributedBarrier.
- */ -public static class KillQueryZookeeperBarrier { - private final CuratorFramework client; - private final String barrierPath; - private final Watcher watcher = new Watcher() { - @Override - public void process(WatchedEvent event) { - client.postSafeNotify(KillQueryZookeeperBarrier.this); - } - }; - - /** - * @param client client - * @param barrierRootPath rootPath to put the barrier - */ - public KillQueryZookeeperBarrier(CuratorFramework client, String barrierRootPath) { - this(client, barrierRootPath, UUID.randomUUID().toString()); - } - - /** - * @param client client - * @param barrierRootPath rootPath to put the barrier - * @param barrierPath name of the barrier - */ - public KillQueryZookeeperBarrier(CuratorFramework client, String barrierRootPath, String barrierPath) { - this.client = client; - this.barrierPath = PathUtils.validatePath(barrierRootPath + "/" + barrierPath); - } - - public String getBarrierPath() { - return barrierPath; - } - - /** - * Utility to set the barrier node. - * - * @throws Exception errors - */ - public synchronized void setBarrier(String queryId, String requestingServer, String doAs, boolean doAsAdmin) - throws Exception { - try { - KillQueryZookeeperData data = new KillQueryZookeeperData(queryId, requestingServer, doAs, doAsAdmin); - client.create().creatingParentContainersIfNeeded() - .forPath(barrierPath, data.toString().getBytes(StandardCharsets.UTF_8)); - } catch (KeeperException.NodeExistsException e) { - throw new IllegalStateException("Barrier with this path already exists"); - } - } - - public synchronized Optional<KillQueryZookeeperData> getKillQueryData() throws Exception { - if (client.checkExists().forPath(barrierPath) != null) { - byte[] data = client.getData().forPath(barrierPath); - return Optional.of(new KillQueryZookeeperData(new String(data, StandardCharsets.UTF_8))); - } - return Optional.empty(); - } - - /** - * Confirm not knowing the query with the queryId in the barrier. - * - * @param serverId The serverHost confirming the request - * @throws Exception If confirmation failed - */ - public synchronized void confirmNo(String serverId) throws Exception { - if (client.checkExists().forPath(barrierPath) != null) { - client.create().forPath(barrierPath + "/NO:" + serverId); - } else { - throw new IllegalStateException("Barrier is not initialised"); - } - } - - /** - * Confirm knowing the query with the queryId in the barrier and starting the kill query process. - * - * @param serverId The serverHost confirming the request - * @throws Exception If confirmation failed - */ - public synchronized void confirmProgress(String serverId) throws Exception { - if (client.checkExists().forPath(barrierPath) != null) { - client.create().forPath(barrierPath + "/PROGRESS:" + serverId); - } else { - throw new IllegalStateException("Barrier is not initialised"); - } - } - - /** - * Confirm killing the query with the queryId in the barrier. - * - * @param serverId The serverHost confirming the request - * @throws Exception If confirmation failed - */ - public synchronized void confirmDone(String serverId) throws Exception { - if (client.checkExists().forPath(barrierPath) != null) { - if (client.checkExists().forPath(barrierPath + "/PROGRESS:" + serverId) != null) { - client.delete().forPath(barrierPath + "/PROGRESS:" + serverId); - } - client.create().forPath(barrierPath + "/DONE:" + serverId); - } else { - throw new IllegalStateException("Barrier is not initialised"); - } - } - - /** - * Confirm failure of killing the query with the queryId in the barrier.
- * - * @param serverId The serverHost confirming the request - * @throws Exception If confirmation failed - */ - public synchronized void confirmFailed(String serverId) throws Exception { - if (client.checkExists().forPath(barrierPath) != null) { - if (client.checkExists().forPath(barrierPath + "/PROGRESS:" + serverId) != null) { - client.delete().forPath(barrierPath + "/PROGRESS:" + serverId); - } - client.create().forPath(barrierPath + "/FAILED:" + serverId); - } else { - throw new IllegalStateException("Barrier is not initialised"); - } - } - - /** - * Wait for every server to either confirm killing the query or confirm not knowing the query. - * - * @param confirmationCount number of confirmations to wait for - * @param maxWaitOnConfirmation confirmation waiting timeout for NO answers - * @param maxWaitOnKill timeout for waiting on the actual kill query operation - * @param unit time unit for timeouts - * @return true if the kill was confirmed, false on timeout or if everybody voted for NO - * @throws Exception If confirmation failed - */ - public synchronized boolean waitOnBarrier(int confirmationCount, long maxWaitOnConfirmation, long maxWaitOnKill, - TimeUnit unit) throws Exception { - long startMs = System.currentTimeMillis(); - long startKill = -1; - long maxWaitMs = TimeUnit.MILLISECONDS.convert(maxWaitOnConfirmation, unit); - long maxWaitOnKillMs = TimeUnit.MILLISECONDS.convert(maxWaitOnKill, unit); - - boolean progress = false; - boolean result = false; - while (true) { - List<String> children = client.getChildren().usingWatcher(watcher).forPath(barrierPath); - boolean concluded = false; - for (String child : children) { - if (child.startsWith("DONE")) { - result = true; - concluded = true; - break; - } - if (child.startsWith("FAILED")) { - concluded = true; - break; - } - if (child.startsWith("PROGRESS")) { - progress = true; - } - } - if (concluded) { - break; - } - if (progress) { - // Wait for the kill query to finish - if (startKill < 0) { - startKill = System.currentTimeMillis(); - } - long elapsed = System.currentTimeMillis() - startKill; - long thisWaitMs = maxWaitOnKillMs - elapsed; - if (thisWaitMs <= 0) { - break; - } - wait(thisWaitMs); - } else { - if (children.size() == confirmationCount) { - result = false; - break; - } - // Wait for confirmation - long elapsed = System.currentTimeMillis() - startMs; - long thisWaitMs = maxWaitMs - elapsed; - if (thisWaitMs <= 0) { - break; - } - wait(thisWaitMs); - } - - } - client.delete().deletingChildrenIfNeeded().forPath(barrierPath); - return result; - } - } - - private static String removeDelimiter(String in) { - if (in == null) { - return null; - } - return in.replaceAll(":", ""); - } -} diff --git service/src/test/org/apache/hive/service/auth/TestLdapAuthenticationProviderImpl.java service/src/test/org/apache/hive/service/auth/TestLdapAuthenticationProviderImpl.java index 0396b74912..c8632aefb9 100644 --- service/src/test/org/apache/hive/service/auth/TestLdapAuthenticationProviderImpl.java +++ service/src/test/org/apache/hive/service/auth/TestLdapAuthenticationProviderImpl.java @@ -32,9 +32,8 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; -import static org.junit.Assert.*; import static org.mockito.Mockito.*; @RunWith(MockitoJUnitRunner.class) @@ -88,7 +87,7 @@ public void testAuthenticateNoUserOrGroupFilter() throws NamingException, Authen DirSearchFactory factory =
mock(DirSearchFactory.class); - when(search.findUserDn("user1")).thenReturn("cn=user1,ou=PowerUsers,dc=mycorp,dc=com"); + lenient().when(search.findUserDn("user1")).thenReturn("cn=user1,ou=PowerUsers,dc=mycorp,dc=com"); when(factory.getInstance(conf, "cn=user1,ou=PowerUsers,dc=mycorp,dc=com", "Blah")).thenReturn(search); when(factory.getInstance(conf, "cn=user1,ou=Users,dc=mycorp,dc=com", "Blah")).thenThrow(AuthenticationException.class); @@ -221,7 +220,7 @@ public void testAuthenticateWhenUserFilterFailsAndGroupMembershipKeyFilterPasses when(search.findUserDn("user3")).thenReturn("cn=user3,ou=PowerUsers,dc=mycorp,dc=com"); - when(search.findGroupsForUser("cn=user3,ou=PowerUsers,dc=mycorp,dc=com")) + lenient().when(search.findGroupsForUser("cn=user3,ou=PowerUsers,dc=mycorp,dc=com")) .thenReturn(Arrays.asList( "cn=testGroup,ou=Groups,dc=mycorp,dc=com", "cn=group3,ou=Groups,dc=mycorp,dc=com")); @@ -251,7 +250,7 @@ public void testAuthenticateWhenCustomQueryFilterFailsAndUserFilterPasses() thro "(&(objectClass=person)(|(memberOf=CN=Domain Admins,CN=Users,DC=apache,DC=org)(memberOf=CN=Administrators,CN=Builtin,DC=apache,DC=org)))"); conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_USERFILTER, "user3"); - when(search.findUserDn("user3")).thenReturn("cn=user3,ou=PowerUsers,dc=mycorp,dc=com"); + lenient().when(search.findUserDn("user3")).thenReturn("cn=user3,ou=PowerUsers,dc=mycorp,dc=com"); when(search.executeCustomQuery(anyString())).thenReturn(Arrays.asList( "cn=user1,ou=PowerUsers,dc=mycorp,dc=com", "cn=user2,ou=PowerUsers,dc=mycorp,dc=com")); diff --git service/src/test/org/apache/hive/service/auth/ldap/TestChainFilter.java service/src/test/org/apache/hive/service/auth/ldap/TestChainFilter.java index b8385ebdda..8043d9c9f4 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestChainFilter.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestChainFilter.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; diff --git service/src/test/org/apache/hive/service/auth/ldap/TestCustomQueryFilter.java service/src/test/org/apache/hive/service/auth/ldap/TestCustomQueryFilter.java index 48972e7715..d8725ae9df 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestCustomQueryFilter.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestCustomQueryFilter.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; diff --git service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java index 9ea5361836..34f50b7685 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestGroupFilter.java @@ -24,7 +24,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; @@ -142,7 +142,8 @@ public void testGroupMembershipKeyCaseInsensitiveFilterApplyNegative() throws AuthenticationException, NamingException, 
IOException { conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER, "hiveusers,containsg1"); - when(search.findGroupsForUser(eq("user1"))).thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); + lenient().when(search.findGroupsForUser(eq("user1"))) + .thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); Filter filter = factory.getInstance(conf); filter.apply(search, "user1"); @@ -153,7 +154,8 @@ public void testGroupMembershipKeyFilterApplyNegative() throws AuthenticationException, NamingException, IOException { conf.setVar(HiveConf.ConfVars.HIVE_SERVER2_PLAIN_LDAP_GROUPFILTER, "HiveUsers"); - when(search.findGroupsForUser(eq("user1"))).thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); + lenient().when(search.findGroupsForUser(eq("user1"))) + .thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); Filter filter = factory.getInstance(conf); filter.apply(search, "user1"); diff --git service/src/test/org/apache/hive/service/auth/ldap/TestLdapSearch.java service/src/test/org/apache/hive/service/auth/ldap/TestLdapSearch.java index c5e995e2a2..78cba2ac55 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestLdapSearch.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestLdapSearch.java @@ -32,7 +32,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git service/src/test/org/apache/hive/service/auth/ldap/TestSearchResultHandler.java service/src/test/org/apache/hive/service/auth/ldap/TestSearchResultHandler.java index db01d107ee..7136b5f864 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestSearchResultHandler.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestSearchResultHandler.java @@ -28,7 +28,7 @@ import javax.naming.directory.SearchResult; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git service/src/test/org/apache/hive/service/auth/ldap/TestUserFilter.java service/src/test/org/apache/hive/service/auth/ldap/TestUserFilter.java index 4c86de2a4c..3edac26e45 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestUserFilter.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestUserFilter.java @@ -23,7 +23,7 @@ import org.apache.hadoop.hive.conf.HiveConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; diff --git service/src/test/org/apache/hive/service/auth/ldap/TestUserSearchFilter.java service/src/test/org/apache/hive/service/auth/ldap/TestUserSearchFilter.java index d42e5f12ce..d066fd01bb 100644 --- service/src/test/org/apache/hive/service/auth/ldap/TestUserSearchFilter.java +++ service/src/test/org/apache/hive/service/auth/ldap/TestUserSearchFilter.java @@ -25,7 +25,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git service/src/test/org/apache/hive/service/cli/thrift/ThriftHttpServletTest.java 
service/src/test/org/apache/hive/service/cli/thrift/ThriftHttpServletTest.java index 948e97f5ce..04c11b077c 100644 --- service/src/test/org/apache/hive/service/cli/thrift/ThriftHttpServletTest.java +++ service/src/test/org/apache/hive/service/cli/thrift/ThriftHttpServletTest.java @@ -27,7 +27,7 @@ import org.junit.runner.RunWith; import org.mockito.Mockito; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import javax.servlet.http.HttpServletRequest; diff --git shims/common/src/main/test/org/apache/hadoop/fs/TestProxyFileSystem.java shims/common/src/main/test/org/apache/hadoop/fs/TestProxyFileSystem.java index 37667bba48..068cef7359 100644 --- shims/common/src/main/test/org/apache/hadoop/fs/TestProxyFileSystem.java +++ shims/common/src/main/test/org/apache/hadoop/fs/TestProxyFileSystem.java @@ -27,7 +27,7 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.junit.Assert.assertNotNull; import static org.junit.Assert.assertThat; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java index 541b205632..55f90949a1 100644 --- shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java +++ shims/common/src/main/test/org/apache/hadoop/hive/io/TestHdfsUtils.java @@ -33,7 +33,7 @@ import org.junit.Test; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -59,7 +59,7 @@ public void testSetFullFileStatusFailInheritGroup() throws IOException { doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class)); HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "fakeGroup2", fs, new Path("fakePath"), false); - verify(fs).setOwner(any(Path.class), any(String.class), any(String.class)); + verify(fs).setOwner(any(Path.class), any(), any(String.class)); } /** @@ -156,7 +156,7 @@ public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception { doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class)); HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell); - verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(String.class), fakeTarget.toString()}); + verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(), fakeTarget.toString()}); } /** @@ -179,6 +179,6 @@ public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception { HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell); - verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()}); + verify(mockFsShell).run(new String[]{"-chmod", "-R", any(), fakeTarget.toString()}); } } diff --git spark-client/src/test/java/org/apache/hive/spark/client/TestJobHandle.java spark-client/src/test/java/org/apache/hive/spark/client/TestJobHandle.java index b81a34ba71..d6ecd86f80 100644 --- spark-client/src/test/java/org/apache/hive/spark/client/TestJobHandle.java +++ spark-client/src/test/java/org/apache/hive/spark/client/TestJobHandle.java @@ -25,7 +25,7 @@ import org.junit.runner.RunWith; import 
org.mockito.InOrder; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/ZooKeeperHiveHelper.java standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/ZooKeeperHiveHelper.java index 1e35795d63..71d8651712 100644 --- standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/ZooKeeperHiveHelper.java +++ standalone-metastore/metastore-common/src/main/java/org/apache/hadoop/hive/common/ZooKeeperHiveHelper.java @@ -29,7 +29,6 @@ import org.apache.curator.framework.CuratorFramework; import org.apache.curator.framework.recipes.nodes.PersistentNode; import org.apache.curator.retry.ExponentialBackoffRetry; -import org.apache.curator.retry.RetryOneTime; import org.apache.zookeeper.CreateMode; import org.apache.zookeeper.KeeperException; import org.slf4j.Logger; @@ -338,9 +337,6 @@ public CuratorFramework getNewZookeeperClient(ACLProvider zooKeeperAclProvider, } if (maxRetries > 0) { builder = builder.retryPolicy(new ExponentialBackoffRetry(baseSleepTime, maxRetries)); - } else { - // Retry policy is mandatory - builder = builder.retryPolicy(new RetryOneTime(1000)); } if (zooKeeperAclProvider != null) { builder = builder.aclProvider(zooKeeperAclProvider); diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreLdapAuthenticationProviderImpl.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreLdapAuthenticationProviderImpl.java index bad5da5dd1..002fd07cc6 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreLdapAuthenticationProviderImpl.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMetaStoreLdapAuthenticationProviderImpl.java @@ -34,10 +34,18 @@ import org.junit.rules.ExpectedException; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; +import org.mockito.junit.MockitoJUnitRunner; + +import static org.mockito.ArgumentMatchers.any; +import static org.mockito.ArgumentMatchers.anyString; +import static org.mockito.ArgumentMatchers.eq; +import static org.mockito.ArgumentMatchers.isA; +import static org.mockito.Mockito.atLeastOnce; +import static org.mockito.Mockito.lenient; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; @RunWith(MockitoJUnitRunner.class) public class TestMetaStoreLdapAuthenticationProviderImpl { @@ -90,7 +98,7 @@ public void testAuthenticateNoUserOrGroupFilter() throws NamingException, Authen DirSearchFactory factory = mock(DirSearchFactory.class); - when(search.findUserDn("user1")).thenReturn("cn=user1,ou=PowerUsers,dc=mycorp,dc=com"); + lenient().when(search.findUserDn("user1")).thenReturn("cn=user1,ou=PowerUsers,dc=mycorp,dc=com"); when(factory.getInstance(conf, "cn=user1,ou=PowerUsers,dc=mycorp,dc=com", "Blah")).thenReturn(search); when(factory.getInstance(conf, "cn=user1,ou=Users,dc=mycorp,dc=com", "Blah")).thenThrow(AuthenticationException.class); @@ -223,7 +231,7 @@ public void testAuthenticateWhenUserFilterFailsAndGroupMembershipKeyFilterPasses 
when(search.findUserDn("user3")).thenReturn("cn=user3,ou=PowerUsers,dc=mycorp,dc=com"); - when(search.findGroupsForUser("cn=user3,ou=PowerUsers,dc=mycorp,dc=com")) + lenient().when(search.findGroupsForUser("cn=user3,ou=PowerUsers,dc=mycorp,dc=com")) .thenReturn(Arrays.asList( "cn=testGroup,ou=Groups,dc=mycorp,dc=com", "cn=group3,ou=Groups,dc=mycorp,dc=com")); @@ -253,7 +261,7 @@ public void testAuthenticateWhenCustomQueryFilterFailsAndUserFilterPasses() thro "(&(objectClass=person)(|(memberOf=CN=Domain Admins,CN=Users,DC=apache,DC=org)(memberOf=CN=Administrators,CN=Builtin,DC=apache,DC=org)))"); MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_PLAIN_LDAP_USERFILTER, "user3"); - when(search.findUserDn("user3")).thenReturn("cn=user3,ou=PowerUsers,dc=mycorp,dc=com"); + lenient().when(search.findUserDn("user3")).thenReturn("cn=user3,ou=PowerUsers,dc=mycorp,dc=com"); when(search.executeCustomQuery(anyString())).thenReturn(Arrays.asList( "cn=user1,ou=PowerUsers,dc=mycorp,dc=com", "cn=user2,ou=PowerUsers,dc=mycorp,dc=com")); diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMsckCheckPartitions.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMsckCheckPartitions.java index 980423eccd..fdfc582603 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMsckCheckPartitions.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestMsckCheckPartitions.java @@ -39,7 +39,7 @@ import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java index f0d442755c..71c4759f4e 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/TestTableIterable.java @@ -33,8 +33,8 @@ import org.apache.thrift.TException; import static org.junit.Assert.assertEquals; -import static org.mockito.Matchers.anyListOf; -import static org.mockito.Matchers.anyString; +import static org.mockito.ArgumentMatchers.anyListOf; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestChainFilter.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestChainFilter.java index 5835007d86..257187c735 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestChainFilter.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestChainFilter.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import 
org.mockito.Mock; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestCustomQueryFilter.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestCustomQueryFilter.java index ce8b1b7d14..76a125529c 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestCustomQueryFilter.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestCustomQueryFilter.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestGroupFilter.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestGroupFilter.java index eef1f064a7..46ca3151cb 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestGroupFilter.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestGroupFilter.java @@ -26,7 +26,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; @@ -144,7 +144,8 @@ public void testGroupMembershipKeyCaseInsensitiveFilterApplyNegative() throws AuthenticationException, NamingException, IOException { MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_PLAIN_LDAP_GROUPFILTER, "hiveusers,containsg1"); - when(search.findGroupsForUser(eq("user1"))).thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); + lenient().when(search.findGroupsForUser(eq("user1"))) + .thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); Filter filter = factory.getInstance(conf); filter.apply(search, "user1"); @@ -155,7 +156,8 @@ public void testGroupMembershipKeyFilterApplyNegative() throws AuthenticationException, NamingException, IOException { MetastoreConf.setVar(conf, MetastoreConf.ConfVars.METASTORE_PLAIN_LDAP_GROUPFILTER, "HiveUsers"); - when(search.findGroupsForUser(eq("user1"))).thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); + lenient().when(search.findGroupsForUser(eq("user1"))) + .thenReturn(Arrays.asList("SuperUsers", "Office1", "G1", "G2")); Filter filter = factory.getInstance(conf); filter.apply(search, "user1"); diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestLdapSearch.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestLdapSearch.java index df913db13f..92aef8c25e 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestLdapSearch.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestLdapSearch.java @@ -34,7 +34,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git 
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestSearchResultHandler.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestSearchResultHandler.java index 74022e93b0..041e2a1c1e 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestSearchResultHandler.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestSearchResultHandler.java @@ -28,7 +28,7 @@ import javax.naming.directory.SearchResult; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserFilter.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserFilter.java index a79f0de0c2..1ee46f457b 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserFilter.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserFilter.java @@ -25,7 +25,7 @@ import org.apache.hadoop.hive.metastore.conf.MetastoreConf; import org.junit.Test; import org.junit.runner.RunWith; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import org.junit.Before; import org.mockito.Mock; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserSearchFilter.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserSearchFilter.java index bf182ec07d..ebeeb0837d 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserSearchFilter.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/ldap/TestUserSearchFilter.java @@ -27,7 +27,7 @@ import org.junit.Test; import org.junit.runner.RunWith; import org.mockito.Mock; -import org.mockito.runners.MockitoJUnitRunner; +import org.mockito.junit.MockitoJUnitRunner; import static org.junit.Assert.*; import static org.mockito.Mockito.*; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestMetaToolTaskUpdateLocation.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestMetaToolTaskUpdateLocation.java index 63868b58da..6b5d96f2b4 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestMetaToolTaskUpdateLocation.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/metatool/TestMetaToolTaskUpdateLocation.java @@ -18,7 +18,7 @@ package org.apache.hadoop.hive.metastore.tools.metatool; -import static org.mockito.Matchers.eq; +import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.when; import java.io.ByteArrayOutputStream; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolTaskDrop.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolTaskDrop.java index 271f83a6b7..6a3b896a37 100644 --- 
standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolTaskDrop.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/tools/schematool/TestSchemaToolTaskDrop.java @@ -30,9 +30,9 @@ import java.sql.ResultSet; import java.sql.Statement; -import static org.mockito.Matchers.anyBoolean; -import static org.mockito.Matchers.anyInt; -import static org.mockito.Matchers.anyString; +import static org.mockito.ArgumentMatchers.anyBoolean; +import static org.mockito.ArgumentMatchers.anyInt; +import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.times; import static org.mockito.Mockito.when; diff --git standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java index de21fc5017..e76f2b4848 100644 --- standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java +++ standalone-metastore/metastore-server/src/test/java/org/apache/hadoop/hive/metastore/utils/TestHdfsUtils.java @@ -39,7 +39,7 @@ import java.util.List; import java.util.Random; -import static org.mockito.Matchers.any; +import static org.mockito.ArgumentMatchers.any; import static org.mockito.Mockito.doThrow; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.verify; @@ -222,7 +222,7 @@ public void testSetFullFileStatusFailInheritGroup() throws IOException { doThrow(RuntimeException.class).when(fs).setOwner(any(Path.class), any(String.class), any(String.class)); HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "fakeGroup2", fs, new Path("fakePath"), false); - verify(fs).setOwner(any(Path.class), any(String.class), any(String.class)); + verify(fs).setOwner(any(Path.class), any(), any(String.class)); } /** @@ -320,7 +320,7 @@ public void testSetFullFileStatusFailInheritAclsRecursive() throws Exception { doThrow(RuntimeException.class).when(mockFsShell).run(any(String[].class)); HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell); - verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(String.class), fakeTarget.toString()}); + verify(mockFsShell).run(new String[]{"-setfacl", "-R", "--set", any(), fakeTarget.toString()}); } /** @@ -343,6 +343,6 @@ public void testSetFullFileStatusFailInheritPermsRecursive() throws Exception { HdfsUtils.setFullFileStatus(conf, mockHadoopFileStatus, "", mock(FileSystem.class), fakeTarget, true, mockFsShell); - verify(mockFsShell).run(new String[]{"-chmod", "-R", any(String.class), fakeTarget.toString()}); + verify(mockFsShell).run(new String[]{"-chmod", "-R", any(), fakeTarget.toString()}); } } diff --git standalone-metastore/pom.xml standalone-metastore/pom.xml index 0fa63898ed..b26e3b3c99 100644 --- standalone-metastore/pom.xml +++ standalone-metastore/pom.xml @@ -90,7 +90,7 @@ 0.9.3 0.9.3-1 2.12.1 - 1.10.19 + 3.3.3 1.5.1 com.google.protobuf diff --git testutils/ptest2/pom.xml testutils/ptest2/pom.xml index cc04607584..24de8f4d58 100644 --- testutils/ptest2/pom.xml +++ testutils/ptest2/pom.xml @@ -167,7 +167,7 @@ limitations under the License. 
org.mockito mockito-core - 1.9.0 + 3.3.3 test diff --git testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudComputeService.java testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudComputeService.java index c75e26e0db..64e74aec6d 100644 --- testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudComputeService.java +++ testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudComputeService.java @@ -27,7 +27,8 @@ import com.google.common.collect.Sets; -import static org.mockito.Mockito.*; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; public class TestCloudComputeService { private static final String GROUP_NAME = "grp"; diff --git testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudExecutionContextProvider.java testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudExecutionContextProvider.java index 4985b7d4e6..3fe89b04df 100644 --- testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudExecutionContextProvider.java +++ testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/context/TestCloudExecutionContextProvider.java @@ -18,7 +18,7 @@ */ package org.apache.hive.ptest.execution.context; -import static org.mockito.Matchers.anyInt; +import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.when; diff --git testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/ssh/TestSSHCommandExecutor.java testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/ssh/TestSSHCommandExecutor.java index 39907cd3fe..a329a0f544 100644 --- testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/ssh/TestSSHCommandExecutor.java +++ testutils/ptest2/src/test/java/org/apache/hive/ptest/execution/ssh/TestSSHCommandExecutor.java @@ -23,11 +23,11 @@ import static org.mockito.Mockito.times; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; -import org.junit.Assert; import org.apache.hive.ptest.execution.Constants; import org.apache.hive.ptest.execution.LocalCommand; import org.apache.hive.ptest.execution.MockLocalCommandFactory; +import org.junit.Assert; import org.junit.Before; import org.junit.Test; import org.mockito.invocation.InvocationOnMock;
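Taken together, the test changes above apply one mechanical Mockito 1.x to 3.3.3 migration recipe: argument-matcher static imports move from org.mockito.Matchers to org.mockito.ArgumentMatchers, the JUnit runner moves from org.mockito.runners.MockitoJUnitRunner to org.mockito.junit.MockitoJUnitRunner, and stubbings that a given test path never consumes are wrapped in lenient(). A minimal self-contained sketch of the pattern follows; the DirSearch interface here is a stand-in defined only for illustration, not the Hive class of the same name.

import static org.mockito.ArgumentMatchers.anyString; // was org.mockito.Matchers.anyString
import static org.mockito.Mockito.lenient;
import static org.mockito.Mockito.mock;

import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.junit.MockitoJUnitRunner; // was org.mockito.runners.MockitoJUnitRunner

@RunWith(MockitoJUnitRunner.class)
public class MigrationSketchTest {

  /** Stand-in collaborator for illustration, not the Hive DirSearch. */
  interface DirSearch {
    String findUserDn(String user) throws Exception;
  }

  @Test
  public void unusedStubbingMustBeLenient() throws Exception {
    DirSearch search = mock(DirSearch.class);
    // The default runner in Mockito 2+ detects stubbings the test never uses
    // and fails the class run with UnnecessaryStubbingException; lenient()
    // opts a stubbing out of that check. This is why the patch adds lenient()
    // to stubs on paths where authentication fails before they are reached.
    lenient().when(search.findUserDn(anyString()))
        .thenReturn("cn=user1,ou=PowerUsers,dc=mycorp,dc=com");
  }
}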
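A subtler part of the same migration shows up in the TestHdfsUtils hunks: in Mockito 1.x, any(String.class) matched null arguments, while from Mockito 2 on, any(Class) matches only non-null values of that type, so verifications of calls that receive null must switch to the untyped any() (or isNull()). A minimal sketch, assuming the second setOwner argument is null at runtime, which is the likely reason the patch relaxes that matcher:

import static org.mockito.ArgumentMatchers.any;
import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.verify;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class NullMatcherSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = mock(FileSystem.class);
    fs.setOwner(new Path("/tmp/x"), null, "fakeGroup"); // username is null

    // Mockito 1.x: any(String.class) matched the null username.
    // Mockito 2+:  any(String.class) excludes null, so only the untyped
    // any() (or isNull()) lets this verification pass.
    verify(fs).setOwner(any(Path.class), any(), any(String.class));
  }
}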
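Finally, for readers tracing the removed kill-query coordination earlier in this patch: its protocol state lives entirely in ZooKeeper node names (NO:/PROGRESS:/DONE:/FAILED: children under the barrier node) plus one ':'-delimited payload stored on the barrier node itself. The payload rules are small enough to restate standalone. The sketch below mirrors the deleted KillQueryZookeeperData, including the detail that ':' in the requesting server URI is removed outright rather than escaped; KillQueryPayload is a name invented here, not a class from the patch.

/** Standalone restatement of the deleted ':'-delimited payload format. */
public final class KillQueryPayload {
  private final String queryId;
  private final String requestingServer;
  private final String doAs;
  private final boolean doAsAdmin;

  public KillQueryPayload(String queryId, String requestingServer, String doAs, boolean doAsAdmin) {
    // queryId and doAs must be delimiter-free; the server URI has its ':' stripped.
    if (queryId.contains(":") || doAs.contains(":")) {
      throw new IllegalArgumentException("queryId and doAs cannot contain ':'");
    }
    this.queryId = queryId;
    this.requestingServer = requestingServer.replace(":", "");
    this.doAs = doAs;
    this.doAsAdmin = doAsAdmin;
  }

  /** Field order on the wire: queryId:requestingServer:doAs:doAsAdmin. */
  public String encode() {
    return queryId + ":" + requestingServer + ":" + doAs + ":" + doAsAdmin;
  }

  public static KillQueryPayload decode(String data) {
    String[] f = data.split(":");
    return new KillQueryPayload(f[0], f[1], f[2], Boolean.parseBoolean(f[3]));
  }
}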