diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
index 42005f0b09b..db5a56730d0 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Time.java
@@ -18,8 +18,6 @@
 package org.apache.hadoop.util;
 
 import java.text.SimpleDateFormat;
-import java.util.Calendar;
-import java.util.TimeZone;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
@@ -36,8 +34,6 @@
    */
   private static final long NANOSECONDS_PER_MILLISECOND = 1000000;
 
-  private static final TimeZone UTC_ZONE = TimeZone.getTimeZone("UTC");
-
   private static final ThreadLocal<SimpleDateFormat> DATE_FORMAT =
       new ThreadLocal<SimpleDateFormat>() {
     @Override
@@ -86,12 +82,4 @@ public static long monotonicNowNanos() {
   public static String formatTime(long millis) {
     return DATE_FORMAT.get().format(millis);
   }
-
-  /**
-   * Get the current UTC time in milliseconds.
-   * @return the current UTC time in milliseconds.
-   */
-  public static long getUtcTime() {
-    return Calendar.getInstance(UTC_ZONE).getTimeInMillis();
-  }
 }
diff --git a/hadoop-dist/pom.xml b/hadoop-dist/pom.xml
index 43836eb3884..f9b8573d806 100644
--- a/hadoop-dist/pom.xml
+++ b/hadoop-dist/pom.xml
@@ -229,32 +229,30 @@
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-ozone-manager</artifactId>
-      <scope>provided</scope>
     </dependency>
     <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdds-server-scm</artifactId>
-      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdds-tools</artifactId>
-      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-hdds-container-service</artifactId>
-      <scope>provided</scope>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-ozone-objectstore-service</artifactId>
-      <scope>provided</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-tools</artifactId>
    </dependency>
    <dependency>
      <groupId>org.apache.hadoop</groupId>
      <artifactId>hadoop-ozone-tools</artifactId>
-      <scope>provided</scope>
    </dependency>
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
index dcaa57621d1..75851042c29 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/XceiverClientManager.java
@@ -60,7 +60,7 @@
 
   //TODO : change this to SCM configuration class
   private final Configuration conf;
-  private final Cache<Long, XceiverClientSpi> clientCache;
+  private final Cache<String, XceiverClientSpi> clientCache;
   private final boolean useRatis;
 
   private static XceiverClientMetrics metrics;
@@ -84,10 +84,10 @@ public XceiverClientManager(Configuration conf) {
         .expireAfterAccess(staleThresholdMs, TimeUnit.MILLISECONDS)
         .maximumSize(maxSize)
         .removalListener(
-            new RemovalListener<Long, XceiverClientSpi>() {
+            new RemovalListener<String, XceiverClientSpi>() {
             @Override
            public void onRemoval(
-                RemovalNotification<Long, XceiverClientSpi>
+                RemovalNotification<String, XceiverClientSpi>
                     removalNotification) {
              synchronized (clientCache) {
                // Mark the entry as evicted
@@ -99,7 +99,7 @@ public void onRemoval(
   }
 
   @VisibleForTesting
-  public Cache<Long, XceiverClientSpi> getClientCache() {
+  public Cache<String, XceiverClientSpi> getClientCache() {
     return clientCache;
   }
 
@@ -114,14 +114,14 @@ public void onRemoval(
    * @return XceiverClientSpi connected to a container
    * @throws IOException if a XceiverClientSpi cannot be acquired
    */
-  public XceiverClientSpi acquireClient(Pipeline pipeline, long containerID)
+  public XceiverClientSpi acquireClient(Pipeline pipeline)
       throws IOException {
     Preconditions.checkNotNull(pipeline);
     Preconditions.checkArgument(pipeline.getMachines() != null);
     Preconditions.checkArgument(!pipeline.getMachines().isEmpty());
 
     synchronized (clientCache) {
-      XceiverClientSpi info = getClient(pipeline, containerID);
+      XceiverClientSpi info = getClient(pipeline);
       info.incrementReference();
       return info;
     }
@@ -139,10 +139,11 @@ public void releaseClient(XceiverClientSpi client) {
     }
   }
 
-  private XceiverClientSpi getClient(Pipeline pipeline, long containerID)
+  private XceiverClientSpi getClient(Pipeline pipeline)
       throws IOException {
+    String containerName = pipeline.getContainerName();
     try {
-      return clientCache.get(containerID,
+      return clientCache.get(containerName,
           new Callable<XceiverClientSpi>() {
             @Override
             public XceiverClientSpi call() throws Exception {
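The hunks above re-key XceiverClientManager's client cache from the numeric container ID to the container name carried on the Pipeline. A minimal usage sketch under that assumption (the `conf` and `pipeline` values here are hypothetical, assumed to be in scope):

    // Sketch only: acquire and release a cached client after this change.
    // The cache key is now pipeline.getContainerName(), so callers no
    // longer pass a containerID.
    XceiverClientManager manager = new XceiverClientManager(conf);
    XceiverClientSpi client = manager.acquireClient(pipeline);
    try {
      // ... issue container RPCs through 'client' ...
    } finally {
      manager.releaseClient(client);  // decrements the reference count
    }
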
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
index 15d197c405d..8f30a7fad10 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/client/ContainerOperationClient.java
@@ -86,16 +86,15 @@ public static void setContainerSizeB(long size) {
    * @inheritDoc
    */
   @Override
-  public ContainerInfo createContainer(String owner)
+  public Pipeline createContainer(String containerId, String owner)
       throws IOException {
     XceiverClientSpi client = null;
     try {
-      ContainerInfo container =
+      Pipeline pipeline =
           storageContainerLocationClient.allocateContainer(
               xceiverClientManager.getType(),
-              xceiverClientManager.getFactor(), owner);
-      Pipeline pipeline = container.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline, container.getContainerID());
+              xceiverClientManager.getFactor(), containerId, owner);
+      client = xceiverClientManager.acquireClient(pipeline);
 
       // Allocated State means that SCM has allocated this pipeline in its
       // namespace. The client needs to create the pipeline on the machines
@@ -105,8 +104,10 @@ public ContainerInfo createContainer(String owner)
       if (pipeline.getLifeCycleState() == ALLOCATED) {
         createPipeline(client, pipeline);
       }
-      createContainer(client, container.getContainerID());
-      return container;
+      // TODO : Container Client State needs to be updated.
+      // TODO : Return ContainerInfo instead of Pipeline
+      createContainer(containerId, client, pipeline);
+      return pipeline;
     } finally {
       if (client != null) {
         xceiverClientManager.releaseClient(client);
@@ -117,19 +118,20 @@ public ContainerInfo createContainer(String owner)
   /**
    * Create a container over pipeline specified by the SCM.
    *
-   * @param client - Client to communicate with Datanodes.
-   * @param containerId - Container ID.
+   * @param containerId - Container ID
+   * @param client - Client to communicate with Datanodes
+   * @param pipeline - A pipeline that is already created.
    * @throws IOException
    */
-  public void createContainer(XceiverClientSpi client,
-      long containerId) throws IOException {
+  public void createContainer(String containerId, XceiverClientSpi client,
+      Pipeline pipeline) throws IOException {
     String traceID = UUID.randomUUID().toString();
     storageContainerLocationClient.notifyObjectStageChange(
         ObjectStageChangeRequestProto.Type.container,
         containerId,
         ObjectStageChangeRequestProto.Op.create,
         ObjectStageChangeRequestProto.Stage.begin);
-    ContainerProtocolCalls.createContainer(client, containerId, traceID);
+    ContainerProtocolCalls.createContainer(client, traceID);
     storageContainerLocationClient.notifyObjectStageChange(
         ObjectStageChangeRequestProto.Type.container,
         containerId,
@@ -140,8 +142,8 @@ public void createContainer(XceiverClientSpi client,
     // creation state.
     if (LOG.isDebugEnabled()) {
       LOG.debug("Created container " + containerId
-          + " leader:" + client.getPipeline().getLeader()
-          + " machines:" + client.getPipeline().getMachines());
+          + " leader:" + pipeline.getLeader()
+          + " machines:" + pipeline.getMachines());
     }
   }
 
@@ -166,25 +168,20 @@ private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
     // 2. Talk to Datanodes to create the pipeline.
     //
     // 3. update SCM that pipeline creation was successful.
-
-    // TODO: this has not been fully implemented on server side
-    // SCMClientProtocolServer#notifyObjectStageChange
-    // TODO: when implement the pipeline state machine, change
-    // the pipeline name (string) to pipeline id (long)
-    //storageContainerLocationClient.notifyObjectStageChange(
-    //    ObjectStageChangeRequestProto.Type.pipeline,
-    //    pipeline.getPipelineName(),
-    //    ObjectStageChangeRequestProto.Op.create,
-    //    ObjectStageChangeRequestProto.Stage.begin);
+    storageContainerLocationClient.notifyObjectStageChange(
+        ObjectStageChangeRequestProto.Type.pipeline,
+        pipeline.getPipelineName(),
+        ObjectStageChangeRequestProto.Op.create,
+        ObjectStageChangeRequestProto.Stage.begin);
 
     client.createPipeline(pipeline.getPipelineName(),
         pipeline.getMachines());
-    //storageContainerLocationClient.notifyObjectStageChange(
-    //    ObjectStageChangeRequestProto.Type.pipeline,
-    //    pipeline.getPipelineName(),
-    //    ObjectStageChangeRequestProto.Op.create,
-    //    ObjectStageChangeRequestProto.Stage.complete);
+    storageContainerLocationClient.notifyObjectStageChange(
+        ObjectStageChangeRequestProto.Type.pipeline,
+        pipeline.getPipelineName(),
+        ObjectStageChangeRequestProto.Op.create,
+        ObjectStageChangeRequestProto.Stage.complete);
 
     // TODO : Should we change the state on the client side ??
     // That makes sense, but it is not needed for the client to work.
@@ -196,17 +193,16 @@ private void createPipeline(XceiverClientSpi client, Pipeline pipeline)
    * @inheritDoc
    */
   @Override
-  public ContainerInfo createContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner) throws IOException {
+  public Pipeline createContainer(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor,
+      String containerId, String owner) throws IOException {
     XceiverClientSpi client = null;
     try {
       // allocate container on SCM.
-      ContainerInfo container =
+      Pipeline pipeline =
           storageContainerLocationClient.allocateContainer(type, factor,
-              owner);
-      Pipeline pipeline = container.getPipeline();
-      client = xceiverClientManager.acquireClient(pipeline,
-          container.getContainerID());
+              containerId, owner);
+      client = xceiverClientManager.acquireClient(pipeline);
 
       // Allocated State means that SCM has allocated this pipeline in its
       // namespace. The client needs to create the pipeline on the machines
@@ -214,11 +210,12 @@ public ContainerInfo createContainer(HddsProtos.ReplicationType type,
       if (pipeline.getLifeCycleState() == ALLOCATED) {
         createPipeline(client, pipeline);
       }
+
+      // TODO : Return ContainerInfo instead of Pipeline
       // connect to pipeline leader and allocate container on leader datanode.
-      client = xceiverClientManager.acquireClient(pipeline,
-          container.getContainerID());
-      createContainer(client, container.getContainerID());
-      return container;
+      client = xceiverClientManager.acquireClient(pipeline);
+      createContainer(containerId, client, pipeline);
+      return pipeline;
     } finally {
       if (client != null) {
         xceiverClientManager.releaseClient(client);
@@ -261,18 +258,18 @@ public Pipeline createReplicationPipeline(HddsProtos.ReplicationType type,
    * @throws IOException
    */
   @Override
-  public void deleteContainer(long containerID, Pipeline pipeline, boolean force)
+  public void deleteContainer(Pipeline pipeline, boolean force)
       throws IOException {
     XceiverClientSpi client = null;
     try {
-      client = xceiverClientManager.acquireClient(pipeline, containerID);
+      client = xceiverClientManager.acquireClient(pipeline);
       String traceID = UUID.randomUUID().toString();
-      ContainerProtocolCalls.deleteContainer(client, containerID, force, traceID);
+      ContainerProtocolCalls.deleteContainer(client, force, traceID);
       storageContainerLocationClient
-          .deleteContainer(containerID);
+          .deleteContainer(pipeline.getContainerName());
       if (LOG.isDebugEnabled()) {
         LOG.debug("Deleted container {}, leader: {}, machines: {} ",
-            containerID,
+            pipeline.getContainerName(),
             pipeline.getLeader(), pipeline.getMachines());
       }
@@ -287,10 +284,11 @@ public void deleteContainer(long containerID, Pipeline pipeline, boolean force)
    * {@inheritDoc}
    */
   @Override
-  public List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException {
+  public List<ContainerInfo> listContainer(String startName,
+      String prefixName, int count)
+      throws IOException {
     return storageContainerLocationClient.listContainer(
-        startContainerID, count);
+        startName, prefixName, count);
   }
 
   /**
@@ -302,17 +300,17 @@ public void deleteContainer(long containerID, Pipeline pipeline, boolean force)
    * @throws IOException
    */
   @Override
-  public ContainerData readContainer(long containerID,
-      Pipeline pipeline) throws IOException {
+  public ContainerData readContainer(Pipeline pipeline) throws IOException {
     XceiverClientSpi client = null;
     try {
-      client = xceiverClientManager.acquireClient(pipeline, containerID);
+      client = xceiverClientManager.acquireClient(pipeline);
       String traceID = UUID.randomUUID().toString();
       ReadContainerResponseProto response =
-          ContainerProtocolCalls.readContainer(client, containerID, traceID);
+          ContainerProtocolCalls.readContainer(client,
+              pipeline.getContainerName(), traceID);
       if (LOG.isDebugEnabled()) {
         LOG.debug("Read container {}, leader: {}, machines: {} ",
-            containerID,
+            pipeline.getContainerName(),
             pipeline.getLeader(), pipeline.getMachines());
       }
@@ -331,7 +329,7 @@ public ContainerData readContainer(long containerID,
    * @throws IOException
    */
   @Override
-  public ContainerInfo getContainer(long containerId) throws
+  public Pipeline getContainer(String containerId) throws
       IOException {
     return storageContainerLocationClient.getContainer(containerId);
   }
@@ -343,8 +341,7 @@ public ContainerInfo getContainer(long containerId) throws
    * @throws IOException
    */
   @Override
-  public void closeContainer(long containerId, Pipeline pipeline)
-      throws IOException {
+  public void closeContainer(Pipeline pipeline) throws IOException {
     XceiverClientSpi client = null;
     try {
       LOG.debug("Close container {}", pipeline);
@@ -367,16 +364,18 @@ public void closeContainer(long containerId, Pipeline pipeline)
        For now, take the #2 way.
      */
     // Actually close the container on Datanode
-    client = xceiverClientManager.acquireClient(pipeline, containerId);
+    client = xceiverClientManager.acquireClient(pipeline);
 
     String traceID = UUID.randomUUID().toString();
 
+    String containerId = pipeline.getContainerName();
+
     storageContainerLocationClient.notifyObjectStageChange(
         ObjectStageChangeRequestProto.Type.container,
         containerId,
         ObjectStageChangeRequestProto.Op.close,
         ObjectStageChangeRequestProto.Stage.begin);
 
-    ContainerProtocolCalls.closeContainer(client, containerId, traceID);
+    ContainerProtocolCalls.closeContainer(client, traceID);
     // Notify SCM to close the container
     storageContainerLocationClient.notifyObjectStageChange(
         ObjectStageChangeRequestProto.Type.container,
@@ -392,13 +391,13 @@ public void closeContainer(long containerId, Pipeline pipeline)
 
   /**
    * Get the the current usage information.
-   * @param containerID - ID of the container.
+   * @param pipeline - Pipeline
    * @return the size of the given container.
    * @throws IOException
    */
   @Override
-  public long getContainerSize(long containerID) throws IOException {
-    // TODO : Fix this, it currently returns the capacity but not the current usage.
+  public long getContainerSize(Pipeline pipeline) throws IOException {
+    // TODO : Pipeline can be null, handle it correctly.
     long size = getContainerSizeB();
     if (size == -1) {
       throw new IOException("Container size unknown!");
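ContainerOperationClient now takes an explicit container name and returns the Pipeline it allocated instead of an ID-keyed ContainerInfo. A hedged sketch of the resulting call sequence against the ScmClient interface ("demo-container", "demo-owner", and the in-scope `scmClient` are made-up values):

    // Sketch only: create, inspect, close and delete a container by name.
    Pipeline pipeline = scmClient.createContainer("demo-container", "demo-owner");
    ContainerData data = scmClient.readContainer(pipeline);
    scmClient.closeContainer(pipeline);
    scmClient.deleteContainer(pipeline, false /* force */);
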
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
index c4c336221c5..9b8eaa9661f 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkInputStream.java
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos
     .ReadChunkResponseProto;
-import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.EOFException;
 import java.io.IOException;
@@ -46,7 +45,7 @@
 
   private static final int EOF = -1;
 
-  private final BlockID blockID;
+  private final String key;
   private final String traceID;
   private XceiverClientManager xceiverClientManager;
   private XceiverClientSpi xceiverClient;
@@ -59,15 +58,15 @@
   /**
    * Creates a new ChunkInputStream.
    *
-   * @param blockID block ID of the chunk
+   * @param key chunk key
    * @param xceiverClientManager client manager that controls client
    * @param xceiverClient client to perform container calls
    * @param chunks list of chunks to read
    * @param traceID container protocol call traceID
    */
-  public ChunkInputStream(BlockID blockID, XceiverClientManager xceiverClientManager,
+  public ChunkInputStream(String key, XceiverClientManager xceiverClientManager,
       XceiverClientSpi xceiverClient, List<ChunkInfo> chunks, String traceID) {
-    this.blockID = blockID;
+    this.key = key;
     this.traceID = traceID;
     this.xceiverClientManager = xceiverClientManager;
     this.xceiverClient = xceiverClient;
@@ -197,7 +196,7 @@ private synchronized void readChunkFromContainer() throws IOException {
     final ReadChunkResponseProto readChunkResponse;
     try {
       readChunkResponse = ContainerProtocolCalls.readChunk(xceiverClient,
-          chunks.get(chunkIndex), blockID, traceID);
+          chunks.get(chunkIndex), key, traceID);
     } catch (IOException e) {
       throw new IOException("Unexpected OzoneException: " + e.toString(), e);
     }
@@ -212,7 +211,7 @@ public synchronized void seek(long pos) throws IOException {
         || pos >= chunkOffset[chunks.size() - 1] + chunks.get(chunks.size() - 1)
         .getLen()) {
       throw new EOFException(
-          "EOF encountered pos: " + pos + " container key: " + blockID.getLocalID());
+          "EOF encountered pos: " + pos + " container key: " + key);
     }
     if (chunkIndex == -1) {
       chunkIndex = Arrays.binarySearch(chunkOffset, pos);
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
index 325f110512d..b65df9f89b0 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/ChunkOutputStream.java
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.ChunkInfo;
 import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.KeyData;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos.KeyValue;
-import org.apache.hadoop.hdds.client.BlockID;
 
 import java.io.IOException;
 import java.io.OutputStream;
@@ -54,7 +53,7 @@
  */
 public class ChunkOutputStream extends OutputStream {
 
-  private final BlockID blockID;
+  private final String containerKey;
   private final String key;
   private final String traceID;
   private final KeyData.Builder containerKeyData;
@@ -68,24 +67,25 @@
   /**
    * Creates a new ChunkOutputStream.
    *
-   * @param blockID block ID
+   * @param containerKey container key
    * @param key chunk key
    * @param xceiverClientManager client manager that controls client
    * @param xceiverClient client to perform container calls
    * @param traceID container protocol call args
    * @param chunkSize chunk size
    */
-  public ChunkOutputStream(BlockID blockID, String key,
-      XceiverClientManager xceiverClientManager, XceiverClientSpi xceiverClient,
-      String traceID, int chunkSize) {
-    this.blockID = blockID;
+  public ChunkOutputStream(String containerKey, String key,
+      XceiverClientManager xceiverClientManager, XceiverClientSpi xceiverClient,
+      String traceID, int chunkSize) {
+    this.containerKey = containerKey;
     this.key = key;
     this.traceID = traceID;
     this.chunkSize = chunkSize;
     KeyValue keyValue = KeyValue.newBuilder()
         .setKey("TYPE").setValue("KEY").build();
     this.containerKeyData = KeyData.newBuilder()
-        .setBlockID(blockID.getProtobuf())
+        .setContainerName(xceiverClient.getPipeline().getContainerName())
+        .setName(containerKey)
         .addMetadata(keyValue);
     this.xceiverClientManager = xceiverClientManager;
     this.xceiverClient = xceiverClient;
@@ -217,7 +217,7 @@ private synchronized void writeChunkToContainer() throws IOException {
         .setLen(data.size())
         .build();
     try {
-      writeChunk(xceiverClient, chunk, blockID, data, traceID);
+      writeChunk(xceiverClient, chunk, key, data, traceID);
     } catch (IOException e) {
       throw new IOException(
           "Unexpected Storage Container Exception: " + e.toString(), e);
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
deleted file mode 100644
index 7236af7e30a..00000000000
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/client/BlockID.java
+++ /dev/null
@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements. See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership. The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
- */
-package org.apache.hadoop.hdds.client;
-
-import org.apache.commons.lang.builder.ToStringBuilder;
-import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-
-/**
- * BlockID of ozone (containerID + localID)
- */
-public class BlockID {
-  private long containerID;
-  private long localID;
-
-  public BlockID(long containerID, long localID) {
-    this.containerID = containerID;
-    this.localID = localID;
-  }
-
-  public long getContainerID() {
-    return containerID;
-  }
-
-  public long getLocalID() {
-    return localID;
-  }
-
-  @Override
-  public String toString() {
-    return new ToStringBuilder(this).
-        append("containerID", containerID).
-        append("localID", localID).
-        toString();
-  }
-
-  public HddsProtos.BlockID getProtobuf() {
-    return HddsProtos.BlockID.newBuilder().
-        setContainerID(containerID).setLocalID(localID).build();
-  }
-
-  public static BlockID getFromProtobuf(HddsProtos.BlockID blockID) {
-    return new BlockID(blockID.getContainerID(),
-        blockID.getLocalID());
-  }
-}
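With the BlockID helper deleted, both chunk streams address data by plain string keys again. A sketch of the write path under these signatures (every argument value below is hypothetical, and `xceiverClientManager`, `xceiverClient`, and `data` are assumed to be in scope):

    // Sketch only: ChunkOutputStream is keyed by (containerKey, key) strings;
    // the container name travels on the client's Pipeline.
    ChunkOutputStream out = new ChunkOutputStream(
        "bucket/key",                  // containerKey
        "bucket/key",                  // key naming the chunk series
        xceiverClientManager, xceiverClient,
        UUID.randomUUID().toString(),  // traceID
        4 * 1024 * 1024);              // chunkSize, e.g. 4 MB
    out.write(data);
    out.close();
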
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
index dcf9fed800c..0d4a2990b6c 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/client/ScmClient.java
@@ -41,76 +41,78 @@ public interface ScmClient {
   /**
    * Creates a Container on SCM and returns the pipeline.
-   * @return ContainerInfo
+   * @param containerId - String container ID
+   * @return Pipeline
    * @throws IOException
    */
-  ContainerInfo createContainer(String owner) throws IOException;
+  Pipeline createContainer(String containerId, String owner) throws IOException;
 
   /**
    * Gets a container by Name -- Throws if the container does not exist.
-   * @param containerId - Container ID
+   * @param containerId - String Container ID
    * @return Pipeline
    * @throws IOException
    */
-  ContainerInfo getContainer(long containerId) throws IOException;
+  Pipeline getContainer(String containerId) throws IOException;
 
   /**
-   * Close a container.
+   * Close a container by name.
    *
-   * @param containerId - ID of the container.
-   * @param pipeline - Pipeline where the container is located.
+   * @param pipeline the container to be closed.
    * @throws IOException
    */
-  void closeContainer(long containerId, Pipeline pipeline) throws IOException;
+  void closeContainer(Pipeline pipeline) throws IOException;
 
   /**
    * Deletes an existing container.
-   * @param containerId - ID of the container.
    * @param pipeline - Pipeline that represents the container.
    * @param force - true to forcibly delete the container.
    * @throws IOException
    */
-  void deleteContainer(long containerId, Pipeline pipeline, boolean force) throws IOException;
+  void deleteContainer(Pipeline pipeline, boolean force) throws IOException;
 
   /**
    * Lists a range of containers and get their info.
    *
-   * @param startContainerID start containerID.
-   * @param count count must be > 0.
+   * @param startName start name, if null, start searching at the head.
+   * @param prefixName prefix name, if null, then filter is disabled.
+   * @param count count, if count < 0, the max size is unlimited.(
+   *              Usually the count will be replace with a very big
+   *              value instead of being unlimited in case the db is very big)
    *
    * @return a list of pipeline.
    * @throws IOException
    */
-  List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException;
+  List<ContainerInfo> listContainer(String startName, String prefixName,
+      int count) throws IOException;
 
   /**
    * Read meta data from an existing container.
-   * @param containerID - ID of the container.
-   * @param pipeline - Pipeline where the container is located.
+   * @param pipeline - Pipeline that represents the container.
    * @return ContainerInfo
    * @throws IOException
    */
-  ContainerData readContainer(long containerID, Pipeline pipeline)
-      throws IOException;
+  ContainerData readContainer(Pipeline pipeline) throws IOException;
+
   /**
    * Gets the container size -- Computed by SCM from Container Reports.
-   * @param containerID - ID of the container.
+   * @param pipeline - Pipeline
    * @return number of bytes used by this container.
    * @throws IOException
   */
-  long getContainerSize(long containerID) throws IOException;
+  long getContainerSize(Pipeline pipeline) throws IOException;
 
   /**
    * Creates a Container on SCM and returns the pipeline.
    * @param type - Replication Type.
    * @param replicationFactor - Replication Factor
-   * @return ContainerInfo
+   * @param containerId - Container ID
+   * @return Pipeline
    * @throws IOException - in case of error.
    */
-  ContainerInfo createContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor replicationFactor,
+  Pipeline createContainer(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor replicationFactor, String containerId,
       String owner) throws IOException;
 
   /**
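listContainer switches from ID-based to name-based paging. A sketch of scanning all containers that share a prefix (the prefix, page size, and in-scope `scmClient` are arbitrary, and the exact inclusive/exclusive semantics of startName are the server's to define):

    // Sketch only: page through containers by name.
    String start = null;
    List<ContainerInfo> page;
    do {
      page = scmClient.listContainer(start, "demo-", 100);
      for (ContainerInfo info : page) {
        System.out.println(info.getContainerName());
      }
      if (!page.isEmpty()) {
        start = page.get(page.size() - 1).getContainerName();
      }
    } while (page.size() == 100);
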
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
index 9b8946978cb..d253b15cd2f 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/AllocatedBlock.java
@@ -18,15 +18,13 @@
 
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
-import org.apache.hadoop.hdds.client.BlockID;
-
 /**
  * Allocated block wraps the result returned from SCM#allocateBlock which
  * contains a Pipeline and the key.
  */
 public final class AllocatedBlock {
   private Pipeline pipeline;
-  private BlockID blockID;
+  private String key;
   // Indicates whether the client should create container before writing block.
   private boolean shouldCreateContainer;
@@ -35,7 +33,7 @@
    */
   public static class Builder {
     private Pipeline pipeline;
-    private BlockID blockID;
+    private String key;
     private boolean shouldCreateContainer;
 
     public Builder setPipeline(Pipeline p) {
@@ -43,8 +41,8 @@ public Builder setPipeline(Pipeline p) {
       return this;
     }
 
-    public Builder setBlockID(BlockID blockID) {
-      this.blockID = blockID;
+    public Builder setKey(String k) {
+      this.key = k;
       return this;
     }
 
@@ -54,14 +52,14 @@ public Builder setShouldCreateContainer(boolean shouldCreate) {
     }
 
     public AllocatedBlock build() {
-      return new AllocatedBlock(pipeline, blockID, shouldCreateContainer);
+      return new AllocatedBlock(pipeline, key, shouldCreateContainer);
     }
   }
 
-  private AllocatedBlock(Pipeline pipeline, BlockID blockID,
+  private AllocatedBlock(Pipeline pipeline, String key,
       boolean shouldCreateContainer) {
     this.pipeline = pipeline;
-    this.blockID = blockID;
+    this.key = key;
     this.shouldCreateContainer = shouldCreateContainer;
   }
 
@@ -69,8 +67,8 @@ public Pipeline getPipeline() {
     return pipeline;
   }
 
-  public BlockID getBlockID() {
-    return blockID;
+  public String getKey() {
+    return key;
   }
 
   public boolean getCreateContainer() {
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
index 0bd4c26d42a..823a7fbc05a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/ContainerInfo.java
@@ -43,9 +43,11 @@
   // The wall-clock ms since the epoch at which the current state enters.
   private long stateEnterTime;
   private String owner;
+  private String containerName;
   private long containerID;
   ContainerInfo(
       long containerID,
+      final String containerName,
       HddsProtos.LifeCycleState state,
       Pipeline pipeline,
       long allocatedBytes,
@@ -54,6 +56,7 @@
       long stateEnterTime,
       String owner) {
     this.containerID = containerID;
+    this.containerName = containerName;
     this.pipeline = pipeline;
     this.allocatedBytes = allocatedBytes;
     this.usedBytes = usedBytes;
@@ -79,6 +82,7 @@ public static ContainerInfo fromProtobuf(HddsProtos.SCMContainerInfo info) {
     builder.setState(info.getState());
     builder.setStateEnterTime(info.getStateEnterTime());
     builder.setOwner(info.getOwner());
+    builder.setContainerName(info.getContainerName());
     builder.setContainerID(info.getContainerID());
     return builder.build();
   }
@@ -87,6 +91,10 @@ public long getContainerID() {
     return containerID;
   }
 
+  public String getContainerName() {
+    return containerName;
+  }
+
   public HddsProtos.LifeCycleState getState() {
     return state;
   }
@@ -162,6 +170,7 @@ public void allocate(long size) {
     if (getOwner() != null) {
       builder.setOwner(getOwner());
     }
+    builder.setContainerName(getContainerName());
     return builder.build();
   }
@@ -180,6 +189,7 @@ public String toString() {
         + ", pipeline=" + pipeline
         + ", stateEnterTime=" + stateEnterTime
         + ", owner=" + owner
+        + ", containerName='" + containerName
         + '}';
   }
@@ -196,7 +206,7 @@ public boolean equals(Object o) {
     ContainerInfo that = (ContainerInfo) o;
 
     return new EqualsBuilder()
-        .append(getContainerID(), that.getContainerID())
+        .append(pipeline.getContainerName(), that.pipeline.getContainerName())
 
         // TODO : Fix this later. If we add these factors some tests fail.
         // So Commenting this to continue and will enforce this with
@@ -211,7 +221,7 @@ public boolean equals(Object o) {
   @Override
   public int hashCode() {
     return new HashCodeBuilder(11, 811)
-        .append(getContainerID())
+        .append(pipeline.getContainerName())
         .append(pipeline.getFactor())
         .append(pipeline.getType())
         .append(owner)
@@ -265,6 +275,7 @@ public int compareTo(ContainerInfo o) {
     private long keys;
     private long stateEnterTime;
     private String owner;
+    private String containerName;
     private long containerID;
 
     public Builder setContainerID(long id) {
@@ -308,9 +319,14 @@ public Builder setOwner(String containerOwner) {
       return this;
     }
 
+    public Builder setContainerName(String container) {
+      this.containerName = container;
+      return this;
+    }
+
     public ContainerInfo build() {
       return new
-          ContainerInfo(containerID, state, pipeline,
+          ContainerInfo(containerID, containerName, state, pipeline,
           allocated, used, keys, stateEnterTime, owner);
     }
   }
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
index 5f5aaceb16a..fd97eae3b70 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/DeleteBlockResult.java
@@ -17,8 +17,6 @@
 
 package org.apache.hadoop.hdds.scm.container.common.helpers;
 
-import org.apache.hadoop.hdds.client.BlockID;
-
 import static org.apache.hadoop.hdds.protocol.proto
     .ScmBlockLocationProtocolProtos.DeleteScmBlockResult;
 
@@ -26,21 +24,21 @@
  * Class wraps storage container manager block deletion results.
  */
 public class DeleteBlockResult {
-  private BlockID blockID;
+  private String key;
   private DeleteScmBlockResult.Result result;
 
-  public DeleteBlockResult(final BlockID blockID,
+  public DeleteBlockResult(final String key,
       final DeleteScmBlockResult.Result result) {
-    this.blockID = blockID;
+    this.key = key;
     this.result = result;
   }
 
   /**
-   * Get block id deleted.
-   * @return block id.
+   * Get key deleted.
+   * @return key name.
    */
-  public BlockID getBlockID() {
-    return blockID;
+  public String getKey() {
+    return key;
   }
 
   /**
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
index 87408385ecc..32d0a2d85aa 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/Pipeline.java
@@ -57,6 +57,7 @@
     WRITER = mapper.writer(filters);
   }
 
+  private String containerName;
   private PipelineChannel pipelineChannel;
   /**
    * Allows you to maintain private data on pipelines. This is not serialized
@@ -67,9 +68,11 @@
   /**
    * Constructs a new pipeline data structure.
    *
+   * @param containerName - Container
    * @param pipelineChannel - transport information for this container
    */
-  public Pipeline(PipelineChannel pipelineChannel) {
+  public Pipeline(String containerName, PipelineChannel pipelineChannel) {
+    this.containerName = containerName;
     this.pipelineChannel = pipelineChannel;
     data = null;
   }
@@ -84,7 +87,7 @@ public static Pipeline getFromProtoBuf(HddsProtos.Pipeline pipeline) {
     Preconditions.checkNotNull(pipeline);
     PipelineChannel pipelineChannel =
         PipelineChannel.getFromProtoBuf(pipeline.getPipelineChannel());
-    return new Pipeline(pipelineChannel);
+    return new Pipeline(pipeline.getContainerName(), pipelineChannel);
   }
 
   public HddsProtos.ReplicationFactor getFactor() {
@@ -143,11 +146,21 @@ public String getLeaderHost() {
   public HddsProtos.Pipeline getProtobufMessage() {
     HddsProtos.Pipeline.Builder builder =
         HddsProtos.Pipeline.newBuilder();
+    builder.setContainerName(this.containerName);
     builder.setPipelineChannel(this.pipelineChannel.getProtobufMessage());
     return builder.build();
   }
 
   /**
+   * Returns containerName if available.
+   *
+   * @return String.
+   */
+  public String getContainerName() {
+    return containerName;
+  }
+
+  /**
    * Returns private data that is set on this pipeline.
    *
    * @return blob, the user can interpret it any way they like.
@@ -210,6 +223,7 @@ public String toString() {
     pipelineChannel.getDatanodes().keySet().stream()
         .forEach(id -> b.
             append(id.endsWith(pipelineChannel.getLeaderID()) ? "*" + id : id));
+    b.append("] container:").append(containerName);
     b.append(" name:").append(getPipelineName());
     if (getType() != null) {
       b.append(" type:").append(getType().toString());
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
index 655751d737a..ebd52e99844 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/container/common/helpers/PipelineChannel.java
@@ -40,8 +40,6 @@
   private ReplicationType type;
   private ReplicationFactor factor;
   private String name;
-  // TODO: change to long based id
-  //private long id;
 
   public PipelineChannel(String leaderID, LifeCycleState lifeCycleState,
       ReplicationType replicationType, ReplicationFactor replicationFactor,
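A Pipeline now carries its container name and round-trips it through the protobuf message. A sketch using the PipelineChannel constructor shown above (all identifiers are made up):

    // Sketch only: the container name survives a protobuf round trip.
    PipelineChannel channel = new PipelineChannel("datanode-1",
        LifeCycleState.OPEN, ReplicationType.STAND_ALONE,
        ReplicationFactor.ONE, "pipeline-1");
    Pipeline pipeline = new Pipeline("demo-container", channel);
    HddsProtos.Pipeline proto = pipeline.getProtobufMessage();
    assert Pipeline.getFromProtoBuf(proto)
        .getContainerName().equals("demo-container");
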
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
index c8d4a809fa5..f100fc702ce 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/ScmBlockLocationProtocol.java
@@ -26,6 +26,7 @@
 
 import java.io.IOException;
 import java.util.List;
+import java.util.Set;
 
 /**
  * ScmBlockLocationProtocol is used by an HDFS node to find the set of nodes
@@ -34,6 +35,17 @@
 public interface ScmBlockLocationProtocol {
 
   /**
+   * Find the set of nodes to read/write a block, as
+   * identified by the block key.  This method supports batch lookup by
+   * passing multiple keys.
+   *
+   * @param keys batch of block keys to find
+   * @return allocated blocks for each block key
+   * @throws IOException if there is any failure
+   */
+  Set<AllocatedBlock> getBlockLocations(Set<String> keys) throws IOException;
+
+  /**
    * Asks SCM where a block should be allocated. SCM responds with the
    * set of datanodes that should be used creating this block.
    * @param size - size of the block.
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
index e8d85e0084a..a60fbb2f222 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocol/StorageContainerLocationProtocol.java
@@ -38,20 +38,19 @@
    * set of datanodes that should be used creating this container.
    *
    */
-  ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
-      HddsProtos.ReplicationFactor factor, String owner)
+  Pipeline allocateContainer(HddsProtos.ReplicationType replicationType,
+      HddsProtos.ReplicationFactor factor, String containerName, String owner)
       throws IOException;
 
   /**
    * Ask SCM the location of the container. SCM responds with a group of
    * nodes where this container and its replicas are located.
    *
-   * @param containerID - ID of the container.
-   * @return ContainerInfo - the container info such as where the pipeline
-   *                         is located.
+   * @param containerName - Name of the container.
+   * @return Pipeline - the pipeline where container locates.
    * @throws IOException
    */
-  ContainerInfo getContainer(long containerID) throws IOException;
+  Pipeline getContainer(String containerName) throws IOException;
 
   /**
    * Ask SCM a list of containers with a range of container names
@@ -60,7 +59,8 @@ ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
    * use prefix name to filter the result. the max size of the
    * searching range cannot exceed the value of count.
    *
-   * @param startContainerID start container ID.
+   * @param startName start name, if null, start searching at the head.
+   * @param prefixName prefix name, if null, then filter is disabled.
    * @param count count, if count < 0, the max size is unlimited.(
    *              Usually the count will be replace with a very big
    *              value instead of being unlimited in case the db is very big)
@@ -68,18 +68,18 @@ ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
    * @return a list of container.
    * @throws IOException
    */
-  List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException;
+  List<ContainerInfo> listContainer(String startName, String prefixName,
+      int count) throws IOException;
 
   /**
    * Deletes a container in SCM.
    *
-   * @param containerID
+   * @param containerName
    * @throws IOException
    *   if failed to delete the container mapping from db store
    *   or container doesn't exist.
    */
-  void deleteContainer(long containerID) throws IOException;
+  void deleteContainer(String containerName) throws IOException;
 
   /**
    * Queries a list of Node Statuses.
@@ -94,12 +94,12 @@ ContainerInfo allocateContainer(HddsProtos.ReplicationType replicationType,
    * or containers on datanodes.
    * Container will be in Operational state after that.
    * @param type object type
-   * @param id object id
+   * @param name object name
    * @param op operation type (e.g., create, close, delete)
    * @param stage creation stage
    */
   void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
+      ObjectStageChangeRequestProto.Type type, String name,
       ObjectStageChangeRequestProto.Op op,
       ObjectStageChangeRequestProto.Stage stage)
       throws IOException;
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
index aed0fb7c22c..0012f3e4a8a 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/ScmBlockLocationProtocolClientSideTranslatorPB.java
@@ -17,10 +17,10 @@
 package org.apache.hadoop.hdds.scm.protocolPB;
 
 import com.google.common.base.Preconditions;
+import com.google.common.collect.Sets;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
@@ -35,7 +35,13 @@
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .DeleteScmKeyBlocksResponseProto;
 import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .GetScmBlockLocationsRequestProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .GetScmBlockLocationsResponseProto;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
     .KeyBlocks;
+import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos
+    .ScmLocatedBlockProto;
 import org.apache.hadoop.ipc.ProtobufHelper;
 import org.apache.hadoop.ipc.ProtocolTranslator;
 import org.apache.hadoop.ipc.RPC;
@@ -46,6 +52,7 @@
 import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
+import java.util.Set;
 import java.util.stream.Collectors;
 
 /**
@@ -75,6 +82,41 @@ public ScmBlockLocationProtocolClientSideTranslatorPB(
   }
 
   /**
+   * Find the set of nodes to read/write a block, as
+   * identified by the block key.  This method supports batch lookup by
+   * passing multiple keys.
+   *
+   * @param keys batch of block keys to find
+   * @return allocated blocks for each block key
+   * @throws IOException if there is any failure
+   */
+  @Override
+  public Set<AllocatedBlock> getBlockLocations(Set<String> keys)
+      throws IOException {
+    GetScmBlockLocationsRequestProto.Builder req =
+        GetScmBlockLocationsRequestProto.newBuilder();
+    for (String key : keys) {
+      req.addKeys(key);
+    }
+    final GetScmBlockLocationsResponseProto resp;
+    try {
+      resp = rpcProxy.getScmBlockLocations(NULL_RPC_CONTROLLER,
+          req.build());
+    } catch (ServiceException e) {
+      throw ProtobufHelper.getRemoteException(e);
+    }
+    Set<AllocatedBlock> locatedBlocks =
+        Sets.newLinkedHashSetWithExpectedSize(resp.getLocatedBlocksCount());
+    for (ScmLocatedBlockProto locatedBlock : resp.getLocatedBlocksList()) {
+      locatedBlocks.add(new AllocatedBlock.Builder()
+          .setKey(locatedBlock.getKey())
+          .setPipeline(Pipeline.getFromProtoBuf(locatedBlock.getPipeline()))
+          .build());
+    }
+    return locatedBlocks;
+  }
+
+  /**
    * Asks SCM where a block should be allocated. SCM responds with the
    * set of datanodes that should be used creating this block.
    * @param size - size of the block.
@@ -102,7 +144,7 @@ public AllocatedBlock allocateBlock(long size,
           response.getErrorMessage() : "Allocate block failed.");
     }
     AllocatedBlock.Builder builder = new AllocatedBlock.Builder()
-        .setBlockID(BlockID.getFromProtobuf(response.getBlockID()))
+        .setKey(response.getKey())
         .setPipeline(Pipeline.getFromProtoBuf(response.getPipeline()))
         .setShouldCreateContainer(response.getCreateContainer());
     return builder.build();
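The new getBlockLocations call resolves a batch of block keys to AllocatedBlocks in a single RPC. A sketch against the translator above (the keys and the in-scope `scmBlockClient` translator instance are hypothetical):

    // Sketch only: batch-resolve block keys to their pipelines.
    Set<String> keys = Sets.newHashSet("key-1", "key-2");
    Set<AllocatedBlock> blocks = scmBlockClient.getBlockLocations(keys);
    for (AllocatedBlock block : blocks) {
      System.out.println(block.getKey() + " -> "
          + block.getPipeline().getLeaderHost());
    }
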
diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
index bba4e172be1..3638f63e65d 100644
--- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
+++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/scm/protocolPB/StorageContainerLocationProtocolClientSideTranslatorPB.java
@@ -17,6 +17,7 @@
 package org.apache.hadoop.hdds.scm.protocolPB;
 
 import com.google.common.base.Preconditions;
+import com.google.common.base.Strings;
 import com.google.protobuf.RpcController;
 import com.google.protobuf.ServiceException;
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -91,14 +92,20 @@ public StorageContainerLocationProtocolClientSideTranslatorPB(
    * supports replication factor of either 1 or 3.
    * @param type - Replication Type
    * @param factor - Replication Count
+   * @param containerName - Name
    * @return
    * @throws IOException
    */
   @Override
-  public ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
-      HddsProtos.ReplicationFactor factor, String owner) throws IOException {
+  public Pipeline allocateContainer(HddsProtos.ReplicationType type,
+      HddsProtos.ReplicationFactor factor, String
+      containerName, String owner) throws IOException {
 
+    Preconditions.checkNotNull(containerName, "Container Name cannot be Null");
+    Preconditions.checkState(!containerName.isEmpty(), "Container name cannot"
+        + " be empty");
     ContainerRequestProto request = ContainerRequestProto.newBuilder()
+        .setContainerName(containerName)
         .setReplicationFactor(factor)
         .setReplicationType(type)
         .setOwner(owner)
@@ -114,20 +121,22 @@ public ContainerInfo allocateContainer(HddsProtos.ReplicationType type,
       throw new IOException(response.hasErrorMessage() ?
          response.getErrorMessage() : "Allocate container failed.");
     }
-    return ContainerInfo.fromProtobuf(response.getContainerInfo());
+    return Pipeline.getFromProtoBuf(response.getPipeline());
   }
 
-  public ContainerInfo getContainer(long containerID) throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
+  public Pipeline getContainer(String containerName) throws IOException {
+    Preconditions.checkNotNull(containerName,
+        "Container Name cannot be Null");
+    Preconditions.checkState(!containerName.isEmpty(),
+        "Container name cannot be empty");
     GetContainerRequestProto request = GetContainerRequestProto
         .newBuilder()
-        .setContainerID(containerID)
+        .setContainerName(containerName)
         .build();
     try {
       GetContainerResponseProto response =
           rpcProxy.getContainer(NULL_RPC_CONTROLLER, request);
-      return ContainerInfo.fromProtobuf(response.getContainerInfo());
+      return Pipeline.getFromProtoBuf(response.getPipeline());
     } catch (ServiceException e) {
       throw ProtobufHelper.getRemoteException(e);
     }
@@ -137,15 +146,16 @@ public ContainerInfo getContainer(long containerID) throws IOException {
    * {@inheritDoc}
    */
   @Override
-  public List<ContainerInfo> listContainer(long startContainerID, int count)
-      throws IOException {
-    Preconditions.checkState(startContainerID >= 0,
-        "Container ID cannot be negative.");
-    Preconditions.checkState(count > 0,
-        "Container count must be greater than 0.");
+  public List<ContainerInfo> listContainer(String startName, String prefixName,
+      int count) throws IOException {
     SCMListContainerRequestProto.Builder builder = SCMListContainerRequestProto
         .newBuilder();
-    builder.setStartContainerID(startContainerID);
+    if (prefixName != null) {
+      builder.setPrefixName(prefixName);
+    }
+    if (startName != null) {
+      builder.setStartName(startName);
+    }
     builder.setCount(count);
     SCMListContainerRequestProto request = builder.build();
 
@@ -167,17 +177,17 @@ public ContainerInfo getContainer(long containerID) throws IOException {
    * Ask SCM to delete a container by name. SCM will remove
    * the container mapping in its database.
    *
-   * @param containerID
+   * @param containerName
    * @throws IOException
    */
   @Override
-  public void deleteContainer(long containerID)
+  public void deleteContainer(String containerName)
       throws IOException {
-    Preconditions.checkState(containerID >= 0,
-        "Container ID cannot be negative");
+    Preconditions.checkState(!Strings.isNullOrEmpty(containerName),
+        "Container name cannot be null or empty");
     SCMDeleteContainerRequestProto request = SCMDeleteContainerRequestProto
         .newBuilder()
-        .setContainerID(containerID)
+        .setContainerName(containerName)
         .build();
     try {
       rpcProxy.deleteContainer(NULL_RPC_CONTROLLER, request);
@@ -216,21 +226,21 @@ public void deleteContainer(long containerID)
   /**
    * Notify from client that creates object on datanodes.
    * @param type object type
-   * @param id object id
+   * @param name object name
    * @param op operation type (e.g., create, close, delete)
    * @param stage object creation stage : begin/complete
    */
   @Override
   public void notifyObjectStageChange(
-      ObjectStageChangeRequestProto.Type type, long id,
+      ObjectStageChangeRequestProto.Type type, String name,
       ObjectStageChangeRequestProto.Op op,
       ObjectStageChangeRequestProto.Stage stage) throws IOException {
-    Preconditions.checkState(id >= 0,
-        "Object id cannot be negative.");
+    Preconditions.checkState(!Strings.isNullOrEmpty(name),
+        "Object name cannot be null or empty");
     ObjectStageChangeRequestProto request =
         ObjectStageChangeRequestProto.newBuilder()
             .setType(type)
-            .setId(id)
+            .setName(name)
             .setOp(op)
             .setStage(stage)
            .build();
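notifyObjectStageChange is keyed by object name again, with a null/empty guard replacing the non-negative ID check. A sketch of the begin/complete pair around a container create, mirroring what ContainerOperationClient does above ("demo-container" and the in-scope `storageContainerLocationClient` are assumptions):

    // Sketch only: stage notifications around a container create.
    storageContainerLocationClient.notifyObjectStageChange(
        ObjectStageChangeRequestProto.Type.container, "demo-container",
        ObjectStageChangeRequestProto.Op.create,
        ObjectStageChangeRequestProto.Stage.begin);
    // ... create the container on the datanodes ...
    storageContainerLocationClient.notifyObjectStageChange(
        ObjectStageChangeRequestProto.Type.container, "demo-container",
        ObjectStageChangeRequestProto.Op.create,
        ObjectStageChangeRequestProto.Stage.complete);
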
chunk information about chunk to write - * @param blockID ID of the block + * @param key the key name * @param data the data of the chunk to write * @param traceID container protocol call args * @throws IOException if there is an I/O error while performing the call */ public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, - BlockID blockID, ByteString data, String traceID) + String key, ByteString data, String traceID) throws IOException { WriteChunkRequestProto.Builder writeChunkRequest = WriteChunkRequestProto .newBuilder() - .setBlockID(blockID.getProtobuf()) + .setPipeline(xceiverClient.getPipeline().getProtobufMessage()) + .setKeyName(key) .setChunkData(chunk) .setData(data); String id = xceiverClient.getPipeline().getLeader().getUuidString(); @@ -185,29 +189,30 @@ public static void writeChunk(XceiverClientSpi xceiverClient, ChunkInfo chunk, * than 1 MB. * * @param client - client that communicates with the container. - * @param blockID - ID of the block + * @param containerName - Name of the container + * @param key - Name of the Key * @param data - Data to be written into the container. * @param traceID - Trace ID for logging purpose. * @throws IOException */ public static void writeSmallFile(XceiverClientSpi client, - BlockID blockID, byte[] data, String traceID) + String containerName, String key, byte[] data, String traceID) throws IOException { KeyData containerKeyData = - KeyData.newBuilder().setBlockID(blockID.getProtobuf()) + KeyData.newBuilder().setContainerName(containerName).setName(key) .build(); PutKeyRequestProto.Builder createKeyRequest = PutKeyRequestProto.newBuilder() + .setPipeline(client.getPipeline().getProtobufMessage()) .setKeyData(containerKeyData); KeyValue keyValue = KeyValue.newBuilder().setKey("OverWriteRequested").setValue("true") .build(); ChunkInfo chunk = - ChunkInfo.newBuilder().setChunkName(blockID.getLocalID() - + "_chunk").setOffset(0).setLen(data.length). - addMetadata(keyValue).build(); + ChunkInfo.newBuilder().setChunkName(key + "_chunk").setOffset(0) + .setLen(data.length).addMetadata(keyValue).build(); PutSmallFileRequestProto putSmallFileRequest = PutSmallFileRequestProto.newBuilder().setChunkInfo(chunk) @@ -229,18 +234,17 @@ public static void writeSmallFile(XceiverClientSpi client, /** * createContainer call that creates a container on the datanode. 
* @param client - client - * @param containerID - ID of container * @param traceID - traceID * @throws IOException */ - public static void createContainer(XceiverClientSpi client, long containerID, - String traceID) throws IOException { + public static void createContainer(XceiverClientSpi client, String traceID) + throws IOException { ContainerProtos.CreateContainerRequestProto.Builder createRequest = ContainerProtos.CreateContainerRequestProto .newBuilder(); ContainerProtos.ContainerData.Builder containerData = ContainerProtos .ContainerData.newBuilder(); - containerData.setContainerID(containerID); + containerData.setName(client.getPipeline().getContainerName()); createRequest.setPipeline(client.getPipeline().getProtobufMessage()); createRequest.setContainerData(containerData.build()); @@ -264,11 +268,12 @@ public static void createContainer(XceiverClientSpi client, long containerID, * @param traceID * @throws IOException */ - public static void deleteContainer(XceiverClientSpi client, long containerID, + public static void deleteContainer(XceiverClientSpi client, boolean force, String traceID) throws IOException { ContainerProtos.DeleteContainerRequestProto.Builder deleteRequest = ContainerProtos.DeleteContainerRequestProto.newBuilder(); - deleteRequest.setContainerID(containerID); + deleteRequest.setName(client.getPipeline().getContainerName()); + deleteRequest.setPipeline(client.getPipeline().getProtobufMessage()); deleteRequest.setForceDelete(force); String id = client.getPipeline().getLeader().getUuidString(); ContainerCommandRequestProto.Builder request = @@ -286,15 +291,14 @@ public static void deleteContainer(XceiverClientSpi client, long containerID, * Close a container. * * @param client - * @param containerID * @param traceID * @throws IOException */ - public static void closeContainer(XceiverClientSpi client, - long containerID, String traceID) throws IOException { + public static void closeContainer(XceiverClientSpi client, String traceID) + throws IOException { ContainerProtos.CloseContainerRequestProto.Builder closeRequest = ContainerProtos.CloseContainerRequestProto.newBuilder(); - closeRequest.setContainerID(containerID); + closeRequest.setPipeline(client.getPipeline().getProtobufMessage()); String id = client.getPipeline().getLeader().getUuidString(); ContainerCommandRequestProto.Builder request = @@ -316,11 +320,11 @@ public static void closeContainer(XceiverClientSpi client, * @throws IOException */ public static ReadContainerResponseProto readContainer( - XceiverClientSpi client, long containerID, + XceiverClientSpi client, String containerName, String traceID) throws IOException { ReadContainerRequestProto.Builder readRequest = ReadContainerRequestProto.newBuilder(); - readRequest.setContainerID(containerID); + readRequest.setName(containerName); readRequest.setPipeline(client.getPipeline().getProtobufMessage()); String id = client.getPipeline().getLeader().getUuidString(); ContainerCommandRequestProto.Builder request = @@ -336,23 +340,25 @@ public static ReadContainerResponseProto readContainer( } /** - * Reads the data given the blockID + * Reads the data given the container name and key. 
* * @param client - * @param blockID - ID of the block + * @param containerName - name of the container + * @param key - key * @param traceID - trace ID * @return GetSmallFileResponseProto * @throws IOException */ public static GetSmallFileResponseProto readSmallFile(XceiverClientSpi client, - BlockID blockID, String traceID) throws IOException { + String containerName, String key, String traceID) throws IOException { KeyData containerKeyData = KeyData .newBuilder() - .setBlockID(blockID.getProtobuf()) - .build(); + .setContainerName(containerName) + .setName(key).build(); GetKeyRequestProto.Builder getKey = GetKeyRequestProto .newBuilder() + .setPipeline(client.getPipeline().getProtobufMessage()) .setKeyData(containerKeyData); ContainerProtos.GetSmallFileRequestProto getSmallFileRequest = GetSmallFileRequestProto diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java index 7a5403f290d..38ce6ccb0a6 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/BlockGroup.java @@ -17,12 +17,9 @@ package org.apache.hadoop.ozone.common; -import org.apache.hadoop.hdds.client.BlockID; -import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .KeyBlocks; -import java.util.ArrayList; import java.util.List; /** @@ -31,13 +28,13 @@ public final class BlockGroup { private String groupID; - private List<BlockID> blockIDs; - private BlockGroup(String groupID, List<BlockID> blockIDs) { + private List<String> blockIDs; + private BlockGroup(String groupID, List<String> blockIDs) { this.groupID = groupID; this.blockIDs = blockIDs; } - public List<BlockID> getBlockIDList() { + public List<String> getBlockIDList() { return blockIDs; } @@ -46,11 +43,8 @@ public String getGroupID() { } public KeyBlocks getProto() { - KeyBlocks.Builder kbb = KeyBlocks.newBuilder(); - for (BlockID block : blockIDs) { - kbb.addBlocks(block.getProtobuf()); - } - return kbb.setKey(groupID).build(); + return KeyBlocks.newBuilder().setKey(groupID) + .addAllBlocks(blockIDs).build(); } /** @@ -59,12 +53,8 @@ public KeyBlocks getProto() { * @return a group of blocks.
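* <p>(Sketch, not part of the patch: with block IDs carried as plain strings,
* a round trip is {@code BlockGroup.getFromProto(group.getProto())}, where
* {@code group} came from {@code BlockGroup.newBuilder().setKeyName(key)
* .addAllBlockIDs(blockKeys).build()}.)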
*/ public static BlockGroup getFromProto(KeyBlocks proto) { - List<BlockID> blockIDs = new ArrayList<>(); - for (HddsProtos.BlockID block : proto.getBlocksList()) { - blockIDs.add(new BlockID(block.getContainerID(), block.getLocalID())); - } return BlockGroup.newBuilder().setKeyName(proto.getKey()) - .addAllBlockIDs(blockIDs).build(); + .addAllBlockIDs(proto.getBlocksList()).build(); } public static Builder newBuilder() { @@ -77,14 +67,14 @@ public static Builder newBuilder() { public static class Builder { private String groupID; - private List<BlockID> blockIDs; + private List<String> blockIDs; public Builder setKeyName(String blockGroupID) { this.groupID = blockGroupID; return this; } - public Builder addAllBlockIDs(List<BlockID> keyBlocks) { + public Builder addAllBlockIDs(List<String> keyBlocks) { this.blockIDs = keyBlocks; return this; } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java index 892b6951534..ec54ac54076 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/common/DeleteBlockGroupResult.java @@ -17,7 +17,6 @@ */ package org.apache.hadoop.ozone.common; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .DeleteScmBlockResult; @@ -53,7 +52,7 @@ public String getObjectKey() { new ArrayList<>(blockResultList.size()); for (DeleteBlockResult result : blockResultList) { DeleteScmBlockResult proto = DeleteScmBlockResult.newBuilder() - .setBlockID(result.getBlockID().getProtobuf()) + .setKey(result.getKey()) .setResult(result.getResult()).build(); resultProtoList.add(proto); } @@ -64,8 +63,8 @@ public String getObjectKey() { List<DeleteScmBlockResult> results) { List<DeleteBlockResult> protoResults = new ArrayList<>(results.size()); for (DeleteScmBlockResult result : results) { - protoResults.add(new DeleteBlockResult(BlockID.getFromProtobuf( - result.getBlockID()), result.getResult())); + protoResults.add(new DeleteBlockResult(result.getKey(), + result.getResult())); } return protoResults; } @@ -88,10 +87,10 @@ public boolean isSuccess() { /** * @return A list of deletion failed block IDs. */ - public List<BlockID> getFailedBlocks() { - List<BlockID> failedBlocks = blockResultList.stream() + public List<String> getFailedBlocks() { + List<String> failedBlocks = blockResultList.stream() .filter(result -> result.getResult() != Result.success) - .map(DeleteBlockResult::getBlockID).collect(Collectors.toList()); + .map(DeleteBlockResult::getKey).collect(Collectors.toList()); return failedBlocks; } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java index c3de5ed25fa..be546c75c3a 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyData.java @@ -19,7 +19,6 @@ import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.client.BlockID; import java.io.IOException; import java.util.Collections; @@ -31,7 +30,8 @@ * Helper class to convert Protobuf to Java classes.
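* <p>(Sketch, not part of the patch: a KeyData is now built as
* {@code new KeyData(containerName, keyName)}, and
* {@code getProtoBufMessage()} emits the containerName/name fields instead
* of a blockID.)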
*/ public class KeyData { - private final BlockID blockID; + private final String containerName; + private final String keyName; private final Map<String, String> metadata; /** @@ -44,10 +44,12 @@ /** * Constructs a KeyData Object. * - * @param blockID + * @param containerName + * @param keyName */ - public KeyData(BlockID blockID) { - this.blockID = blockID; + public KeyData(String containerName, String keyName) { + this.containerName = containerName; + this.keyName = keyName; this.metadata = new TreeMap<>(); } @@ -60,7 +62,7 @@ public KeyData(BlockID blockID) { */ public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws IOException { - KeyData keyData = new KeyData(BlockID.getFromProtobuf(data.getBlockID())); + KeyData keyData = new KeyData(data.getContainerName(), data.getName()); for (int x = 0; x < data.getMetadataCount(); x++) { keyData.addMetadata(data.getMetadata(x).getKey(), data.getMetadata(x).getValue()); @@ -76,7 +78,8 @@ public static KeyData getFromProtoBuf(ContainerProtos.KeyData data) throws public ContainerProtos.KeyData getProtoBufMessage() { ContainerProtos.KeyData.Builder builder = ContainerProtos.KeyData.newBuilder(); - builder.setBlockID(this.blockID.getProtobuf()); + builder.setContainerName(this.containerName); + builder.setName(this.getKeyName()); builder.addAllChunks(this.chunks); for (Map.Entry<String, String> entry : metadata.entrySet()) { HddsProtos.KeyValue.Builder keyValBuilder = @@ -132,27 +135,19 @@ public synchronized void deleteKey(String key) { } /** - * Returns container ID. - * @return long. + * Returns container Name. + * @return String. */ - public long getContainerID() { - return blockID.getContainerID(); + public String getContainerName() { + return containerName; } /** - * Returns LocalID. - * @return long. + * Returns KeyName. + * @return String. */ - public long getLocalID() { - return blockID.getLocalID(); - } - - /** - * Return Block ID. - * @return BlockID.
- */ - public BlockID getBlockID() { - return blockID; + public String getKeyName() { + return keyName; } /** diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java index 37a13095f6c..fa793419bf0 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/ScmBlockLocationProtocolServerSideTranslatorPB.java @@ -17,6 +17,7 @@ */ package org.apache.hadoop.ozone.protocolPB; +import com.google.common.collect.Sets; import com.google.protobuf.RpcController; import com.google.protobuf.ServiceException; import org.apache.hadoop.classification.InterfaceAudience; @@ -37,11 +38,18 @@ .DeleteScmKeyBlocksRequestProto; import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos .DeleteScmKeyBlocksResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .GetScmBlockLocationsRequestProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .GetScmBlockLocationsResponseProto; +import org.apache.hadoop.hdds.protocol.proto.ScmBlockLocationProtocolProtos + .ScmLocatedBlockProto; import org.apache.hadoop.ozone.common.BlockGroup; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import java.io.IOException; import java.util.List; +import java.util.Set; import java.util.stream.Collectors; /** @@ -65,6 +73,34 @@ public ScmBlockLocationProtocolServerSideTranslatorPB( this.impl = impl; } + + @Override + public GetScmBlockLocationsResponseProto getScmBlockLocations( + RpcController controller, GetScmBlockLocationsRequestProto req) + throws ServiceException { + Set<String> keys = Sets.newLinkedHashSetWithExpectedSize( + req.getKeysCount()); + for (String key : req.getKeysList()) { + keys.add(key); + } + final Set<AllocatedBlock> blocks; + try { + blocks = impl.getBlockLocations(keys); + } catch (IOException ex) { + throw new ServiceException(ex); + } + GetScmBlockLocationsResponseProto.Builder resp = + GetScmBlockLocationsResponseProto.newBuilder(); + for (AllocatedBlock block: blocks) { + ScmLocatedBlockProto.Builder locatedBlock = + ScmLocatedBlockProto.newBuilder() + .setKey(block.getKey()) + .setPipeline(block.getPipeline().getProtobufMessage()); + resp.addLocatedBlocks(locatedBlock.build()); + } + return resp.build(); + } + @Override public AllocateScmBlockResponseProto allocateScmBlock( RpcController controller, AllocateScmBlockRequestProto request) @@ -76,7 +112,7 @@ public AllocateScmBlockResponseProto allocateScmBlock( if (allocatedBlock != null) { return AllocateScmBlockResponseProto.newBuilder() - .setBlockID(allocatedBlock.getBlockID().getProtobuf()) + .setKey(allocatedBlock.getKey()) .setPipeline(allocatedBlock.getPipeline().getProtobufMessage()) .setCreateContainer(allocatedBlock.getCreateContainer()) .setErrorCode(AllocateScmBlockResponseProto.Error.success) diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java index 70a0e8a8dac..4974268bcb7 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java +++ 
b/hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerLocationProtocolServerSideTranslatorPB.java @@ -23,6 +23,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.protocol.StorageContainerLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.StorageContainerLocationProtocolPB; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; @@ -82,10 +83,11 @@ public StorageContainerLocationProtocolServerSideTranslatorPB( public ContainerResponseProto allocateContainer(RpcController unused, ContainerRequestProto request) throws ServiceException { try { - ContainerInfo container = impl.allocateContainer(request.getReplicationType(), - request.getReplicationFactor(), request.getOwner()); + Pipeline pipeline = impl.allocateContainer(request.getReplicationType(), + request.getReplicationFactor(), request.getContainerName(), + request.getOwner()); return ContainerResponseProto.newBuilder() - .setContainerInfo(container.getProtobuf()) + .setPipeline(pipeline.getProtobufMessage()) .setErrorCode(ContainerResponseProto.Error.success) .build(); @@ -99,9 +101,9 @@ public GetContainerResponseProto getContainer( RpcController controller, GetContainerRequestProto request) throws ServiceException { try { - ContainerInfo container = impl.getContainer(request.getContainerID()); + Pipeline pipeline = impl.getContainer(request.getContainerName()); return GetContainerResponseProto.newBuilder() - .setContainerInfo(container.getProtobuf()) + .setPipeline(pipeline.getProtobufMessage()) .build(); } catch (IOException e) { throw new ServiceException(e); } @@ -112,17 +114,23 @@ public GetContainerResponseProto getContainer( public SCMListContainerResponseProto listContainer(RpcController controller, SCMListContainerRequestProto request) throws ServiceException { try { - long startContainerID = 0; + String startName = null; + String prefixName = null; int count = -1; // Arguments check. - if (request.hasStartContainerID()) { + if (request.hasPrefixName()) { // Prefix name is given. - startContainerID = request.getStartContainerID(); + prefixName = request.getPrefixName(); } + if (request.hasStartName()) { + // Start name is given.
+ startName = request.getStartName(); + } + count = request.getCount(); List<ContainerInfo> containerList = - impl.listContainer(startContainerID, count); + impl.listContainer(startName, prefixName, count); SCMListContainerResponseProto.Builder builder = SCMListContainerResponseProto.newBuilder(); for (ContainerInfo container : containerList) { @@ -139,7 +147,7 @@ public SCMDeleteContainerResponseProto deleteContainer( RpcController controller, SCMDeleteContainerRequestProto request) throws ServiceException { try { - impl.deleteContainer(request.getContainerID()); + impl.deleteContainer(request.getContainerName()); return SCMDeleteContainerResponseProto.newBuilder().build(); } catch (IOException e) { throw new ServiceException(e); } @@ -170,7 +178,7 @@ public ObjectStageChangeResponseProto notifyObjectStageChange( RpcController controller, ObjectStageChangeRequestProto request) throws ServiceException { try { - impl.notifyObjectStageChange(request.getType(), request.getId(), + impl.notifyObjectStageChange(request.getType(), request.getName(), request.getOp(), request.getStage()); return ObjectStageChangeResponseProto.newBuilder().build(); } catch (IOException e) { diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java index 13b918015e0..83ca83d80d5 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/LevelDBStore.java @@ -76,9 +76,7 @@ public LevelDBStore(File dbPath, Options options) } private void openDB(File dbPath, Options options) throws IOException { - if (dbPath.getParentFile().mkdirs()) { - LOG.debug("Db path {} created.", dbPath.getParentFile()); - } + dbPath.getParentFile().mkdirs(); db = JniDBFactory.factory.open(dbPath, options); if (LOG.isDebugEnabled()) { LOG.debug("LevelDB successfully opened"); diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java index d3a29435de3..3ff0a948a87 100644 --- a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/MetadataKeyFilters.java @@ -17,8 +17,8 @@ */ package org.apache.hadoop.utils; -import com.google.common.base.Preconditions; import com.google.common.base.Strings; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.ozone.OzoneConsts; /** @@ -94,8 +94,8 @@ public boolean filterKey(byte[] preKey, byte[] currentKey, if (Strings.isNullOrEmpty(keyPrefix)) { accept = true; } else { - byte [] prefixBytes = keyPrefix.getBytes(); - if (currentKey != null && prefixMatch(prefixBytes, currentKey)) { + if (currentKey != null && + DFSUtil.bytes2String(currentKey).startsWith(keyPrefix)) { keysHinted++; accept = true; } else { @@ -114,19 +114,5 @@ public int getKeysScannedNum() { public int getKeysHintedNum() { return keysHinted; } - - private boolean prefixMatch(byte[] prefix, byte[] key) { - Preconditions.checkNotNull(prefix); - Preconditions.checkNotNull(key); - if (key.length < prefix.length) { - return false; - } - for (int i = 0; i < prefix.length; i++) { - if (key[i] != prefix[i]) { - return false; - } - } - return true; - } } } diff --git a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java index 0dfca20a8fb..a60e98d9ab2 100--- 
a/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java +++ b/hadoop-hdds/common/src/main/java/org/apache/hadoop/utils/RocksDBStore.java @@ -367,7 +367,6 @@ public void iterate(byte[] from, EntryConsumer consumer) public void close() throws IOException { if (statMBeanName != null) { MBeans.unregister(statMBeanName); - statMBeanName = null; } if (db != null) { db.close(); diff --git a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto index e7494ee4c67..a6270eff506 100644 --- a/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto @@ -197,15 +197,17 @@ message ContainerCommandResponseProto { } message ContainerData { - required int64 containerID = 1; + required string name = 1; repeated KeyValue metadata = 2; optional string dbPath = 3; optional string containerPath = 4; - optional string hash = 5; - optional int64 bytesUsed = 6; - optional int64 size = 7; - optional int64 keyCount = 8; - optional LifeCycleState state = 9 [default = OPEN]; + optional string hash = 6; + optional int64 bytesUsed = 7; + optional int64 size = 8; + optional int64 keyCount = 9; + //TODO: change required after we switch container ID from string to long + optional int64 containerID = 10; + optional LifeCycleState state = 11 [default = OPEN]; } message ContainerMeta { @@ -224,7 +226,7 @@ message CreateContainerResponseProto { message ReadContainerRequestProto { required Pipeline pipeline = 1; - required int64 containerID = 2; + required string name = 2; } message ReadContainerResponseProto { @@ -241,16 +243,19 @@ message UpdateContainerResponseProto { } message DeleteContainerRequestProto { - required int64 containerID = 1; - optional bool forceDelete = 2 [default = false]; + required Pipeline pipeline = 1; + required string name = 2; + optional bool forceDelete = 3 [default = false]; } message DeleteContainerResponseProto { } message ListContainerRequestProto { - required int64 startContainerID = 1; - optional uint32 count = 2; // Max Results to return + required Pipeline pipeline = 1; + optional string prefix = 2; + required uint32 count = 3; // Max Results to return + optional string prevKey = 4; // if this is not set query from start. } message ListContainerResponseProto { @@ -258,31 +263,34 @@ message ListContainerResponseProto { } message CloseContainerRequestProto { - required int64 containerID = 1; + required Pipeline pipeline = 1; } message CloseContainerResponseProto { + optional Pipeline pipeline = 1; optional string hash = 2; - optional int64 containerID = 3; } message KeyData { - required BlockID blockID = 1; - optional int64 flags = 2; // for future use. - repeated KeyValue metadata = 3; - repeated ChunkInfo chunks = 4; + required string containerName = 1; + required string name = 2; + optional int64 flags = 3; // for future use. + repeated KeyValue metadata = 4; + repeated ChunkInfo chunks = 5; } // Key Messages. 
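For reference, a minimal Java sketch of the request shape these messages imply (assumes an XceiverClientSpi named client; not part of the patch):

// Sketch only: container-scoped requests now carry the pipeline, whose
// containerName field identifies the container, instead of an int64 ID.
ContainerProtos.CloseContainerRequestProto closeRequest =
    ContainerProtos.CloseContainerRequestProto.newBuilder()
        .setPipeline(client.getPipeline().getProtobufMessage())
        .build();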
message PutKeyRequestProto { - required KeyData keyData = 1; + required Pipeline pipeline = 1; + required KeyData keyData = 2; } message PutKeyResponseProto { } message GetKeyRequestProto { - required KeyData keyData = 1; + required Pipeline pipeline = 1; + required KeyData keyData = 2; } message GetKeyResponseProto { @@ -291,15 +299,17 @@ message GetKeyResponseProto { message DeleteKeyRequestProto { - required BlockID blockID = 1; + required Pipeline pipeline = 1; + required string name = 2; } message DeleteKeyResponseProto { } message ListKeyRequestProto { - required int64 containerID = 1; - optional int64 startLocalID = 2; + required Pipeline pipeline = 1; + optional string prefix = 2; // if specified returns keys that match prefix. + required string prevKey = 3; required uint32 count = 4; } @@ -325,28 +335,31 @@ enum Stage { } message WriteChunkRequestProto { - required BlockID blockID = 1; - required ChunkInfo chunkData = 2; - optional bytes data = 3; - optional Stage stage = 4 [default = COMBINED]; + required Pipeline pipeline = 1; + required string keyName = 2; + required ChunkInfo chunkData = 3; + optional bytes data = 4; + optional Stage stage = 5 [default = COMBINED]; } message WriteChunkResponseProto { } message ReadChunkRequestProto { - required BlockID blockID = 1; - required ChunkInfo chunkData = 2; + required Pipeline pipeline = 1; + required string keyName = 2; + required ChunkInfo chunkData = 3; } message ReadChunkResponseProto { - required BlockID blockID = 1; + required Pipeline pipeline = 1; required ChunkInfo chunkData = 2; required bytes data = 3; } message DeleteChunkRequestProto { - required BlockID blockID = 1; + required Pipeline pipeline = 1; + required string keyName = 2; required ChunkInfo chunkData = 3; } @@ -354,9 +367,10 @@ message DeleteChunkResponseProto { } message ListChunkRequestProto { - required BlockID blockID = 1; - required string prevChunkName = 2; - required uint32 count = 3; + required Pipeline pipeline = 1; + required string keyName = 2; + required string prevChunkName = 3; + required uint32 count = 4; } message ListChunkResponseProto { @@ -386,7 +400,7 @@ message GetSmallFileResponseProto { } message CopyContainerRequestProto { - required int64 containerID = 1; + required string containerName = 1; required uint64 readOffset = 2; optional uint64 len = 3; } diff --git a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto index 7bea82ab860..38d2e16ce8b 100644 --- a/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/ScmBlockLocationProtocol.proto @@ -33,6 +33,28 @@ import "hdds.proto"; // SCM Block protocol +/** + * keys - batch of block keys to find + */ +message GetScmBlockLocationsRequestProto { + repeated string keys = 1; +} + +/** + * locatedBlocks - for each requested hash, nodes that currently host the + * container for that object key hash + */ +message GetScmBlockLocationsResponseProto { + repeated ScmLocatedBlockProto locatedBlocks = 1; +} + +/** + * Holds the nodes that currently host the blocks for a key. + */ +message ScmLocatedBlockProto { + required string key = 1; + required hadoop.hdds.Pipeline pipeline = 2; +} /** * Request send to SCM asking allocate block of specified size. 
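The new rpc is a batch lookup; a minimal request-building sketch (key names illustrative, not part of the patch):

// Sketch only: ask SCM which pipelines currently host these block keys.
GetScmBlockLocationsRequestProto request =
    GetScmBlockLocationsRequestProto.newBuilder()
        .addKeys("block-key-1")
        .addKeys("block-key-2")
        .build();
// Each ScmLocatedBlockProto in the response pairs one key with the
// pipeline (datanode set) that hosts its container.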
@@ -62,7 +84,7 @@ message DeleteScmKeyBlocksRequestProto { */ message KeyBlocks { required string key = 1; - repeated BlockID blocks = 2; + repeated string blocks = 2; } /** @@ -90,7 +112,7 @@ message DeleteScmBlockResult { unknownFailure = 4; } required Result result = 1; - required BlockID blockID = 2; + required string key = 2; } /** @@ -104,7 +126,7 @@ message AllocateScmBlockResponseProto { unknownFailure = 4; } required Error errorCode = 1; - required BlockID blockID = 2; + required string key = 2; required hadoop.hdds.Pipeline pipeline = 3; required bool createContainer = 4; optional string errorMessage = 5; @@ -117,6 +139,14 @@ message AllocateScmBlockResponseProto { service ScmBlockLocationProtocolService { /** + * Find the set of nodes that currently host the block, as + * identified by the key. This method supports batch lookup by + * passing multiple keys. + */ + rpc getScmBlockLocations(GetScmBlockLocationsRequestProto) + returns (GetScmBlockLocationsResponseProto); + + /** * Creates a block entry in SCM. */ rpc allocateScmBlock(AllocateScmBlockRequestProto) diff --git a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto index 090e6ebde49..d7540a3fe4f 100644 --- a/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto +++ b/hadoop-hdds/common/src/main/proto/StorageContainerLocationProtocol.proto @@ -35,6 +35,7 @@ import "hdds.proto"; * Request send to SCM asking where the container should be created. */ message ContainerRequestProto { + required string containerName = 1; // Ozone only support replciation of either 1 or 3. required ReplicationFactor replicationFactor = 2; required ReplicationType replicationType = 3; @@ -52,29 +53,30 @@ message ContainerResponseProto { errorContainerMissing = 3; } required Error errorCode = 1; - required SCMContainerInfo containerInfo = 2; + required Pipeline pipeline = 2; optional string errorMessage = 3; } message GetContainerRequestProto { - required int64 containerID = 1; + required string containerName = 1; } message GetContainerResponseProto { - required SCMContainerInfo containerInfo = 1; + required Pipeline pipeline = 1; } message SCMListContainerRequestProto { required uint32 count = 1; - optional uint64 startContainerID = 2; - } + optional string startName = 2; + optional string prefixName = 3; +} message SCMListContainerResponseProto { repeated SCMContainerInfo containers = 1; } message SCMDeleteContainerRequestProto { - required int64 containerID = 1; + required string containerName = 1; } message SCMDeleteContainerResponseProto { @@ -95,7 +97,7 @@ message ObjectStageChangeRequestProto { begin = 1; complete = 2; } - required int64 id = 1; + required string name = 1; required Type type = 2; required Op op= 3; required Stage stage = 4; diff --git a/hadoop-hdds/common/src/main/proto/hdds.proto b/hadoop-hdds/common/src/main/proto/hdds.proto index 6ea57279639..0b650b45ea3 100644 --- a/hadoop-hdds/common/src/main/proto/hdds.proto +++ b/hadoop-hdds/common/src/main/proto/hdds.proto @@ -50,6 +50,7 @@ message PipelineChannel { // A pipeline is composed of PipelineChannel (Ratis/StandAlone) that back a // container. message Pipeline { + required string containerName = 1; required PipelineChannel pipelineChannel = 2; } @@ -134,7 +135,8 @@ enum LifeCycleEvent { } message SCMContainerInfo { - required int64 containerID = 1; + // TODO : Remove the container name from pipeline. 
+ required string containerName = 1; required LifeCycleState state = 2; required Pipeline pipeline = 3; // This is not total size of container, but space allocated by SCM for @@ -144,6 +146,7 @@ message SCMContainerInfo { required uint64 numberOfKeys = 6; optional int64 stateEnterTime = 7; required string owner = 8; + required int64 containerID = 9; } message GetScmInfoRequestProto { @@ -165,11 +168,3 @@ enum ReplicationFactor { ONE = 1; THREE = 3; } - -/** - * Block ID that uniquely identify a block by SCM. - */ -message BlockID { - required int64 containerID = 1; - required int64 localID = 2; -} diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java index 8c5609d63c0..68bf4421f67 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ChunkUtils.java @@ -21,6 +21,7 @@ import com.google.protobuf.ByteString; import org.apache.commons.codec.binary.Hex; import org.apache.commons.codec.digest.DigestUtils; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; @@ -104,17 +105,18 @@ public static boolean isOverWritePermitted(ChunkInfo chunkInfo) { * Validates chunk data and returns a file object to Chunk File that we are * expected to write data to. * + * @param pipeline - pipeline. * @param data - container data. * @param info - chunk info. * @return File * @throws StorageContainerException */ - public static File validateChunk(ContainerData data, + public static File validateChunk(Pipeline pipeline, ContainerData data, ChunkInfo info) throws StorageContainerException { Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - File chunkFile = getChunkFile(data, info); + File chunkFile = getChunkFile(pipeline, data, info); if (ChunkUtils.isOverWriteRequested(chunkFile, info)) { if (!ChunkUtils.isOverWritePermitted(info)) { log.error("Rejecting write chunk request. Chunk overwrite " + @@ -130,21 +132,21 @@ public static File validateChunk(ContainerData data, /** * Validates that Path to chunk file exists. * + * @param pipeline - Container Info. * @param data - Container Data * @param info - Chunk info * @return - File. 
* @throws StorageContainerException */ - public static File getChunkFile(ContainerData data, + public static File getChunkFile(Pipeline pipeline, ContainerData data, ChunkInfo info) throws StorageContainerException { - Preconditions.checkNotNull(data, "Container data can't be null"); Logger log = LoggerFactory.getLogger(ChunkManagerImpl.class); - if (data.getContainerID() < 0) { - log.error("Invalid container id: {}", data.getContainerID()); - throw new StorageContainerException("Unable to find the container id:" + + if (data == null) { + log.error("Invalid container Name: {}", pipeline.getContainerName()); + throw new StorageContainerException("Unable to find the container Name:" + " " + - data.getContainerID(), CONTAINER_NOT_FOUND); + pipeline.getContainerName(), CONTAINER_NOT_FOUND); } File dataDir = ContainerUtils.getDataDirectory(data).toFile(); @@ -333,7 +335,7 @@ public static ByteBuffer readData(File chunkFile, ChunkInfo data) throws ContainerProtos.ReadChunkResponseProto.newBuilder(); response.setChunkData(info.getProtoBufMessage()); response.setData(ByteString.copyFrom(data)); - response.setBlockID(msg.getReadChunk().getBlockID()); + response.setPipeline(msg.getReadChunk().getPipeline()); ContainerProtos.ContainerCommandResponseProto.Builder builder = ContainerUtils.getContainerResponse(msg, ContainerProtos.Result diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java index c20282adafe..c29374c07c6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java @@ -40,6 +40,7 @@ */ public class ContainerData { + private final String containerName; private final Map<String, String> metadata; private String dbPath; // Path to Level DB Store. // Path to Physical file system where container and checksum are stored. @@ -47,18 +48,18 @@ private String hash; private AtomicLong bytesUsed; private long maxSize; - private long containerID; + private Long containerID; private HddsProtos.LifeCycleState state; /** * Constructs a ContainerData Object.
* - * @param containerID - ID - * @param conf - Configuration + * @param containerName - Name */ - public ContainerData(long containerID, + public ContainerData(String containerName, Long containerID, Configuration conf) { this.metadata = new TreeMap<>(); + this.containerName = containerName; this.maxSize = conf.getLong(ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_KEY, ScmConfigKeys.SCM_CONTAINER_CLIENT_MAX_SIZE_DEFAULT) * OzoneConsts.GB; this.bytesUsed = new AtomicLong(0L); @@ -75,7 +76,7 @@ public ContainerData(long containerID, public static ContainerData getFromProtBuf( ContainerProtos.ContainerData protoData, Configuration conf) throws IOException { - ContainerData data = new ContainerData( + ContainerData data = new ContainerData(protoData.getName(), protoData.getContainerID(), conf); for (int x = 0; x < protoData.getMetadataCount(); x++) { data.addMetadata(protoData.getMetadata(x).getKey(), @@ -116,6 +117,7 @@ public static ContainerData getFromProtBuf( public ContainerProtos.ContainerData getProtoBufMessage() { ContainerProtos.ContainerData.Builder builder = ContainerProtos .ContainerData.newBuilder(); + builder.setName(this.getContainerName()); builder.setContainerID(this.getContainerID()); if (this.getDBPath() != null) { @@ -155,6 +157,15 @@ public static ContainerData getFromProtBuf( } /** + * Returns the name of the container. + * + * @return - name + */ + public String getContainerName() { + return containerName; + } + + /** * Adds metadata. */ public void addMetadata(String key, String value) throws IOException { @@ -220,11 +231,9 @@ public void setDBPath(String path) { * * @return String Name. */ - // TODO: check the ContainerCache class to see if we are using the ContainerID instead. - /* - public String getName() { - return getContainerID(); - }*/ + public String getName() { + return getContainerName(); + } /** * Get container file path. @@ -246,7 +255,7 @@ public void setContainerPath(String containerPath) { * Get container ID. * @return - container ID. */ - public synchronized long getContainerID() { + public synchronized Long getContainerID() { return containerID; } @@ -275,7 +284,7 @@ public synchronized void closeContainer() { // Some thing brain dead for now. name + Time stamp of when we get the close // container message. - setHash(DigestUtils.sha256Hex(this.getContainerID() + + setHash(DigestUtils.sha256Hex(this.getContainerName() + Long.toString(Time.monotonicNow()))); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java index 19634f48b81..50d2da3975a 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java @@ -26,6 +26,7 @@ */ public class ContainerReport { private static final int UNKNOWN = -1; + private final String containerName; private final String finalhash; private long size; private long keyCount; @@ -50,11 +51,11 @@ public void setContainerID(long containerID) { /** * Constructs the ContainerReport. * - * @param containerID - Container ID. + * @param containerName - Container Name. * @param finalhash - Final Hash. 
*/ - public ContainerReport(long containerID, String finalhash) { - this.containerID = containerID; + public ContainerReport(String containerName, String finalhash) { + this.containerName = containerName; this.finalhash = finalhash; this.size = UNKNOWN; this.keyCount = UNKNOWN; @@ -73,7 +74,7 @@ public ContainerReport(long containerID, String finalhash) { */ public static ContainerReport getFromProtoBuf(ContainerInfo info) { Preconditions.checkNotNull(info); - ContainerReport report = new ContainerReport(info.getContainerID(), + ContainerReport report = new ContainerReport(info.getContainerName(), info.getFinalhash()); if (info.hasSize()) { report.setSize(info.getSize()); @@ -102,6 +103,15 @@ public static ContainerReport getFromProtoBuf(ContainerInfo info) { } /** + * Gets the container name. + * + * @return - Name + */ + public String getContainerName() { + return containerName; + } + + /** * Returns the final signature for this container. * * @return - hash @@ -193,6 +203,7 @@ public void setBytesUsed(long bytesUsed) { */ public ContainerInfo getProtoBufMessage() { return ContainerInfo.newBuilder() + .setContainerName(this.getContainerName()) .setKeyCount(this.getKeyCount()) .setSize(this.getSize()) .setUsed(this.getBytesUsed()) diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java index e24435418d8..1818188cb68 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerUtils.java @@ -184,12 +184,6 @@ public static String getContainerNameFromFile(File containerFile) { removeExtension(containerFile.getName())).toString(); } - public static long getContainerIDFromFile(File containerFile) { - Preconditions.checkNotNull(containerFile); - String containerID = getContainerNameFromFile(containerFile); - return Long.parseLong(containerID); - } - /** * Verifies that this in indeed a new container. 
* @@ -295,8 +289,8 @@ public static Path createMetadata(Path containerPath, String containerName, */ public static File getMetadataFile(ContainerData containerData, Path location) { - return location.resolve(Long.toString(containerData .getContainerID()).concat(CONTAINER_META)) + return location.resolve(containerData .getContainerName().concat(CONTAINER_META)) .toFile(); } @@ -309,8 +303,8 @@ public static File getMetadataFile(ContainerData containerData, */ public static File getContainerFile(ContainerData containerData, Path location) { - return location.resolve(Long.toString(containerData .getContainerID()).concat(CONTAINER_EXTENSION)) + return location.resolve(containerData .getContainerName().concat(CONTAINER_EXTENSION)) .toFile(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java index 9d0ec957f28..ade162a2637 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/DeletedContainerBlocksSummary.java @@ -19,7 +19,6 @@ import com.google.common.collect.Maps; import org.apache.hadoop.hdds.protocol.proto .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction; -import org.apache.hadoop.util.StringUtils; import java.util.List; import java.util.Map; @@ -38,7 +37,7 @@ // value : the number of blocks need to be deleted in this container // if the message contains multiple entries for same block, // blocks will be merged - private final Map<Long, Integer> blockSummary; + private final Map<String, Integer> blockSummary; // total number of blocks in this message private int numOfBlocks; @@ -48,14 +47,14 @@ private DeletedContainerBlocksSummary(List<DeletedBlocksTransaction> blocks) { blockSummary = Maps.newHashMap(); blocks.forEach(entry -> { txSummary.put(entry.getTxID(), entry.getCount()); - if (blockSummary.containsKey(entry.getContainerID())) { - blockSummary.put(entry.getContainerID(), - blockSummary.get(entry.getContainerID()) + entry.getLocalIDCount()); + if (blockSummary.containsKey(entry.getContainerName())) { + blockSummary.put(entry.getContainerName(), + blockSummary.get(entry.getContainerName()) + + entry.getBlockIDCount()); } else { - blockSummary.put(entry.getContainerID(), entry.getLocalIDCount()); + blockSummary.put(entry.getContainerName(), entry.getBlockIDCount()); } - numOfBlocks += entry.getLocalIDCount(); + numOfBlocks += entry.getBlockIDCount(); }); } @@ -94,9 +93,9 @@ public String getTxIDSummary() { .append("TimesProceed=") .append(blks.getCount()) .append(", ") - .append(blks.getContainerID()) + .append(blks.getContainerName()) .append(" : [") - .append(StringUtils.join(',', blks.getLocalIDList())).append("]") + .append(String.join(",", blks.getBlockIDList())).append("]") .append("\n"); } return sb.toString(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java index ec274525e4f..566db025103 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/FileUtils.java @@ -65,8 
+65,7 @@ private FileUtils() { ContainerProtos.ReadChunkResponseProto.newBuilder(); readChunkresponse.setChunkData(info.getProtoBufMessage()); readChunkresponse.setData(ByteString.copyFrom(data)); - readChunkresponse.setBlockID(msg.getGetSmallFile().getKey(). - getKeyData().getBlockID()); + readChunkresponse.setPipeline(msg.getGetSmallFile().getKey().getPipeline()); ContainerProtos.GetSmallFileResponseProto.Builder getSmallFile = ContainerProtos.GetSmallFileResponseProto.newBuilder(); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java index dbd5772d321..33eb911d4e9 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/helpers/KeyUtils.java @@ -63,11 +63,11 @@ public static MetadataStore getDB(ContainerData container, ContainerCache cache = ContainerCache.getInstance(conf); Preconditions.checkNotNull(cache); try { - return cache.getDB(container.getContainerID(), container.getDBPath()); + return cache.getDB(container.getContainerName(), container.getDBPath()); } catch (IOException ex) { String message = String.format("Unable to open DB. DB Name: %s, Path: %s. ex: %s", - container.getContainerID(), container.getDBPath(), ex.getMessage()); + container.getContainerName(), container.getDBPath(), ex.getMessage()); throw new StorageContainerException(message, UNABLE_TO_READ_METADATA_DB); } } @@ -83,7 +83,7 @@ public static void removeDB(ContainerData container, Preconditions.checkNotNull(container); ContainerCache cache = ContainerCache.getInstance(conf); Preconditions.checkNotNull(cache); - cache.removeDB(container.getContainerID()); + cache.removeDB(container.getContainerName()); } /** * Shutdown all DB Handles. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java index 350519692c1..457c417b104 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ChunkManagerImpl.java @@ -19,11 +19,11 @@ import com.google.common.base.Preconditions; import org.apache.hadoop.fs.FileUtil; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; @@ -66,12 +66,13 @@ public ChunkManagerImpl(ContainerManager manager) { /** * writes a given chunk. * - * @param blockID - ID of the block. + * @param pipeline - Name and the set of machines that make this container. + * @param keyName - Name of the Key. * @param info - ChunkInfo. 
* @throws StorageContainerException */ @Override - public void writeChunk(BlockID blockID, ChunkInfo info, + public void writeChunk(Pipeline pipeline, String keyName, ChunkInfo info, byte[] data, ContainerProtos.Stage stage) throws StorageContainerException { // we don't want container manager to go away while we are writing chunks. @@ -79,13 +80,13 @@ public void writeChunk(BlockID blockID, ChunkInfo info, // TODO : Take keyManager Write lock here. try { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - long containerID = blockID.getContainerID(); - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + String containerName = pipeline.getContainerName(); + Preconditions.checkNotNull(containerName, + "Container name cannot be null"); ContainerData container = - containerManager.readContainer(containerID); - File chunkFile = ChunkUtils.validateChunk(container, info); + containerManager.readContainer(containerName); + File chunkFile = ChunkUtils.validateChunk(pipeline, container, info); File tmpChunkFile = getTmpChunkFile(chunkFile, info); LOG.debug("writing chunk:{} chunk stage:{} chunk file:{} tmp chunk file", @@ -95,16 +96,16 @@ public void writeChunk(BlockID blockID, ChunkInfo info, ChunkUtils.writeData(tmpChunkFile, info, data); break; case COMMIT_DATA: - commitChunk(tmpChunkFile, chunkFile, containerID, info.getLen()); + commitChunk(tmpChunkFile, chunkFile, containerName, info.getLen()); break; case COMBINED: // directly write to the chunk file long oldSize = chunkFile.length(); ChunkUtils.writeData(chunkFile, info, data); long newSize = chunkFile.length(); - containerManager.incrBytesUsed(containerID, newSize - oldSize); - containerManager.incrWriteCount(containerID); - containerManager.incrWriteBytes(containerID, info.getLen()); + containerManager.incrBytesUsed(containerName, newSize - oldSize); + containerManager.incrWriteCount(containerName); + containerManager.incrWriteBytes(containerName, info.getLen()); break; default: throw new IOException("Can not identify write operation."); @@ -135,21 +136,22 @@ private static File getTmpChunkFile(File chunkFile, ChunkInfo info) // Commit the chunk by renaming the temporary chunk file to chunk file private void commitChunk(File tmpChunkFile, File chunkFile, - long containerID, long chunkLen) throws IOException { + String containerName, long chunkLen) throws IOException { long sizeDiff = tmpChunkFile.length() - chunkFile.length(); // It is safe to replace here as the earlier chunk if existing should be // caught as part of validateChunk Files.move(tmpChunkFile.toPath(), chunkFile.toPath(), StandardCopyOption.REPLACE_EXISTING); - containerManager.incrBytesUsed(containerID, sizeDiff); - containerManager.incrWriteCount(containerID); - containerManager.incrWriteBytes(containerID, chunkLen); + containerManager.incrBytesUsed(containerName, sizeDiff); + containerManager.incrWriteCount(containerName); + containerManager.incrWriteBytes(containerName, chunkLen); } /** * reads the data defined by a chunk. * - * @param blockID - ID of the block. + * @param pipeline - container pipeline. + * @param keyName - Name of the Key * @param info - ChunkInfo. * @return byte array * @throws StorageContainerException @@ -157,20 +159,20 @@ private void commitChunk(File tmpChunkFile, File chunkFile, * TODO: Explore if we need to do that for ozone. 
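* <p>(Sketch, not part of the patch: a reader now resolves chunks through
* the pipeline and key name, e.g.
* {@code byte[] data = chunkManager.readChunk(pipeline, keyName, info)},
* and removes them with
* {@code chunkManager.deleteChunk(pipeline, keyName, info)}.)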
*/ @Override - public byte[] readChunk(BlockID blockID, ChunkInfo info) + public byte[] readChunk(Pipeline pipeline, String keyName, ChunkInfo info) throws StorageContainerException { containerManager.readLock(); try { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - long containerID = blockID.getContainerID(); - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + String containerName = pipeline.getContainerName(); + Preconditions.checkNotNull(containerName, + "Container name cannot be null"); ContainerData container = - containerManager.readContainer(containerID); - File chunkFile = ChunkUtils.getChunkFile(container, info); + containerManager.readContainer(containerName); + File chunkFile = ChunkUtils.getChunkFile(pipeline, container, info); ByteBuffer data = ChunkUtils.readData(chunkFile, info); - containerManager.incrReadCount(containerID); - containerManager.incrReadBytes(containerID, chunkFile.length()); + containerManager.incrReadCount(containerName); + containerManager.incrReadBytes(containerName, chunkFile.length()); return data.array(); } catch (ExecutionException | NoSuchAlgorithmException e) { LOG.error("read data failed. error: {}", e); @@ -189,25 +191,25 @@ private void commitChunk(File tmpChunkFile, File chunkFile, /** * Deletes a given chunk. * - * @param blockID - ID of the block. + * @param pipeline - Pipeline. + * @param keyName - Key Name * @param info - Chunk Info * @throws StorageContainerException */ @Override - public void deleteChunk(BlockID blockID, ChunkInfo info) + public void deleteChunk(Pipeline pipeline, String keyName, ChunkInfo info) throws StorageContainerException { containerManager.readLock(); try { - Preconditions.checkNotNull(blockID, "Block ID cannot be null."); - long containerID = blockID.getContainerID(); - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative"); - - File chunkFile = ChunkUtils.getChunkFile(containerManager - .readContainer(containerID), info); + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + String containerName = pipeline.getContainerName(); + Preconditions.checkNotNull(containerName, + "Container name cannot be null"); + File chunkFile = ChunkUtils.getChunkFile(pipeline, containerManager + .readContainer(containerName), info); if ((info.getOffset() == 0) && (info.getLen() == chunkFile.length())) { FileUtil.fullyDelete(chunkFile); - containerManager.decrBytesUsed(containerID, chunkFile.length()); + containerManager.decrBytesUsed(containerName, chunkFile.length()); } else { LOG.error("Not Supported Operation. Trying to delete a " + "chunk that is in shared file. 
chunk info : " + info.toString()); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java index 1893b3b3b89..5e7375cd9df 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java @@ -24,6 +24,7 @@ import org.apache.commons.io.FileUtils; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; @@ -112,9 +113,7 @@ static final Logger LOG = LoggerFactory.getLogger(ContainerManagerImpl.class); - // TODO: consider primitive collection like eclipse-collections - // to avoid autoboxing overhead - private final ConcurrentSkipListMap + private final ConcurrentSkipListMap containerMap = new ConcurrentSkipListMap<>(); // Use a non-fair RW lock for better throughput, we may revisit this decision @@ -230,7 +229,6 @@ private void readContainerInfo(String containerName) Preconditions.checkNotNull(keyName, "Container Name to container key mapping is null"); - long containerID = Long.parseLong(keyName); try { String containerFileName = containerName.concat(CONTAINER_EXTENSION); String metaFileName = containerName.concat(CONTAINER_META); @@ -251,7 +249,7 @@ private void readContainerInfo(String containerName) // when loading the info we get a null, this often means last time // SCM was ending up at some middle phase causing that the metadata // was not populated. Such containers are marked as inactive. - containerMap.put(containerID, new ContainerStatus(null)); + containerMap.put(keyName, new ContainerStatus(null)); return; } containerData = ContainerData.getFromProtBuf(containerDataProto, conf); @@ -265,7 +263,7 @@ private void readContainerInfo(String containerName) // Hopefully SCM will ask us to delete this container and rebuild it. LOG.error("Invalid SHA found for container data. Name :{}" + "cowardly refusing to read invalid data", containerName); - containerMap.put(containerID, new ContainerStatus(null)); + containerMap.put(keyName, new ContainerStatus(null)); return; } @@ -297,7 +295,7 @@ private void readContainerInfo(String containerName) }).sum(); containerStatus.setBytesUsed(bytesUsed); - containerMap.put(containerID, containerStatus); + containerMap.put(keyName, containerStatus); } catch (IOException | NoSuchAlgorithmException ex) { LOG.error("read failed for file: {} ex: {}", containerName, ex.getMessage()); @@ -305,7 +303,7 @@ private void readContainerInfo(String containerName) // TODO : Add this file to a recovery Queue. // Remember that this container is busted and we cannot use it. - containerMap.put(containerID, new ContainerStatus(null)); + containerMap.put(keyName, new ContainerStatus(null)); throw new StorageContainerException("Unable to read container info", UNABLE_TO_READ_METADATA_DB); } finally { @@ -318,17 +316,18 @@ private void readContainerInfo(String containerName) /** * Creates a container with the given name. * + * @param pipeline -- Nodes which make up this container. * @param containerData - Container Name and metadata. 
* @throws StorageContainerException - Exception */ @Override - public void createContainer(ContainerData containerData) + public void createContainer(Pipeline pipeline, ContainerData containerData) throws StorageContainerException { Preconditions.checkNotNull(containerData, "Container data cannot be null"); writeLock(); try { - if (containerMap.containsKey(containerData.getContainerID())) { - LOG.debug("container already exists. {}", containerData.getContainerID()); + if (containerMap.containsKey(containerData.getName())) { + LOG.debug("container already exists. {}", containerData.getName()); throw new StorageContainerException("container already exists.", CONTAINER_EXISTS); } @@ -400,7 +399,7 @@ private void writeContainerInfo(ContainerData containerData, location); File metadataFile = ContainerUtils.getMetadataFile(containerData, location); - String containerName = Long.toString(containerData.getContainerID()); + String containerName = containerData.getContainerName(); if(!overwrite) { ContainerUtils.verifyIsNewContainer(containerFile, metadataFile); @@ -447,7 +446,7 @@ private void writeContainerInfo(ContainerData containerData, LOG.error("Creation of container failed. Name: {}, we might need to " + "cleanup partially created artifacts. ", - containerData.getContainerID(), ex); + containerData.getContainerName(), ex); throw new StorageContainerException("Container creation failed. ", ex, CONTAINER_INTERNAL_ERROR); } finally { @@ -460,45 +459,45 @@ private void writeContainerInfo(ContainerData containerData, /** * Deletes an existing container. * - * @param containerID - ID of the container. + * @param pipeline - nodes that make this container. + * @param containerName - name of the container. * @param forceDelete - whether this container should be deleted forcibly. * @throws StorageContainerException */ @Override - public void deleteContainer(long containerID, + public void deleteContainer(Pipeline pipeline, String containerName, boolean forceDelete) throws StorageContainerException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative."); + Preconditions.checkNotNull(containerName, "Container name cannot be null"); + Preconditions.checkState(containerName.length() > 0, + "Container name length cannot be zero."); writeLock(); try { - if (isOpen(containerID)) { + if (isOpen(pipeline.getContainerName())) { throw new StorageContainerException( "Deleting an open container is not allowed.", UNCLOSED_CONTAINER_IO); } - ContainerStatus status = containerMap.get(containerID); + ContainerStatus status = containerMap.get(containerName); if (status == null) { - LOG.debug("No such container. ID: {}", containerID); - throw new StorageContainerException("No such container. ID : " + - containerID, CONTAINER_NOT_FOUND); + LOG.debug("No such container. Name: {}", containerName); + throw new StorageContainerException("No such container. Name : " + + containerName, CONTAINER_NOT_FOUND); } if (status.getContainer() == null) { - LOG.debug("Invalid container data. ID: {}", containerID); + LOG.debug("Invalid container data. Name: {}", containerName); throw new StorageContainerException("Invalid container data. 
Name : " + - containerID, CONTAINER_NOT_FOUND); + containerName, CONTAINER_NOT_FOUND); } ContainerUtils.removeContainer(status.getContainer(), conf, forceDelete); - containerMap.remove(containerID); + containerMap.remove(containerName); } catch (StorageContainerException e) { throw e; } catch (IOException e) { // TODO : An I/O error during delete can leave partial artifacts on the // disk. We will need the cleaner thread to cleanup this information. - String errMsg = String.format("Failed to cleanup container. ID: %d", - containerID); - LOG.error(errMsg, e); - throw new StorageContainerException(errMsg, e, IO_EXCEPTION); + LOG.error("Failed to cleanup container. Name: {}", containerName, e); + throw new StorageContainerException(containerName, e, IO_EXCEPTION); } finally { writeUnlock(); } @@ -512,29 +511,25 @@ public void deleteContainer(long containerID, * time. It is possible that using this iteration you can miss certain * container from the listing. * - * @param startContainerID - Return containers with ID >= startContainerID. + * @param prefix - Return keys that match this prefix. * @param count - how many to return + * @param prevKey - Previous Key Value or empty String. * @param data - Actual containerData * @throws StorageContainerException */ @Override - public void listContainer(long startContainerID, long count, + public void listContainer(String prefix, long count, String prevKey, List data) throws StorageContainerException { + // TODO : Support list with Prefix and PrevKey Preconditions.checkNotNull(data, "Internal assertion: data cannot be null"); - Preconditions.checkState(startContainerID >= 0, - "Start container ID cannot be negative"); - Preconditions.checkState(count > 0, - "max number of containers returned " + - "must be positive"); - readLock(); try { - ConcurrentNavigableMap map; - if (startContainerID == 0) { + ConcurrentNavigableMap map; + if (prevKey == null || prevKey.isEmpty()) { map = containerMap.tailMap(containerMap.firstKey(), true); } else { - map = containerMap.tailMap(startContainerID, false); + map = containerMap.tailMap(prevKey, false); } int currentCount = 0; @@ -554,23 +549,24 @@ public void listContainer(long startContainerID, long count, /** * Get metadata about a specific container. * - * @param containerID - ID of the container + * @param containerName - Name of the container * @return ContainerData - Container Data. * @throws StorageContainerException */ @Override - public ContainerData readContainer(long containerID) - throws StorageContainerException { - Preconditions.checkState(containerID >= 0, - "Container ID cannot be negative."); - if (!containerMap.containsKey(containerID)) { - throw new StorageContainerException("Unable to find the container. ID: " - + containerID, CONTAINER_NOT_FOUND); + public ContainerData readContainer(String containerName) throws + StorageContainerException { + Preconditions.checkNotNull(containerName, "Container name cannot be null"); + Preconditions.checkState(containerName.length() > 0, + "Container name length cannot be zero."); + if (!containerMap.containsKey(containerName)) { + throw new StorageContainerException("Unable to find the container. Name: " + + containerName, CONTAINER_NOT_FOUND); } - ContainerData cData = containerMap.get(containerID).getContainer(); + ContainerData cData = containerMap.get(containerName).getContainer(); if (cData == null) { - throw new StorageContainerException("Invalid container data. 
ID: " - + containerID, CONTAINER_INTERNAL_ERROR); + throw new StorageContainerException("Invalid container data. Name: " + + containerName, CONTAINER_INTERNAL_ERROR); } return cData; } @@ -579,13 +575,13 @@ public ContainerData readContainer(long containerID) * Closes a open container, if it is already closed or does not exist a * StorageContainerException is thrown. * - * @param containerID - ID of the container. + * @param containerName - Name of the container. * @throws StorageContainerException */ @Override - public void closeContainer(long containerID) + public void closeContainer(String containerName) throws StorageContainerException, NoSuchAlgorithmException { - ContainerData containerData = readContainer(containerID); + ContainerData containerData = readContainer(containerName); containerData.closeContainer(); writeContainerInfo(containerData, true); MetadataStore db = KeyUtils.getDB(containerData, conf); @@ -606,13 +602,15 @@ public void closeContainer(long containerID) // issues. ContainerStatus status = new ContainerStatus(containerData); - containerMap.put(containerID, status); + containerMap.put(containerName, status); } @Override - public void updateContainer(long containerID, ContainerData data, - boolean forceUpdate) throws StorageContainerException { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); + public void updateContainer(Pipeline pipeline, String containerName, + ContainerData data, boolean forceUpdate) + throws StorageContainerException { + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + Preconditions.checkNotNull(containerName, "Container name cannot be null"); Preconditions.checkNotNull(data, "Container data cannot be null"); FileOutputStream containerStream = null; DigestOutputStream dos = null; @@ -620,9 +618,9 @@ public void updateContainer(long containerID, ContainerData data, File containerFileBK = null, containerFile = null; boolean deleted = false; - if(!containerMap.containsKey(containerID)) { + if(!containerMap.containsKey(containerName)) { throw new StorageContainerException("Container doesn't exist. Name :" - + containerID, CONTAINER_NOT_FOUND); + + containerName, CONTAINER_NOT_FOUND); } try { @@ -635,7 +633,7 @@ public void updateContainer(long containerID, ContainerData data, try { Path location = locationManager.getContainerPath(); - ContainerData orgData = containerMap.get(containerID).getContainer(); + ContainerData orgData = containerMap.get(containerName).getContainer(); if (orgData == null) { // updating a invalid container throw new StorageContainerException("Update a container with invalid" + @@ -644,7 +642,7 @@ public void updateContainer(long containerID, ContainerData data, if (!forceUpdate && !orgData.isOpen()) { throw new StorageContainerException( - "Update a closed container is not allowed. ID: " + containerID, + "Update a closed container is not allowed. Name: " + containerName, UNSUPPORTED_REQUEST); } @@ -654,7 +652,7 @@ public void updateContainer(long containerID, ContainerData data, if (!forceUpdate) { if (!containerFile.exists() || !containerFile.canWrite()) { throw new StorageContainerException( - "Container file not exists or corrupted. ID: " + containerID, + "Container file not exists or corrupted. 
Name: " + containerName, CONTAINER_INTERNAL_ERROR); } @@ -674,7 +672,7 @@ public void updateContainer(long containerID, ContainerData data, // Update the in-memory map ContainerStatus newStatus = new ContainerStatus(data); - containerMap.replace(containerID, newStatus); + containerMap.replace(containerName, newStatus); } catch (IOException e) { // Restore the container file from backup if(containerFileBK != null && containerFileBK.exists() && deleted) { @@ -685,8 +683,8 @@ public void updateContainer(long containerID, ContainerData data, CONTAINER_INTERNAL_ERROR); } else { throw new StorageContainerException( - "Failed to restore container data from the backup. ID: " - + containerID, CONTAINER_INTERNAL_ERROR); + "Failed to restore container data from the backup. Name: " + + containerName, CONTAINER_INTERNAL_ERROR); } } else { throw new StorageContainerException( @@ -713,22 +711,22 @@ protected File getContainerFile(ContainerData data) throws IOException { /** * Checks if a container exists. * - * @param containerID - ID of the container. + * @param containerName - Name of the container. * @return true if the container is open false otherwise. * @throws StorageContainerException - Throws Exception if we are not able to * find the container. */ @Override - public boolean isOpen(long containerID) throws StorageContainerException { - final ContainerStatus status = containerMap.get(containerID); + public boolean isOpen(String containerName) throws StorageContainerException { + final ContainerStatus status = containerMap.get(containerName); if (status == null) { throw new StorageContainerException( - "Container status not found: " + containerID, CONTAINER_NOT_FOUND); + "Container status not found: " + containerName, CONTAINER_NOT_FOUND); } final ContainerData cData = status.getContainer(); if (cData == null) { throw new StorageContainerException( - "Container not found: " + containerID, CONTAINER_NOT_FOUND); + "Container not found: " + containerName, CONTAINER_NOT_FOUND); } return cData.isOpen(); } @@ -748,7 +746,7 @@ public void shutdown() throws IOException { @VisibleForTesting - public ConcurrentSkipListMap getContainerMap() { + public ConcurrentSkipListMap getContainerMap() { return containerMap; } @@ -903,7 +901,7 @@ public ContainerReportsRequestProto getContainerReport() throws IOException { for (ContainerStatus container: containers) { StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder = StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder(); - ciBuilder.setContainerID(container.getContainer().getContainerID()) + ciBuilder.setContainerName(container.getContainer().getContainerName()) .setSize(container.getContainer().getMaxSize()) .setUsed(container.getContainer().getBytesUsed()) .setKeyCount(container.getContainer().getKeyCount()) @@ -968,7 +966,7 @@ public ContainerDeletionChoosingPolicy getContainerDeletionChooser() { } @Override - public void incrPendingDeletionBlocks(int numBlocks, long containerId) { + public void incrPendingDeletionBlocks(int numBlocks, String containerId) { writeLock(); try { ContainerStatus status = containerMap.get(containerId); @@ -979,7 +977,7 @@ public void incrPendingDeletionBlocks(int numBlocks, long containerId) { } @Override - public void decrPendingDeletionBlocks(int numBlocks, long containerId) { + public void decrPendingDeletionBlocks(int numBlocks, String containerId) { writeLock(); try { ContainerStatus status = containerMap.get(containerId); @@ -992,35 +990,35 @@ public void decrPendingDeletionBlocks(int numBlocks, 
long containerId) { /** * Increase the read count of the container. * - * @param containerId - ID of the container. + * @param containerName - Name of the container. */ @Override - public void incrReadCount(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public void incrReadCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); status.incrReadCount(); } - public long getReadCount(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public long getReadCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); return status.getReadCount(); } /** * Increase the read counter for bytes read from the container. * - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param readBytes - bytes read from the container. */ @Override - public void incrReadBytes(long containerId, long readBytes) { - ContainerStatus status = containerMap.get(containerId); + public void incrReadBytes(String containerName, long readBytes) { + ContainerStatus status = containerMap.get(containerName); status.incrReadBytes(readBytes); } - public long getReadBytes(long containerId) { + public long getReadBytes(String containerName) { readLock(); try { - ContainerStatus status = containerMap.get(containerId); + ContainerStatus status = containerMap.get(containerName); return status.getReadBytes(); } finally { readUnlock(); @@ -1030,76 +1028,76 @@ public long getReadBytes(long containerId) { /** * Increase the write count of the container. * - * @param containerId - Name of the container. + * @param containerName - Name of the container. */ @Override - public void incrWriteCount(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public void incrWriteCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); status.incrWriteCount(); } - public long getWriteCount(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public long getWriteCount(String containerName) { + ContainerStatus status = containerMap.get(containerName); return status.getWriteCount(); } /** * Increase the write counter for bytes written into the container. * - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param writeBytes - bytes written into the container. */ @Override - public void incrWriteBytes(long containerId, long writeBytes) { - ContainerStatus status = containerMap.get(containerId); + public void incrWriteBytes(String containerName, long writeBytes) { + ContainerStatus status = containerMap.get(containerName); status.incrWriteBytes(writeBytes); } - public long getWriteBytes(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public long getWriteBytes(String containerName) { + ContainerStatus status = containerMap.get(containerName); return status.getWriteBytes(); } /** * Increase the bytes used by the container. * - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param used - additional bytes used by the container. * @return the current bytes used. */ @Override - public long incrBytesUsed(long containerId, long used) { - ContainerStatus status = containerMap.get(containerId); + public long incrBytesUsed(String containerName, long used) { + ContainerStatus status = containerMap.get(containerName); return status.incrBytesUsed(used); } /** * Decrease the bytes used by the container. 
* - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param used - additional bytes reclaimed by the container. * @return the current bytes used. */ @Override - public long decrBytesUsed(long containerId, long used) { - ContainerStatus status = containerMap.get(containerId); + public long decrBytesUsed(String containerName, long used) { + ContainerStatus status = containerMap.get(containerName); return status.decrBytesUsed(used); } - public long getBytesUsed(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public long getBytesUsed(String containerName) { + ContainerStatus status = containerMap.get(containerName); return status.getBytesUsed(); } /** * Get the number of keys in the container. * - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @return the current key count. */ @Override - public long getNumKeys(long containerId) { - ContainerStatus status = containerMap.get(containerId); + public long getNumKeys(String containerName) { + ContainerStatus status = containerMap.get(containerName); return status.getNumKeys(); } /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java index 46bd8429537..d319565012e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java @@ -30,7 +30,6 @@ import org.apache.hadoop.hdds.protocol.proto.ContainerProtos .ContainerCommandResponseProto; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos.Type; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; import org.apache.hadoop.ozone.container.common.helpers.ChunkUtils; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; @@ -187,7 +186,7 @@ private ContainerCommandResponseProto containerProcessHandler( } catch (IOException ex) { LOG.warn("Container operation failed. " + "Container: {} Operation: {} trace ID: {} Error: {}", - msg.getCreateContainer().getContainerData().getContainerID(), + msg.getCreateContainer().getContainerData().getName(), msg.getCmdType().name(), msg.getTraceID(), ex.toString(), ex); @@ -231,7 +230,7 @@ private ContainerCommandResponseProto keyProcessHandler( } catch (IOException ex) { LOG.warn("Container operation failed. " + "Container: {} Operation: {} trace ID: {} Error: {}", - msg.getCreateContainer().getContainerData().getContainerID(), + msg.getCreateContainer().getContainerData().getName(), msg.getCmdType().name(), msg.getTraceID(), ex.toString(), ex); @@ -274,7 +273,7 @@ private ContainerCommandResponseProto chunkProcessHandler( } catch (IOException ex) { LOG.warn("Container operation failed. 
" + "Container: {} Operation: {} trace ID: {} Error: {}", - msg.getCreateContainer().getContainerData().getContainerID(), + msg.getCreateContainer().getContainerData().getName(), msg.getCmdType().name(), msg.getTraceID(), ex.toString(), ex); @@ -319,14 +318,17 @@ private ContainerCommandResponseProto handleUpdateContainer( msg.getTraceID()); return ContainerUtils.malformedRequest(msg); } - long containerID = msg.getUpdateContainer() - .getContainerData().getContainerID(); + + Pipeline pipeline = Pipeline.getFromProtoBuf( + msg.getUpdateContainer().getPipeline()); + String containerName = msg.getUpdateContainer() + .getContainerData().getName(); ContainerData data = ContainerData.getFromProtBuf( msg.getUpdateContainer().getContainerData(), conf); boolean forceUpdate = msg.getUpdateContainer().getForceUpdate(); - this.containerManager.updateContainer(containerID, - data, forceUpdate); + this.containerManager.updateContainer( + pipeline, containerName, data, forceUpdate); return ContainerUtils.getContainerResponse(msg); } @@ -347,9 +349,8 @@ private ContainerCommandResponseProto handleReadContainer( return ContainerUtils.malformedRequest(msg); } - long containerID = msg.getReadContainer().getContainerID(); - ContainerData container = this.containerManager. - readContainer(containerID); + String name = msg.getReadContainer().getName(); + ContainerData container = this.containerManager.readContainer(name); return ContainerUtils.getReadContainerResponse(msg, container); } @@ -369,9 +370,12 @@ private ContainerCommandResponseProto handleDeleteContainer( return ContainerUtils.malformedRequest(msg); } - long containerID = msg.getDeleteContainer().getContainerID(); + Pipeline pipeline = Pipeline.getFromProtoBuf( + msg.getDeleteContainer().getPipeline()); + Preconditions.checkNotNull(pipeline); + String name = msg.getDeleteContainer().getName(); boolean forceDelete = msg.getDeleteContainer().getForceDelete(); - this.containerManager.deleteContainer(containerID, forceDelete); + this.containerManager.deleteContainer(pipeline, name, forceDelete); return ContainerUtils.getContainerResponse(msg); } @@ -397,7 +401,7 @@ private ContainerCommandResponseProto handleCreateContainer( msg.getCreateContainer().getPipeline()); Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); - this.containerManager.createContainer(cData); + this.containerManager.createContainer(pipeline, cData); return ContainerUtils.getContainerResponse(msg); } @@ -416,12 +420,14 @@ private ContainerCommandResponseProto handleCloseContainer( msg.getTraceID()); return ContainerUtils.malformedRequest(msg); } - long containerID = msg.getCloseContainer().getContainerID(); - if (!this.containerManager.isOpen(containerID)) { + Pipeline pipeline = Pipeline.getFromProtoBuf(msg.getCloseContainer() + .getPipeline()); + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + if (!this.containerManager.isOpen(pipeline.getContainerName())) { throw new StorageContainerException("Attempting to close a closed " + "container.", CLOSED_CONTAINER_IO); } - this.containerManager.closeContainer(containerID); + this.containerManager.closeContainer(pipeline.getContainerName()); return ContainerUtils.getContainerResponse(msg); } catch (NoSuchAlgorithmException e) { throw new StorageContainerException("No such Algorithm", e, @@ -443,9 +449,11 @@ private ContainerCommandResponseProto handleWriteChunk( msg.getTraceID()); return ContainerUtils.malformedRequest(msg); } - BlockID blockID = BlockID.getFromProtobuf( - msg.getWriteChunk().getBlockID()); 
- if (!this.containerManager.isOpen(blockID.getContainerID())) { + String keyName = msg.getWriteChunk().getKeyName(); + Pipeline pipeline = Pipeline.getFromProtoBuf( + msg.getWriteChunk().getPipeline()); + Preconditions.checkNotNull(pipeline); + if (!this.containerManager.isOpen(pipeline.getContainerName())) { throw new StorageContainerException("Write to closed container.", CLOSED_CONTAINER_IO); } @@ -461,7 +469,7 @@ private ContainerCommandResponseProto handleWriteChunk( } this.containerManager.getChunkManager() - .writeChunk(blockID, chunkInfo, + .writeChunk(pipeline, keyName, chunkInfo, data, msg.getWriteChunk().getStage()); return ChunkUtils.getChunkResponse(msg); @@ -481,13 +489,17 @@ private ContainerCommandResponseProto handleReadChunk( msg.getTraceID()); return ContainerUtils.malformedRequest(msg); } - BlockID blockID = BlockID.getFromProtobuf( - msg.getReadChunk().getBlockID()); + + String keyName = msg.getReadChunk().getKeyName(); + Pipeline pipeline = Pipeline.getFromProtoBuf( + msg.getReadChunk().getPipeline()); + Preconditions.checkNotNull(pipeline); + ChunkInfo chunkInfo = ChunkInfo.getFromProtoBuf(msg.getReadChunk() .getChunkData()); Preconditions.checkNotNull(chunkInfo); - byte[] data = this.containerManager.getChunkManager(). - readChunk(blockID, chunkInfo); + byte[] data = this.containerManager.getChunkManager().readChunk(pipeline, + keyName, chunkInfo); metrics.incContainerBytesStats(Type.ReadChunk, data.length); return ChunkUtils.getReadChunkResponse(msg, data, chunkInfo); } @@ -507,10 +519,11 @@ private ContainerCommandResponseProto handleDeleteChunk( return ContainerUtils.malformedRequest(msg); } - BlockID blockID = BlockID.getFromProtobuf(msg.getDeleteChunk() - .getBlockID()); - long containerID = blockID.getContainerID(); - if (!this.containerManager.isOpen(containerID)) { + String keyName = msg.getDeleteChunk().getKeyName(); + Pipeline pipeline = Pipeline.getFromProtoBuf( + msg.getDeleteChunk().getPipeline()); + Preconditions.checkNotNull(pipeline); + if (!this.containerManager.isOpen(pipeline.getContainerName())) { throw new StorageContainerException("Write to closed container.", CLOSED_CONTAINER_IO); } @@ -518,7 +531,7 @@ private ContainerCommandResponseProto handleDeleteChunk( .getChunkData()); Preconditions.checkNotNull(chunkInfo); - this.containerManager.getChunkManager().deleteChunk(blockID, + this.containerManager.getChunkManager().deleteChunk(pipeline, keyName, chunkInfo); return ChunkUtils.getChunkResponse(msg); } @@ -537,16 +550,15 @@ private ContainerCommandResponseProto handlePutKey( msg.getTraceID()); return ContainerUtils.malformedRequest(msg); } - BlockID blockID = BlockID.getFromProtobuf( - msg.getPutKey().getKeyData().getBlockID()); - long containerID = blockID.getContainerID(); - if (!this.containerManager.isOpen(containerID)) { + Pipeline pipeline = Pipeline.getFromProtoBuf(msg.getPutKey().getPipeline()); + Preconditions.checkNotNull(pipeline); + if (!this.containerManager.isOpen(pipeline.getContainerName())) { throw new StorageContainerException("Write to closed container.", CLOSED_CONTAINER_IO); } KeyData keyData = KeyData.getFromProtoBuf(msg.getPutKey().getKeyData()); Preconditions.checkNotNull(keyData); - this.containerManager.getKeyManager().putKey(keyData); + this.containerManager.getKeyManager().putKey(pipeline, keyData); long numBytes = keyData.getProtoBufMessage().toByteArray().length; metrics.incContainerBytesStats(Type.PutKey, numBytes); return KeyUtils.getKeyResponse(msg); @@ -589,15 +601,17 @@ private ContainerCommandResponseProto 
handleDeleteKey( msg.getTraceID()); return ContainerUtils.malformedRequest(msg); } - BlockID blockID = BlockID.getFromProtobuf(msg.getDeleteKey() - .getBlockID()); - Preconditions.checkNotNull(blockID); - long containerID = blockID.getContainerID(); - if (!this.containerManager.isOpen(containerID)) { + Pipeline pipeline = + Pipeline.getFromProtoBuf(msg.getDeleteKey().getPipeline()); + Preconditions.checkNotNull(pipeline); + if (!this.containerManager.isOpen(pipeline.getContainerName())) { throw new StorageContainerException("Write to closed container.", CLOSED_CONTAINER_IO); } - this.containerManager.getKeyManager().deleteKey(blockID); + String keyName = msg.getDeleteKey().getName(); + Preconditions.checkNotNull(keyName); + Preconditions.checkState(!keyName.isEmpty()); + this.containerManager.getKeyManager().deleteKey(pipeline, keyName); return KeyUtils.getKeyResponse(msg); } @@ -618,11 +632,12 @@ private ContainerCommandResponseProto handlePutSmallFile( } try { - BlockID blockID = BlockID.getFromProtobuf(msg. - getPutSmallFile().getKey().getKeyData().getBlockID()); - long containerID = blockID.getContainerID(); + Pipeline pipeline = + Pipeline.getFromProtoBuf(msg.getPutSmallFile() + .getKey().getPipeline()); - if (!this.containerManager.isOpen(containerID)) { + Preconditions.checkNotNull(pipeline); + if (!this.containerManager.isOpen(pipeline.getContainerName())) { throw new StorageContainerException("Write to closed container.", CLOSED_CONTAINER_IO); } @@ -633,12 +648,12 @@ private ContainerCommandResponseProto handlePutSmallFile( byte[] data = msg.getPutSmallFile().getData().toByteArray(); metrics.incContainerBytesStats(Type.PutSmallFile, data.length); - this.containerManager.getChunkManager().writeChunk(blockID, - chunkInfo, data, ContainerProtos.Stage.COMBINED); + this.containerManager.getChunkManager().writeChunk(pipeline, keyData + .getKeyName(), chunkInfo, data, ContainerProtos.Stage.COMBINED); List chunks = new LinkedList<>(); chunks.add(chunkInfo.getProtoBufMessage()); keyData.setChunks(chunks); - this.containerManager.getKeyManager().putKey(keyData); + this.containerManager.getKeyManager().putKey(pipeline, keyData); return FileUtils.getPutFileResponse(msg); } catch (StorageContainerException e) { return ContainerUtils.logAndReturnError(LOG, e, msg); @@ -665,7 +680,12 @@ private ContainerCommandResponseProto handleGetSmallFile( return ContainerUtils.malformedRequest(msg); } try { + Pipeline pipeline = + Pipeline.getFromProtoBuf(msg.getGetSmallFile() + .getKey().getPipeline()); + long bytes = 0; + Preconditions.checkNotNull(pipeline); KeyData keyData = KeyData.getFromProtoBuf(msg.getGetSmallFile() .getKey().getKeyData()); KeyData data = this.containerManager.getKeyManager().getKey(keyData); @@ -674,8 +694,9 @@ private ContainerCommandResponseProto handleGetSmallFile( bytes += chunk.getSerializedSize(); ByteString current = ByteString.copyFrom(this.containerManager.getChunkManager() - .readChunk(keyData.getBlockID(), - ChunkInfo.getFromProtoBuf(chunk))); + .readChunk( + pipeline, keyData.getKeyName(), ChunkInfo.getFromProtoBuf( + chunk))); dataBuf = dataBuf.concat(current); c = chunk; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java index f920ded2935..cf6bf12214d 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java +++ 
b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java @@ -19,18 +19,20 @@ package org.apache.hadoop.ozone.container.common.impl; import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.ozone.container.common.helpers.ContainerData; import org.apache.hadoop.ozone.container.common.helpers.KeyData; import org.apache.hadoop.ozone.container.common.helpers.KeyUtils; import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager; import org.apache.hadoop.ozone.container.common.interfaces.KeyManager; import org.apache.hadoop.ozone.container.common.utils.ContainerCache; +import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; +import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; import org.slf4j.Logger; import org.slf4j.LoggerFactory; @@ -71,21 +73,22 @@ public KeyManagerImpl(ContainerManager containerManager, Configuration conf) { * {@inheritDoc} */ @Override - public void putKey(KeyData data) throws IOException { - Preconditions.checkNotNull(data, "KeyData cannot be null for put operation."); - Preconditions.checkState(data.getContainerID() >= 0, "Container ID cannot be negative"); + public void putKey(Pipeline pipeline, KeyData data) throws IOException { containerManager.readLock(); try { // We are not locking the key manager since LevelDb serializes all actions // against a single DB. We rely on DB level locking to avoid conflicts. - ContainerData cData = containerManager.readContainer( - data.getContainerID()); + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + String containerName = pipeline.getContainerName(); + Preconditions.checkNotNull(containerName, + "Container name cannot be null"); + ContainerData cData = containerManager.readContainer(containerName); MetadataStore db = KeyUtils.getDB(cData, conf); // This is a post condition that acts as a hint to the user. // Should never fail. Preconditions.checkNotNull(db, "DB cannot be null here"); - db.put(Longs.toByteArray(data.getLocalID()), data + db.put(data.getKeyName().getBytes(KeyUtils.ENCODING), data .getProtoBufMessage().toByteArray()); } finally { containerManager.readUnlock(); @@ -100,17 +103,17 @@ public KeyData getKey(KeyData data) throws IOException { containerManager.readLock(); try { Preconditions.checkNotNull(data, "Key data cannot be null"); - Preconditions.checkNotNull(data.getContainerID(), + Preconditions.checkNotNull(data.getContainerName(), "Container name cannot be null"); ContainerData cData = containerManager.readContainer(data - .getContainerID()); + .getContainerName()); MetadataStore db = KeyUtils.getDB(cData, conf); // This is a post condition that acts as a hint to the user. // Should never fail. 
Preconditions.checkNotNull(db, "DB cannot be null here"); - byte[] kData = db.get(Longs.toByteArray(data.getLocalID())); + byte[] kData = db.get(data.getKeyName().getBytes(KeyUtils.ENCODING)); if (kData == null) { throw new StorageContainerException("Unable to find the key.", NO_SUCH_KEY); @@ -127,19 +130,15 @@ public KeyData getKey(KeyData data) throws IOException { * {@inheritDoc} */ @Override - public void deleteKey(BlockID blockID) + public void deleteKey(Pipeline pipeline, String keyName) throws IOException { - Preconditions.checkNotNull(blockID, "block ID cannot be null."); - Preconditions.checkState(blockID.getContainerID() >= 0, - "Container ID cannot be negative."); - Preconditions.checkState(blockID.getLocalID() >= 0, - "Local ID cannot be negative."); - containerManager.readLock(); try { - - ContainerData cData = containerManager - .readContainer(blockID.getContainerID()); + Preconditions.checkNotNull(pipeline, "Pipeline cannot be null"); + String containerName = pipeline.getContainerName(); + Preconditions.checkNotNull(containerName, + "Container name cannot be null"); + ContainerData cData = containerManager.readContainer(containerName); MetadataStore db = KeyUtils.getDB(cData, conf); // This is a post condition that acts as a hint to the user. @@ -150,13 +149,12 @@ public void deleteKey(BlockID blockID) // to delete a key which might have just gotten inserted after // the get check. - byte[] kKey = Longs.toByteArray(blockID.getLocalID()); - byte[] kData = db.get(kKey); + byte[] kData = db.get(keyName.getBytes(KeyUtils.ENCODING)); if (kData == null) { throw new StorageContainerException("Unable to find the key.", NO_SUCH_KEY); } - db.delete(kKey); + db.delete(keyName.getBytes(KeyUtils.ENCODING)); } finally { containerManager.readUnlock(); } @@ -167,22 +165,26 @@ public void deleteKey(BlockID blockID) */ @Override public List<KeyData> listKey( - long containerID, long startLocalID, int count) + Pipeline pipeline, String prefix, String startKey, int count) throws IOException { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative"); - Preconditions.checkState(startLocalID >= 0, "startLocal ID cannot be negative"); + Preconditions.checkNotNull(pipeline, + "Pipeline cannot be null."); Preconditions.checkArgument(count > 0, "Count must be a positive number."); - ContainerData cData = containerManager.readContainer(containerID); + ContainerData cData = containerManager.readContainer(pipeline + .getContainerName()); MetadataStore db = KeyUtils.getDB(cData, conf); - List<KeyData> result = new ArrayList<>(); - byte[] startKeyInBytes = Longs.toByteArray(startLocalID); + List<KeyData> result = new ArrayList<>(); + byte[] startKeyInBytes = startKey == null ? 
null : + DFSUtil.string2Bytes(startKey); + MetadataKeyFilter prefixFilter = new KeyPrefixFilter(prefix); List<Map.Entry<byte[], byte[]>> range = - db.getSequentialRangeKVs(startKeyInBytes, count, null); + db.getSequentialRangeKVs(startKeyInBytes, count, prefixFilter); for (Map.Entry<byte[], byte[]> entry : range) { + String keyName = KeyUtils.getKeyName(entry.getKey()); KeyData value = KeyUtils.getKeyData(entry.getValue()); - KeyData data = new KeyData(value.getBlockID()); + KeyData data = new KeyData(value.getContainerName(), keyName); result.add(data); } return result; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java index 06177cb3326..3e267d2b37e 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/RandomContainerDeletionChoosingPolicy.java @@ -41,7 +41,7 @@ @Override public List<ContainerData> chooseContainerForBlockDeletion(int count, - Map<Long, ContainerStatus> candidateContainers) + Map<String, ContainerStatus> candidateContainers) throws StorageContainerException { Preconditions.checkNotNull(candidateContainers, "Internal assertion: candidate containers cannot be null"); @@ -58,7 +58,7 @@ LOG.debug("Select container {} for block deletion, " + "pending deletion blocks num: {}.", - entry.getContainer().getContainerID(), + entry.getContainer().getContainerName(), entry.getNumPendingDeletionBlocks()); } else { break; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java index 246342672f0..0169a96cf98 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/TopNOrderedContainerDeletionChoosingPolicy.java @@ -53,7 +53,7 @@ public int compare(ContainerStatus c1, ContainerStatus c2) { @Override public List<ContainerData> chooseContainerForBlockDeletion(int count, - Map<Long, ContainerStatus> candidateContainers) + Map<String, ContainerStatus> candidateContainers) 
org.apache.hadoop.ozone.container.common.interfaces; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.hdds.protocol.proto.ContainerProtos; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo; /** @@ -32,18 +32,20 @@ /** * writes a given chunk. - * @param blockID - ID of the block. + * @param pipeline - Name and the set of machines that make up this container. + * @param keyName - Name of the Key. * @param info - ChunkInfo. + * @param data - Data of the chunk. * @param stage - Chunk Stage write. * @throws StorageContainerException */ - void writeChunk(BlockID blockID, - ChunkInfo info, byte[] data, ContainerProtos.Stage stage) + void writeChunk(Pipeline pipeline, String keyName, + ChunkInfo info, byte[] data, ContainerProtos.Stage stage) throws StorageContainerException; /** * reads the data defined by a chunk. - * @param blockID - ID of the block. + * @param pipeline - container pipeline. + * @param keyName - Name of the Key. * @param info - ChunkInfo. * @return byte array * @throws StorageContainerException @@ -51,16 +53,17 @@ void writeChunk(BlockID blockID, * TODO: Right now we do not support partial reads and writes of chunks. * TODO: Explore if we need to do that for ozone. */ - byte[] readChunk(BlockID blockID, ChunkInfo info) throws + byte[] readChunk(Pipeline pipeline, String keyName, ChunkInfo info) throws StorageContainerException; /** * Deletes a given chunk. - * @param blockID - ID of the block. + * @param pipeline - Pipeline. + * @param keyName - Name of the Key. * @param info - Chunk Info * @throws StorageContainerException */ - void deleteChunk(BlockID blockID, ChunkInfo info) throws + void deleteChunk(Pipeline pipeline, String keyName, ChunkInfo info) throws StorageContainerException; // TODO : Support list operations. 
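Taken together, the revised ChunkManager contract addresses chunks by a (pipeline, keyName) pair instead of a BlockID. The following is a minimal caller-side sketch of that surface, assuming a Pipeline, ChunkInfo, and ChunkManager obtained elsewhere; the round-trip helper and its names are illustrative, not part of this patch:

    import org.apache.hadoop.hdds.protocol.proto.ContainerProtos;
    import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
    import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
    import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
    import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;

    public final class ChunkRoundTrip {
      // Hypothetical helper: writes a chunk under (pipeline, keyName) and reads
      // it straight back. Stage.COMBINED collapses the write/commit stages into
      // a single call, mirroring what handlePutSmallFile does in the Dispatcher
      // above.
      static byte[] roundTrip(ChunkManager chunkManager, Pipeline pipeline,
          String keyName, ChunkInfo info, byte[] data)
          throws StorageContainerException {
        chunkManager.writeChunk(pipeline, keyName, info, data,
            ContainerProtos.Stage.COMBINED);
        // Reads are keyed the same way; the implementation resolves the backing
        // container via pipeline.getContainerName().
        return chunkManager.readChunk(pipeline, keyName, info);
      }
    }

deleteChunk follows the same pattern, so a caller never handles container IDs directly; everything hangs off the container name carried by the pipeline.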
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java index 6b60c527191..f7280e2a3c6 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDeletionChoosingPolicy.java @@ -41,6 +41,6 @@ * @throws StorageContainerException */ List<ContainerData> chooseContainerForBlockDeletion(int count, - Map<Long, ContainerStatus> candidateContainers) + Map<String, ContainerStatus> candidateContainers) throws StorageContainerException; } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java index 84d95f86123..2ff636e87fd 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java @@ -21,6 +21,7 @@ import org.apache.hadoop.classification.InterfaceAudience; import org.apache.hadoop.classification.InterfaceStability; import org.apache.hadoop.conf.Configuration; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; import org.apache.hadoop.hdfs.server.datanode.StorageLocation; @@ -59,43 +60,48 @@ void init(Configuration config, List<StorageLocation> containerDirs, /** * Creates a container with the given name. * + * @param pipeline - Nodes which make up this container. * @param containerData - Container Name and metadata. * @throws StorageContainerException */ - void createContainer(ContainerData containerData) + void createContainer(Pipeline pipeline, ContainerData containerData) throws StorageContainerException; /** * Deletes an existing container. * - * @param containerID - ID of the container. + * @param pipeline - nodes that make up this container. + * @param containerName - name of the container. * @param forceDelete - whether this container should be deleted forcibly. * @throws StorageContainerException */ - void deleteContainer(long containerID, + void deleteContainer(Pipeline pipeline, String containerName, boolean forceDelete) throws StorageContainerException; /** * Update an existing container. * - * @param containerID ID of the container + * @param pipeline container nodes + * @param containerName name of the container * @param data container data * @param forceUpdate if true, update container forcibly. * @throws StorageContainerException */ - void updateContainer(long containerID, ContainerData data, - boolean forceUpdate) throws StorageContainerException; + void updateContainer(Pipeline pipeline, String containerName, + ContainerData data, boolean forceUpdate) throws StorageContainerException; /** * A simple interface for container iterations. * - * @param startContainerID - Return containers with ID >= startContainerID. - * @param count - how many to return - * @param data - Actual containerData + * @param prefix - Return only values matching this prefix + * @param count - how many to return + * @param prevKey - Previous key - Server returns results from this point. 
+ * @param data - Actual containerData * @throws StorageContainerException */ - void listContainer(long startContainerID, long count, - List<ContainerData> data) throws StorageContainerException; + void listContainer(String prefix, long count, String prevKey, + List<ContainerData> data) + throws StorageContainerException; /** * Choose containers for block deletion. @@ -109,30 +115,30 @@ void listContainer(long startContainerID, long count, /** * Get metadata about a specific container. * - * @param containerID - ID of the container. + * @param containerName - Name of the container * @return ContainerData - Container Data. * @throws StorageContainerException */ - ContainerData readContainer(long containerID) + ContainerData readContainer(String containerName) throws StorageContainerException; /** * Closes an open container; if it is already closed or does not exist, a * StorageContainerException is thrown. * - * @param containerID - ID of the container. + * @param containerName - Name of the container. * @throws StorageContainerException */ - void closeContainer(long containerID) + void closeContainer(String containerName) throws StorageContainerException, NoSuchAlgorithmException; /** * Checks if a container is open. * - * @param containerID - ID of the container. + * @param containerName - Name of the container. * @return true if the container is open, false otherwise. * @throws StorageContainerException - Throws Exception if we are not * able to find the container. */ - boolean isOpen(long containerID) throws StorageContainerException; + boolean isOpen(String containerName) throws StorageContainerException; /** * Supports clean shutdown of container. @@ -197,7 +203,7 @@ void closeContainer(long containerID) * @param containerId * container id */ - void incrPendingDeletionBlocks(int numBlocks, long containerId); + void incrPendingDeletionBlocks(int numBlocks, String containerId); /** * Decrease pending deletion blocks count number of specified container. @@ -207,64 +213,64 @@ void closeContainer(long containerID) * @param containerId * container id */ - void decrPendingDeletionBlocks(int numBlocks, long containerId); + void decrPendingDeletionBlocks(int numBlocks, String containerId); /** * Increase the read count of the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. */ - void incrReadCount(long containerId); + void incrReadCount(String containerName); /** * Increase the read counter for bytes read from the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param readBytes - bytes read from the container. */ - void incrReadBytes(long containerId, long readBytes); + void incrReadBytes(String containerName, long readBytes); /** * Increase the write count of the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. */ - void incrWriteCount(long containerId); + void incrWriteCount(String containerName); /** * Increase the write counter for bytes written into the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param writeBytes - bytes written into the container. */ - void incrWriteBytes(long containerId, long writeBytes); + void incrWriteBytes(String containerName, long writeBytes); /** * Increase the bytes used by the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param used - additional bytes used by the container. 
* @return the current bytes used. */ - long incrBytesUsed(long containerId, long used); + long incrBytesUsed(String containerName, long used); /** * Decrease the bytes used by the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @param used - additional bytes reclaimed by the container. * @return the current bytes used. */ - long decrBytesUsed(long containerId, long used); + long decrBytesUsed(String containerName, long used); /** * Get the bytes used by the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @return the current bytes used by the container. */ - long getBytesUsed(long containerId); + long getBytesUsed(String containerName); /** * Get the number of keys in the container. - * @param containerId - ID of the container. + * @param containerName - Name of the container. * @return the current key count. */ - long getNumKeys(long containerId); + long getNumKeys(String containerName); /** * Get the container report state to send via HB to SCM. diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java index 158ce38efc7..8c27ba94c42 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/KeyManager.java @@ -17,9 +17,9 @@ */ package org.apache.hadoop.ozone.container.common.interfaces; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.container.common.helpers .StorageContainerException; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.ozone.container.common.helpers.KeyData; import java.io.IOException; @@ -32,10 +32,11 @@ /** * Puts or overwrites a key. * + * @param pipeline - Pipeline. * @param data - Key Data. * @throws IOException */ - void putKey(KeyData data) throws IOException; + void putKey(Pipeline pipeline, KeyData data) throws IOException; /** * Gets an existing key. @@ -49,21 +50,23 @@ /** * Deletes an existing Key. * - * @param blockID - ID of the block. + * @param pipeline - Pipeline. + * @param keyName - Name of the key. * @throws StorageContainerException */ - void deleteKey(BlockID blockID) + void deleteKey(Pipeline pipeline, String keyName) throws IOException; /** * List keys in a container. * - * @param containerID - ID of the container. - * @param startLocalID - Key to start from, 0 to begin. + * @param pipeline - pipeline. + * @param prefix - Prefix, if needed. + * @param startKey - Key to start from, EMPTY_STRING to begin. * @param count - Number of keys to return. * @return List of Keys that match the criteria. 
*/ - List<KeyData> listKey(long containerID, long startLocalID, + List<KeyData> listKey(Pipeline pipeline, String prefix, String startKey, int count) throws IOException; /** diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java index 7c3fa30bd1d..ac95b2a12cb 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java @@ -180,12 +180,12 @@ public BackgroundTaskResult call() throws Exception { meta.getSequentialRangeKVs(null, blockLimitPerTask, filter); if (toDeleteBlocks.isEmpty()) { LOG.debug("No under deletion block found in container : {}", - containerData.getContainerID()); + containerData.getContainerName()); } List<String> succeedBlocks = new LinkedList<>(); LOG.debug("Container : {}, To-Delete blocks : {}", - containerData.getContainerID(), toDeleteBlocks.size()); + containerData.getContainerName(), toDeleteBlocks.size()); File dataDir = ContainerUtils.getDataDirectory(containerData).toFile(); if (!dataDir.exists() || !dataDir.isDirectory()) { LOG.error("Invalid container data dir {} : " @@ -220,11 +220,11 @@ public BackgroundTaskResult call() throws Exception { meta.writeBatch(batch); // update count of pending deletion blocks in in-memory container status containerManager.decrPendingDeletionBlocks(succeedBlocks.size(), - containerData.getContainerID()); + containerData.getContainerName()); if (!succeedBlocks.isEmpty()) { LOG.info("Container: {}, deleted blocks: {}, task elapsed time: {}ms", - containerData.getContainerID(), succeedBlocks.size(), + containerData.getContainerName(), succeedBlocks.size(), Time.monotonicNow() - startTime); } crr.addAll(succeedBlocks); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java index d8adc7df0f6..f7b49b75905 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/CloseContainerHandler.java @@ -58,20 +58,19 @@ public void handle(SCMCommand command, OzoneContainer container, LOG.debug("Processing Close Container command."); invocationCount++; long startTime = Time.monotonicNow(); - // TODO: define this as INVALID_CONTAINER_ID in HddsConsts.java (TBA) - long containerID = -1; + String containerName = "UNKNOWN"; try { SCMCloseContainerCmdResponseProto closeContainerProto = SCMCloseContainerCmdResponseProto .parseFrom(command.getProtoBufMessage()); - containerID = closeContainerProto.getContainerID(); + containerName = closeContainerProto.getContainerName(); - container.getContainerManager().closeContainer(containerID); + container.getContainerManager().closeContainer(containerName); } catch (Exception e) { - LOG.error("Can't close container " + containerID, e); + LOG.error("Can't close container " + containerName, e); } finally { long endTime = Time.monotonicNow(); 
totalTime += endTime - startTime; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java index 5231660b6d1..f106e3d55fc 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java @@ -16,7 +16,6 @@ */ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler; -import com.google.common.primitives.Longs; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.protocol.proto @@ -109,7 +108,7 @@ public void handle(SCMCommand command, OzoneContainer container, txResultBuilder.setSuccess(true); } catch (IOException e) { LOG.warn("Failed to delete blocks for container={}, TXID={}", - entry.getContainerID(), entry.getTxID(), e); + entry.getContainerName(), entry.getTxID(), e); txResultBuilder.setSuccess(false); } resultBuilder.addResults(txResultBuilder.build()); @@ -151,7 +150,7 @@ public void handle(SCMCommand command, OzoneContainer container, */ private void deleteContainerBlocks(DeletedBlocksTransaction delTX, Configuration config) throws IOException { - long containerId = delTX.getContainerID(); + String containerId = delTX.getContainerName(); ContainerData containerInfo = containerManager.readContainer(containerId); if (LOG.isDebugEnabled()) { LOG.debug("Processing Container : {}, DB path : {}", containerId, @@ -160,9 +159,9 @@ private void deleteContainerBlocks(DeletedBlocksTransaction delTX, int newDeletionBlocks = 0; MetadataStore containerDB = KeyUtils.getDB(containerInfo, config); - for (Long blk : delTX.getLocalIDList()) { + for (String blk : delTX.getBlockIDList()) { BatchOperation batch = new BatchOperation(); - byte[] blkBytes = Longs.toByteArray(blk); + byte[] blkBytes = DFSUtil.string2Bytes(blk); byte[] blkInfo = containerDB.get(blkBytes); if (blkInfo != null) { // Found the block in container db, diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java index eba565db640..5dee10f44b4 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/HeartbeatEndpointTask.java @@ -170,7 +170,7 @@ private void processResponse(SCMHeartbeatResponseProto response, commandResponseProto.getCloseContainerProto()); if (LOG.isDebugEnabled()) { LOG.debug("Received SCM container close request for container {}", - closeContainer.getContainerID()); + closeContainer.getContainerName()); } this.context.addCommand(closeContainer); break; diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java index 42568e3ad38..ca3bef0b357 100644 --- 
a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/RegisterEndpointTask.java @@ -99,7 +99,8 @@ public void setDatanodeDetails( // TODO : Add responses to the command Queue. SCMRegisteredCmdResponseProto response = rpcEndPoint.getEndPoint() - .register(datanodeDetails.getProtoBufMessage()); + .register(datanodeDetails.getProtoBufMessage(), + conf.getStrings(ScmConfigKeys.OZONE_SCM_NAMES)); Preconditions.checkState(UUID.fromString(response.getDatanodeUUID()) .equals(datanodeDetails.getUuid()), "Unexpected datanode ID in the response."); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java index 89eaaced032..1a89e44bd18 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/ratis/ContainerStateMachine.java @@ -94,7 +94,7 @@ private ThreadPoolExecutor writeChunkExecutor; private final ConcurrentHashMap<Long, CompletableFuture<Message>> writeChunkFutureMap; - private final ConcurrentHashMap<Long, CompletableFuture<Message>> + private final ConcurrentHashMap<String, CompletableFuture<Message>> createContainerFutureMap; ContainerStateMachine(ContainerDispatcher dispatcher, @@ -146,7 +146,8 @@ public TransactionContext startTransaction(RaftClientRequest request) // create the log entry proto final WriteChunkRequestProto commitWriteChunkProto = WriteChunkRequestProto.newBuilder() - .setBlockID(write.getBlockID()) + .setPipeline(write.getPipeline()) + .setKeyName(write.getKeyName()) .setChunkData(write.getChunkData()) // skipping the data field as it is // already set in statemachine data proto @@ -195,9 +196,9 @@ private Message runCommand(ContainerCommandRequestProto requestProto) { private CompletableFuture<Message> handleWriteChunk( ContainerCommandRequestProto requestProto, long entryIndex) { final WriteChunkRequestProto write = requestProto.getWriteChunk(); - long containerID = write.getBlockID().getContainerID(); + String containerName = write.getPipeline().getContainerName(); CompletableFuture<Message> future = - createContainerFutureMap.get(containerID); + createContainerFutureMap.get(containerName); CompletableFuture<Message> writeChunkFuture; if (future != null) { writeChunkFuture = future.thenApplyAsync( @@ -212,10 +213,10 @@ private Message runCommand(ContainerCommandRequestProto requestProto) { private CompletableFuture<Message> handleCreateContainer( ContainerCommandRequestProto requestProto) { - long containerID = - requestProto.getCreateContainer().getContainerData().getContainerID(); + String containerName = + requestProto.getCreateContainer().getContainerData().getName(); createContainerFutureMap. 
- computeIfAbsent(containerID, k -> new CompletableFuture<>()); + computeIfAbsent(containerName, k -> new CompletableFuture<>()); return CompletableFuture.completedFuture(() -> ByteString.EMPTY); } @@ -269,9 +270,9 @@ private Message runCommand(ContainerCommandRequestProto requestProto) { } else { Message message = runCommand(requestProto); if (cmdType == ContainerProtos.Type.CreateContainer) { - long containerID = - requestProto.getCreateContainer().getContainerData().getContainerID(); - createContainerFutureMap.remove(containerID).complete(message); + String containerName = + requestProto.getCreateContainer().getContainerData().getName(); + createContainerFutureMap.remove(containerName).complete(message); } return CompletableFuture.completedFuture(message); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java index 4d9c6903111..6ae45b6d08b 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/utils/ContainerCache.java @@ -69,15 +69,15 @@ public synchronized static ContainerCache getInstance(Configuration conf) { /** * Closes a db instance. * - * @param containerID - ID of the container to be closed. + * @param container - name of the container to be closed. * @param db - db instance to close. */ - private void closeDB(long containerID, MetadataStore db) { + private void closeDB(String container, MetadataStore db) { if (db != null) { try { db.close(); } catch (IOException e) { - LOG.error("Error closing DB. Container: " + containerID, e); + LOG.error("Error closing DB. Container: " + container, e); } } } @@ -93,7 +93,7 @@ public void shutdownCache() { while (iterator.hasNext()) { iterator.next(); MetadataStore db = (MetadataStore) iterator.getValue(); - closeDB(((Number)iterator.getKey()).longValue(), db); + closeDB(iterator.getKey().toString(), db); } // reset the cache cache.clear(); @@ -110,7 +110,7 @@ protected boolean removeLRU(LinkEntry entry) { lock.lock(); try { MetadataStore db = (MetadataStore) entry.getValue(); - closeDB(((Number)entry.getKey()).longValue(), db); + closeDB(entry.getKey().toString(), db); } finally { lock.unlock(); } @@ -120,27 +120,28 @@ protected boolean removeLRU(LinkEntry entry) { /** * Returns a DB handle if available, create the handler otherwise. * - * @param containerID - ID of the container. + * @param containerName - Name of the container. * @return MetadataStore. */ - public MetadataStore getDB(long containerID, String containerDBPath) + public MetadataStore getDB(String containerName, String containerDBPath) throws IOException { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); + Preconditions.checkNotNull(containerName); + Preconditions.checkState(!containerName.isEmpty()); lock.lock(); try { - MetadataStore db = (MetadataStore) this.get(containerID); + MetadataStore db = (MetadataStore) this.get(containerName); if (db == null) { db = MetadataStoreBuilder.newBuilder() .setDbFile(new File(containerDBPath)) .setCreateIfMissing(false) .build(); - this.put(containerID, db); + this.put(containerName, db); } return db; } catch (Exception e) { LOG.error("Error opening DB. 
Container:{} ContainerPath:{}", - containerID, containerDBPath, e); + containerName, containerDBPath, e); throw e; } finally { lock.unlock(); @@ -150,15 +151,16 @@ public MetadataStore getDB(long containerID, String containerDBPath) /** * Remove a DB handler from cache. * - * @param containerID - ID of the container. + * @param containerName - Name of the container. */ - public void removeDB(long containerID) { - Preconditions.checkState(containerID >= 0, "Container ID cannot be negative."); + public void removeDB(String containerName) { + Preconditions.checkNotNull(containerName); + Preconditions.checkState(!containerName.isEmpty()); lock.lock(); try { - MetadataStore db = (MetadataStore)this.get(containerID); - closeDB(containerID, db); - this.remove(containerID); + MetadataStore db = (MetadataStore)this.get(containerName); + closeDB(containerName, db); + this.remove(containerName); } finally { lock.unlock(); } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java index cb657276306..43e7412aeae 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/StorageContainerDatanodeProtocol.java @@ -69,11 +69,12 @@ SCMHeartbeatResponseProto sendHeartbeat(DatanodeDetailsProto datanodeDetails, /** * Register Datanode. * @param datanodeDetails - Datanode Details. - * + * @param scmAddresses - List of SCMs this datanode is configured to + * communicate. * @return SCM Command. */ - SCMRegisteredCmdResponseProto register(DatanodeDetailsProto datanodeDetails) - throws IOException; + SCMRegisteredCmdResponseProto register(DatanodeDetailsProto datanodeDetails, + String[] scmAddresses) throws IOException; /** * Send a container report. 
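
The ContainerCache hunks above re-key the LRU map of open MetadataStore handles by container name and close whatever is evicted (removeLRU) or explicitly dropped (removeDB). A minimal sketch of the same pattern using only the JDK: HandleCache and Opener are hypothetical stand-ins for the LRUMap subclass and MetadataStore in the patch, with LinkedHashMap's removeEldestEntry hook playing the role of removeLRU().

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.locks.ReentrantLock;

/** Sketch: a name-keyed LRU cache that closes handles it drops. */
public final class HandleCache<H extends AutoCloseable> {
  private final ReentrantLock lock = new ReentrantLock();
  private final LinkedHashMap<String, H> cache;

  public HandleCache(final int maxHandles) {
    // accessOrder=true gives LRU iteration order; removeEldestEntry evicts.
    this.cache = new LinkedHashMap<String, H>(16, 0.75f, true) {
      @Override
      protected boolean removeEldestEntry(Map.Entry<String, H> eldest) {
        if (size() > maxHandles) {
          closeQuietly(eldest.getKey(), eldest.getValue());
          return true;
        }
        return false;
      }
    };
  }

  /** Returns the cached handle, opening and caching one if absent. */
  public H get(String containerName, Opener<H> opener) throws Exception {
    if (containerName == null || containerName.isEmpty()) {
      throw new IllegalArgumentException("container name must be non-empty");
    }
    lock.lock();
    try {
      H handle = cache.get(containerName);
      if (handle == null) {
        handle = opener.open(containerName);
        cache.put(containerName, handle);
      }
      return handle;
    } finally {
      lock.unlock();
    }
  }

  /** Closes and drops a handle, mirroring removeDB(). */
  public void remove(String containerName) {
    lock.lock();
    try {
      H handle = cache.remove(containerName);
      if (handle != null) {
        closeQuietly(containerName, handle);
      }
    } finally {
      lock.unlock();
    }
  }

  private static void closeQuietly(String name, AutoCloseable c) {
    try {
      c.close();
    } catch (Exception e) {
      System.err.println("Error closing DB. Container: " + name);
    }
  }

  @FunctionalInterface
  public interface Opener<H> {
    H open(String containerName) throws Exception;
  }
}

Closing the evicted handle inside the eviction hook is the important part: silently dropping the last reference to an open store handle would leak the underlying resources.
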
diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java index d1d64881344..b1cdbc49136 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/commands/CloseContainerCommand.java @@ -32,10 +32,10 @@ public class CloseContainerCommand extends SCMCommand { - private long containerID; + private String containerName; - public CloseContainerCommand(long containerID) { - this.containerID = containerID; + public CloseContainerCommand(String containerName) { + this.containerName = containerName; } /** @@ -60,17 +60,17 @@ public SCMCmdType getType() { public SCMCloseContainerCmdResponseProto getProto() { return SCMCloseContainerCmdResponseProto.newBuilder() - .setContainerID(containerID).build(); + .setContainerName(containerName).build(); } public static CloseContainerCommand getFromProtobuf( SCMCloseContainerCmdResponseProto closeContainerProto) { Preconditions.checkNotNull(closeContainerProto); - return new CloseContainerCommand(closeContainerProto.getContainerID()); + return new CloseContainerCommand(closeContainerProto.getContainerName()); } - public long getContainerID() { - return containerID; + public String getContainerName() { + return containerName; } } diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java index 13162de7d39..12fed1cd0f2 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolClientSideTranslatorPB.java @@ -156,7 +156,8 @@ public SCMHeartbeatResponseProto sendHeartbeat( */ @Override public SCMRegisteredCmdResponseProto register( - DatanodeDetailsProto datanodeDetailsProto) throws IOException { + DatanodeDetailsProto datanodeDetailsProto, + String[] scmAddresses) throws IOException { SCMRegisterRequestProto.Builder req = SCMRegisterRequestProto.newBuilder(); req.setDatanodeDetails(datanodeDetailsProto); diff --git a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java index 25757aa3459..985b75acd67 100644 --- a/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java +++ b/hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocolPB/StorageContainerDatanodeProtocolServerSideTranslatorPB.java @@ -68,8 +68,15 @@ public StorageContainerDatanodeProtocolServerSideTranslatorPB( public StorageContainerDatanodeProtocolProtos.SCMRegisteredCmdResponseProto register(RpcController controller, StorageContainerDatanodeProtocolProtos .SCMRegisterRequestProto request) throws ServiceException { + String[] addressArray = null; + + if (request.hasAddressList()) { + addressArray = 
request.getAddressList().getAddressListList() + .toArray(new String[0]); + } + try { - return impl.register(request.getDatanodeDetails()); + return impl.register(request.getDatanodeDetails(), addressArray); } catch (IOException e) { throw new ServiceException(e); } diff --git a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto index 4d5795aae8f..03b85e500a5 100644 --- a/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto +++ b/hadoop-hdds/container-service/src/main/proto/StorageContainerDatanodeProtocol.proto @@ -87,6 +87,7 @@ message NodeContianerMapping { A container report contains the following information. */ message ContainerInfo { + required string containerName = 1; optional string finalhash = 2; optional int64 size = 3; optional int64 used = 4; @@ -101,12 +102,10 @@ message ContainerInfo { } // The deleted blocks which are stored in deletedBlock.db of scm. -// We don't use BlockID because this only contians multiple localIDs -// of the same containerID. message DeletedBlocksTransaction { required int64 txID = 1; - required int64 containerID = 2; - repeated int64 localID = 3; + required string containerName = 2; + repeated string blockID = 3; // the retry time of sending deleting command to datanode. required int32 count = 4; } @@ -146,6 +145,7 @@ message SCMStorageReport { message SCMRegisterRequestProto { required DatanodeDetailsProto datanodeDetails = 1; + optional SCMNodeAddressList addressList = 2; } /** @@ -201,7 +201,7 @@ message SendContainerReportProto { This command asks the datanode to close a specific container. */ message SCMCloseContainerCmdResponseProto { - required int64 containerID = 1; + required string containerName = 1; } /** diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java index 703878c1173..41a8a8012c9 100644 --- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java +++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java @@ -195,12 +195,14 @@ private void sleepIfNeeded() { * Register Datanode. * * @param datanodeDetailsProto DatanodDetailsProto. + * @param scmAddresses - List of SCMs this datanode is configured to + * communicate. * @return SCM Command. 
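
register() now carries the datanode's configured SCM addresses end to end: RegisterEndpointTask reads them with conf.getStrings(ScmConfigKeys.OZONE_SCM_NAMES), the client-side translator accepts the new parameter, and the server-side translator unpacks the optional addressList field only when hasAddressList() is true. A sketch of deriving such an address array, assuming (hypothetically) that the key holds a comma-separated host list; parseScmNames is a stand-in for Configuration.getStrings().

import java.util.Arrays;

/** Sketch: turning a raw "ozone.scm.names" value into the String[] that
 *  register() now expects. Returns null when unset, mirroring getStrings(). */
public final class ScmNames {
  static String[] parseScmNames(String rawValue) {
    if (rawValue == null || rawValue.trim().isEmpty()) {
      return null;
    }
    return Arrays.stream(rawValue.split(","))
        .map(String::trim)
        .filter(s -> !s.isEmpty())
        .toArray(String[]::new);
  }

  public static void main(String[] args) {
    // Example hosts are made up.
    String[] scms = parseScmNames("scm1.example.com, scm2.example.com");
    System.out.println(Arrays.toString(scms));
  }
}
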
*/ @Override public StorageContainerDatanodeProtocolProtos .SCMRegisteredCmdResponseProto register( - DatanodeDetailsProto datanodeDetailsProto) + DatanodeDetailsProto datanodeDetailsProto, String[] scmAddresses) throws IOException { rpcCount.incrementAndGet(); sleepIfNeeded(); @@ -237,7 +239,7 @@ private void sleepIfNeeded() { for (StorageContainerDatanodeProtocolProtos.ContainerInfo report: reports.getReportsList()) { - containers.put(report.getContainerID(), report); + containers.put(report.getContainerName(), report); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java index f9aa0cd4f72..4ab251641c5 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManager.java @@ -18,8 +18,8 @@ package org.apache.hadoop.hdds.scm.block; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; -import org.apache.hadoop.hdds.client.BlockID; import java.io.Closeable; import java.io.IOException; @@ -43,6 +43,14 @@ AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type, HddsProtos.ReplicationFactor factor, String owner) throws IOException; /** + * Give the key to the block, get the pipeline info. + * @param key - key to the block. + * @return - Pipeline that used to access the block. + * @throws IOException + */ + Pipeline getBlock(String key) throws IOException; + + /** * Deletes a list of blocks in an atomic operation. Internally, SCM * writes these blocks into a {@link DeletedBlockLog} and deletes them * from SCM DB. If this is successful, given blocks are entering pending @@ -52,7 +60,7 @@ AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType type, * a particular object key. * @throws IOException if exception happens, non of the blocks is deleted. */ - void deleteBlocks(List blockIDs) throws IOException; + void deleteBlocks(List blockIDs) throws IOException; /** * @return the block deletion transaction log maintained by SCM. 
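
With getBlock(key) joining the BlockManager interface, SCM needs a reverse index from block key to container name. The patch persists that index in a MetadataStore (the BLOCK_DB store created in BlockManagerImpl below); the sketch here keeps it in memory purely to illustrate the lookup contract, including failing loudly when a key is unknown.

import java.io.IOException;
import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

/** Sketch: the block-key to container-name index behind getBlock(). */
public final class BlockIndex {
  private final Map<String, String> keyToContainer = new ConcurrentHashMap<>();

  /** Called at allocation time: remember which container holds the block. */
  public void put(String blockKey, String containerName) {
    keyToContainer.put(blockKey, containerName);
  }

  /** Mirrors getBlock(): resolve a block key or fail with a clear error. */
  public String getContainerForBlock(String blockKey) throws IOException {
    String containerName = keyToContainer.get(blockKey);
    if (containerName == null) {
      throw new IOException(
          "Specified block key does not exist. key : " + blockKey);
    }
    return containerName;
  }
}
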
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java index 5a98e8537fd..d9661124f57 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/BlockManagerImpl.java @@ -16,25 +16,30 @@ */ package org.apache.hadoop.hdds.scm.block; +import com.google.common.annotations.VisibleForTesting; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.Mapping; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; import org.apache.hadoop.metrics2.util.MBeans; import org.apache.hadoop.ozone.OzoneConsts; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.util.StringUtils; -import org.apache.hadoop.util.Time; +import org.apache.hadoop.utils.BatchOperation; +import org.apache.hadoop.utils.MetadataStore; +import org.apache.hadoop.utils.MetadataStoreBuilder; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import javax.management.ObjectName; +import java.io.File; import java.io.IOException; import java.util.ArrayList; import java.util.HashMap; @@ -49,7 +54,10 @@ import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .CHILL_MODE_EXCEPTION; import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes + .FAILED_TO_FIND_BLOCK; +import static org.apache.hadoop.hdds.scm.exceptions.SCMException.ResultCodes .INVALID_BLOCK_SIZE; +import static org.apache.hadoop.hdds.server.ServerUtils.getOzoneMetaDirPath; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_SERVICE_INTERVAL; import static org.apache.hadoop.ozone.OzoneConfigKeys @@ -58,6 +66,7 @@ .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT; import static org.apache.hadoop.ozone.OzoneConfigKeys .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT; +import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB; /** Block Manager manages the block access for SCM. */ public class BlockManagerImpl implements BlockManager, BlockmanagerMXBean { @@ -69,9 +78,11 @@ private final NodeManager nodeManager; private final Mapping containerManager; + private final MetadataStore blockStore; private final Lock lock; private final long containerSize; + private final long cacheSize; private final DeletedBlockLog deletedBlockLog; private final SCMBlockDeletingService blockDeletingService; @@ -86,17 +97,30 @@ * @param conf - configuration. * @param nodeManager - node manager. * @param containerManager - container manager. + * @param cacheSizeMB - cache size for level db store. 
* @throws IOException */ public BlockManagerImpl(final Configuration conf, - final NodeManager nodeManager, final Mapping containerManager) - throws IOException { + final NodeManager nodeManager, final Mapping containerManager, + final int cacheSizeMB) throws IOException { this.nodeManager = nodeManager; this.containerManager = containerManager; + this.cacheSize = cacheSizeMB; this.containerSize = OzoneConsts.GB * conf.getInt( ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_GB, ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT); + File metaDir = getOzoneMetaDirPath(conf); + String scmMetaDataDir = metaDir.getPath(); + + // Write the block key to container name mapping. + File blockContainerDbPath = new File(scmMetaDataDir, BLOCK_DB); + blockStore = + MetadataStoreBuilder.newBuilder() + .setConf(conf) + .setDbFile(blockContainerDbPath) + .setCacheSize(this.cacheSize * OzoneConsts.MB) + .build(); this.containerProvisionBatchSize = conf.getInt( @@ -157,11 +181,12 @@ private void preAllocateContainers(int count, ReplicationType type, lock.lock(); try { for (int i = 0; i < count; i++) { + String containerName = UUID.randomUUID().toString(); ContainerInfo containerInfo = null; try { // TODO: Fix this later when Ratis is made the Default. containerInfo = containerManager.allocateContainer(type, factor, - owner); + containerName, owner); if (containerInfo == null) { LOG.warn("Unable to allocate container."); @@ -242,7 +267,7 @@ public AllocatedBlock allocateBlock(final long size, size, owner, type, factor, HddsProtos.LifeCycleState .ALLOCATED); if (containerInfo != null) { - containerManager.updateContainerState(containerInfo.getContainerID(), + containerManager.updateContainerState(containerInfo.getContainerName(), HddsProtos.LifeCycleEvent.CREATE); return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED); } @@ -272,7 +297,7 @@ public AllocatedBlock allocateBlock(final long size, size, owner, type, factor, HddsProtos.LifeCycleState .ALLOCATED); if (containerInfo != null) { - containerManager.updateContainerState(containerInfo.getContainerID(), + containerManager.updateContainerState(containerInfo.getContainerName(), HddsProtos.LifeCycleEvent.CREATE); return newBlock(containerInfo, HddsProtos.LifeCycleState.ALLOCATED); } @@ -302,30 +327,71 @@ private AllocatedBlock newBlock( ContainerInfo containerInfo, HddsProtos.LifeCycleState state) throws IOException { - if (containerInfo.getPipeline().getMachines().size() == 0) { - LOG.error("Pipeline Machine count is zero."); - return null; - } - - // TODO : Revisit this local ID allocation when HA is added. - // TODO: this does not work well if multiple allocation kicks in a tight - // loop. - long localID = Time.getUtcTime(); - long containerID = containerInfo.getContainerID(); - + // TODO : Replace this with Block ID. + String blockKey = UUID.randomUUID().toString(); boolean createContainer = (state == HddsProtos.LifeCycleState.ALLOCATED); AllocatedBlock.Builder abb = new AllocatedBlock.Builder() - .setBlockID(new BlockID(containerID, localID)) + .setKey(blockKey) + // TODO : Use containerinfo instead of pipeline. 
.setPipeline(containerInfo.getPipeline()) .setShouldCreateContainer(createContainer); - LOG.trace("New block allocated : {} Container ID: {}", localID, - containerID); + LOG.trace("New block allocated : {} Container ID: {}", blockKey, + containerInfo.toString()); + + if (containerInfo.getPipeline().getMachines().size() == 0) { + LOG.error("Pipeline Machine count is zero."); + return null; + } + + // Persist this block info to the blockStore DB, so getBlock(key) can + // find which container the block lives in. + // TODO : Remove this DB in future + // and make this a KSM operation. Category: SCALABILITY. + if (containerInfo.getPipeline().getMachines().size() > 0) { + blockStore.put( + DFSUtil.string2Bytes(blockKey), + DFSUtil.string2Bytes(containerInfo.getPipeline().getContainerName())); + } return abb.build(); } /** + * Given a block key, return the Pipeline information. + * + * @param key - block key assigned by SCM. + * @return Pipeline (list of DNs and leader) to access the block. + * @throws IOException + */ + @Override + public Pipeline getBlock(final String key) throws IOException { + lock.lock(); + try { + byte[] containerBytes = blockStore.get(DFSUtil.string2Bytes(key)); + if (containerBytes == null) { + throw new SCMException( + "Specified block key does not exist. key : " + key, + FAILED_TO_FIND_BLOCK); + } + + String containerName = DFSUtil.bytes2String(containerBytes); + ContainerInfo containerInfo = containerManager.getContainer( + containerName); + if (containerInfo == null) { + LOG.debug("Container {} allocated by block service " + + "can't be found in SCM", containerName); + throw new SCMException( + "Unable to find container for the block", + SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); + } + return containerInfo.getPipeline(); + } finally { + lock.unlock(); + } + } + + /** * Deletes a list of blocks in an atomic operation. Internally, SCM writes * these blocks into a * {@link DeletedBlockLog} and deletes them from SCM DB. If this is @@ -337,28 +403,40 @@ private AllocatedBlock newBlock( * @throws IOException if exception happens, none of the blocks is deleted. */ @Override - public void deleteBlocks(List<BlockID> blockIDs) throws IOException { + public void deleteBlocks(List<String> blockIDs) throws IOException { if (!nodeManager.isOutOfChillMode()) { throw new SCMException("Unable to delete block while in chill mode", CHILL_MODE_EXCEPTION); } lock.lock(); - LOG.info("Deleting blocks {}", StringUtils.join(",", blockIDs)); - Map<Long, List<Long>> containerBlocks = new HashMap<>(); + LOG.info("Deleting blocks {}", String.join(",", blockIDs)); + Map<String, List<String>> containerBlocks = new HashMap<>(); + BatchOperation batch = new BatchOperation(); + BatchOperation rollbackBatch = new BatchOperation(); // TODO: track the block size info so that we can reclaim the container // TODO: used space when the block is deleted. try { - for (BlockID block : blockIDs) { + for (String blockKey : blockIDs) { + byte[] blockKeyBytes = DFSUtil.string2Bytes(blockKey); + byte[] containerBytes = blockStore.get(blockKeyBytes); + if (containerBytes == null) { + throw new SCMException( + "Specified block key does not exist. key : " + blockKey, + FAILED_TO_FIND_BLOCK); + } + batch.delete(blockKeyBytes); + rollbackBatch.put(blockKeyBytes, containerBytes); + // Merge blocks into a container-to-blocks mapping, // prepare to persist this info to the deletedBlocksLog. 
- long containerID = block.getContainerID(); - if (containerBlocks.containsKey(containerID)) { - containerBlocks.get(containerID).add(block.getLocalID()); + String containerName = DFSUtil.bytes2String(containerBytes); + if (containerBlocks.containsKey(containerName)) { + containerBlocks.get(containerName).add(blockKey); } else { - List<Long> item = new ArrayList<>(); - item.add(block.getLocalID()); - containerBlocks.put(containerID, item); + List<String> item = new ArrayList<>(); + item.add(blockKey); + containerBlocks.put(containerName, item); } } @@ -367,13 +445,34 @@ public void deleteBlocks(List<String> blockIDs) throws IOException { // removed. If we write the log first, once log is written, the // async deleting service will start to scan and might be picking // up some blocks to do real deletions, that might cause data loss. + blockStore.writeBatch(batch); try { deletedBlockLog.addTransactions(containerBlocks); } catch (IOException e) { + try { + // If the delLog update fails, we need to roll back the changes. + blockStore.writeBatch(rollbackBatch); + } catch (IOException rollbackException) { + // This is a corner case. AddTX fails and rollback also fails, + // this will leave these blocks in an inconsistent state. They were + // moved to pending deletion state in SCM DB but were not written + // into delLog, so real deletions would not be done. Blocks become + // invisible in the namespace but the actual data is not removed. + // We log an error here so an admin can manually check and fix such + // errors. + LOG.error( + "Blocks might be in inconsistent state because" + + " they were moved to pending deletion state in SCM DB but" + + " not written into delLog. Admin can manually add them" + + " into delLog for deletions. Inconsistent block list: {}", + String.join(",", blockIDs), + e); + throw rollbackException; + } throw new IOException( "Skip writing the deleted blocks info to" + " the delLog because addTransaction fails. Batch skipped: " - + StringUtils.join(",", blockIDs), + + String.join(",", blockIDs), e); } // TODO: Container report handling of the deleted blocks: @@ -389,6 +488,11 @@ public DeletedBlockLog getDeletedBlockLog() { return this.deletedBlockLog; } + @VisibleForTesting + public String getDeletedKeyName(String key) { + return StringUtils.format(".Deleted/%s", key); + } + /** * Close the resources for BlockManager. 
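
The deleteBlocks() rewrite above is effectively a two-phase update: it builds a delete batch and an inverse rollback batch in the same pass, applies the deletes, and restores the old entries only if the subsequent delLog write fails. A condensed sketch of that apply-then-rollback flow; the plain HashMap stands in for the MetadataStore and its BatchOperations (so it is not atomic like the real store), and TxLog is a hypothetical interface in place of DeletedBlockLog.

import java.io.IOException;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Sketch: delete entries, then write the log, rolling back on failure. */
public final class DeleteWithRollback {
  private final Map<String, byte[]> blockStore = new HashMap<>();

  public void deleteBlocks(List<String> blockKeys, TxLog delLog)
      throws IOException {
    Map<String, byte[]> rollback = new HashMap<>();
    // Pass 1: validate every key and stage the inverse (rollback) image.
    for (String key : blockKeys) {
      byte[] value = blockStore.get(key);
      if (value == null) {
        throw new IOException("Specified block key does not exist: " + key);
      }
      rollback.put(key, value);
    }
    // Pass 2: apply the deletes (the real code uses one atomic writeBatch).
    blockStore.keySet().removeAll(rollback.keySet());
    // Pass 3: write the deletion log; undo the deletes if that fails.
    try {
      delLog.addTransactions(blockKeys);
    } catch (IOException e) {
      blockStore.putAll(rollback); // best-effort rollback
      throw new IOException("addTransaction failed, batch skipped", e);
    }
  }

  /** Hypothetical stand-in for DeletedBlockLog. */
  public interface TxLog {
    void addTransactions(List<String> blockKeys) throws IOException;
  }
}

As the comments in the patch note, if the rollback itself also fails, the store and the log disagree, which is why that branch logs the full block list before rethrowing.
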
* @@ -396,6 +500,9 @@ public DeletedBlockLog getDeletedBlockLog() { */ @Override public void close() throws IOException { + if (blockStore != null) { + blockStore.close(); + } if (deletedBlockLog != null) { deletedBlockLog.close(); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java index 32290cc99ba..47074d28ecb 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DatanodeDeletedBlockTransactions.java @@ -56,7 +56,7 @@ public void addTransaction(DeletedBlocksTransaction tx) throws IOException { ContainerInfo info = null; try { - info = mappingService.getContainer(tx.getContainerID()); + info = mappingService.getContainer(tx.getContainerName()); } catch (IOException e) { SCMBlockDeletingService.LOG.warn("Got container info error.", e); } @@ -64,7 +64,7 @@ public void addTransaction(DeletedBlocksTransaction tx) throws IOException { if (info == null) { SCMBlockDeletingService.LOG.warn( "Container {} not found, continue to process next", - tx.getContainerID()); + tx.getContainerName()); return; } @@ -75,7 +75,7 @@ public void addTransaction(DeletedBlocksTransaction tx) throws IOException { if (txs != null && txs.size() < maximumAllowedTXNum) { boolean hasContained = false; for (DeletedBlocksTransaction t : txs) { - if (t.getContainerID() == tx.getContainerID()) { + if (t.getContainerName().equals(tx.getContainerName())) { hasContained = true; break; } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java index cc32b35a4da..f7b770eca0e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLog.java @@ -89,12 +89,12 @@ void incrementCount(List txIDs) /** * Creates a block deletion transaction and adds that into the log. * - * @param containerID - container ID. + * @param containerName - container name. * @param blocks - blocks that belong to the same container. * * @throws IOException */ - void addTransaction(long containerID, List blocks) + void addTransaction(String containerName, List blocks) throws IOException; /** @@ -110,7 +110,7 @@ void addTransaction(long containerID, List blocks) * @param containerBlocksMap a map of containerBlocks. 
* @throws IOException */ - void addTransactions(Map> containerBlocksMap) + void addTransactions(Map> containerBlocksMap) throws IOException; /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java index cabcb46e1d9..0f4988afa33 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/block/DeletedBlockLogImpl.java @@ -190,14 +190,8 @@ public void incrementCount(List txIDs) throws IOException { try { for(Long txID : txIDs) { try { - byte [] deleteBlockBytes = - deletedStore.get(Longs.toByteArray(txID)); - if (deleteBlockBytes == null) { - LOG.warn("Delete txID {} not found", txID); - continue; - } DeletedBlocksTransaction block = DeletedBlocksTransaction - .parseFrom(deleteBlockBytes); + .parseFrom(deletedStore.get(Longs.toByteArray(txID))); DeletedBlocksTransaction.Builder builder = block.toBuilder(); int currentCount = block.getCount(); if (currentCount > -1) { @@ -222,11 +216,11 @@ public void incrementCount(List txIDs) throws IOException { } private DeletedBlocksTransaction constructNewTransaction(long txID, - long containerID, List blocks) { + String containerName, List blocks) { return DeletedBlocksTransaction.newBuilder() .setTxID(txID) - .setContainerID(containerID) - .addAllLocalID(blocks) + .setContainerName(containerName) + .addAllBlockID(blocks) .setCount(0) .build(); } @@ -256,18 +250,18 @@ public void commitTransactions(List txIDs) throws IOException { /** * {@inheritDoc} * - * @param containerID - container ID. + * @param containerName - container name. * @param blocks - blocks that belong to the same container. 
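
DeletedBlockLog transactions are now keyed by container name and carry string block IDs. The bookkeeping is simple: consecutive txIDs handed out under a lock, one transaction per container. A sketch with an in-memory sorted map in place of the Longs.toByteArray(txID)-keyed store, and a hypothetical Tx class in place of the DeletedBlocksTransaction proto.

import java.util.List;
import java.util.Map;
import java.util.TreeMap;

/** Sketch: an append-only deletion log with monotonically increasing IDs. */
public final class DeletionLogSketch {
  /** Hypothetical analogue of DeletedBlocksTransaction. */
  public static final class Tx {
    final long txID;
    final String containerName;
    final List<String> blockIDs;

    Tx(long txID, String containerName, List<String> blockIDs) {
      this.txID = txID;
      this.containerName = containerName;
      this.blockIDs = List.copyOf(blockIDs);
    }
  }

  private final TreeMap<Long, Tx> log = new TreeMap<>();
  private long lastTxID = 0;

  /** Mirrors addTransaction(containerName, blocks). */
  public synchronized void addTransaction(String containerName,
      List<String> blocks) {
    lastTxID += 1;
    log.put(lastTxID, new Tx(lastTxID, containerName, blocks));
  }

  /** Mirrors addTransactions(): one tx per container, IDs assigned
   *  consecutively within a single critical section. */
  public synchronized void addTransactions(
      Map<String, List<String>> containerBlocksMap) {
    for (Map.Entry<String, List<String>> e : containerBlocksMap.entrySet()) {
      addTransaction(e.getKey(), e.getValue());
    }
  }
}
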
* @throws IOException */ @Override - public void addTransaction(long containerID, List blocks) + public void addTransaction(String containerName, List blocks) throws IOException { BatchOperation batch = new BatchOperation(); lock.lock(); try { DeletedBlocksTransaction tx = constructNewTransaction(lastTxID + 1, - containerID, blocks); + containerName, blocks); byte[] key = Longs.toByteArray(lastTxID + 1); batch.put(key, tx.toByteArray()); @@ -309,13 +303,13 @@ public int getNumOfValidTransactions() throws IOException { * @throws IOException */ @Override - public void addTransactions(Map> containerBlocksMap) + public void addTransactions(Map> containerBlocksMap) throws IOException { BatchOperation batch = new BatchOperation(); lock.lock(); try { long currentLatestID = lastTxID; - for (Map.Entry> entry : + for (Map.Entry> entry : containerBlocksMap.entrySet()) { currentLatestID += 1; byte[] key = Longs.toByteArray(currentLatestID); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java index e569874aaff..63cb3a3c1fd 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerMapping.java @@ -18,7 +18,6 @@ import com.google.common.annotations.VisibleForTesting; import com.google.common.base.Preconditions; -import com.google.common.primitives.Longs; import org.apache.hadoop.conf.Configuration; import org.apache.hadoop.hdds.scm.ScmConfigKeys; import org.apache.hadoop.hdds.scm.container.closer.ContainerCloser; @@ -27,6 +26,7 @@ import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.node.NodeManager; import org.apache.hadoop.hdds.scm.pipelines.PipelineSelector; +import org.apache.hadoop.hdfs.DFSUtil; import org.apache.hadoop.hdds.protocol.proto.HddsProtos; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor; import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType; @@ -38,6 +38,8 @@ import org.apache.hadoop.ozone.lease.Lease; import org.apache.hadoop.ozone.lease.LeaseException; import org.apache.hadoop.ozone.lease.LeaseManager; +import org.apache.hadoop.utils.MetadataKeyFilters.KeyPrefixFilter; +import org.apache.hadoop.utils.MetadataKeyFilters.MetadataKeyFilter; import org.apache.hadoop.utils.MetadataStore; import org.apache.hadoop.utils.MetadataStoreBuilder; import org.slf4j.Logger; @@ -147,15 +149,16 @@ public ContainerMapping( * {@inheritDoc} */ @Override - public ContainerInfo getContainer(final long containerID) throws + public ContainerInfo getContainer(final String containerName) throws IOException { ContainerInfo containerInfo; lock.lock(); try { - byte[] containerBytes = containerStore.get(Longs.toByteArray(containerID)); + byte[] containerBytes = containerStore.get(containerName.getBytes( + encoding)); if (containerBytes == null) { throw new SCMException( - "Specified key does not exist. key : " + containerID, + "Specified key does not exist. 
key : " + containerName, SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); } @@ -172,18 +175,19 @@ public ContainerInfo getContainer(final long containerID) throws * {@inheritDoc} */ @Override - public List listContainer(long startContainerID, - int count) throws IOException { + public List listContainer(String startName, + String prefixName, int count) throws IOException { List containerList = new ArrayList<>(); lock.lock(); try { if (containerStore.isEmpty()) { throw new IOException("No container exists in current db"); } - byte[] startKey = startContainerID <= 0 ? null : - Longs.toByteArray(startContainerID); + MetadataKeyFilter prefixFilter = new KeyPrefixFilter(prefixName); + byte[] startKey = startName == null ? null : DFSUtil.string2Bytes( + startName); List> range = - containerStore.getSequentialRangeKVs(startKey, count, null); + containerStore.getSequentialRangeKVs(startKey, count, prefixFilter); // Transform the values into the pipelines. // TODO: filter by container state @@ -205,6 +209,7 @@ public ContainerInfo getContainer(final long containerID) throws * Allocates a new container. * * @param replicationFactor - replication factor of the container. + * @param containerName - Name of the container. * @param owner - The string name of the Service that owns this container. * @return - Pipeline that makes up this container. * @throws IOException - Exception @@ -213,8 +218,11 @@ public ContainerInfo getContainer(final long containerID) throws public ContainerInfo allocateContainer( ReplicationType type, ReplicationFactor replicationFactor, + final String containerName, String owner) throws IOException { + Preconditions.checkNotNull(containerName); + Preconditions.checkState(!containerName.isEmpty()); ContainerInfo containerInfo; if (!nodeManager.isOutOfChillMode()) { @@ -225,12 +233,19 @@ public ContainerInfo allocateContainer( lock.lock(); try { + byte[] containerBytes = containerStore.get(containerName.getBytes( + encoding)); + if (containerBytes != null) { + throw new SCMException( + "Specified container already exists. key : " + containerName, + SCMException.ResultCodes.CONTAINER_EXISTS); + } containerInfo = containerStateManager.allocateContainer( - pipelineSelector, type, replicationFactor, owner); - - byte[] containerIDBytes = Longs.toByteArray(containerInfo.getContainerID()); - containerStore.put(containerIDBytes, containerInfo.getProtobuf() + pipelineSelector, type, replicationFactor, containerName, + owner); + containerStore.put( + containerName.getBytes(encoding), containerInfo.getProtobuf() .toByteArray()); } finally { lock.unlock(); @@ -241,20 +256,20 @@ public ContainerInfo allocateContainer( /** * Deletes a container from SCM. * - * @param containerID - Container ID + * @param containerName - Container name * @throws IOException if container doesn't exist or container store failed * to delete the * specified key. 
*/ @Override - public void deleteContainer(long containerID) throws IOException { + public void deleteContainer(String containerName) throws IOException { lock.lock(); try { - byte[] dbKey = Longs.toByteArray(containerID); + byte[] dbKey = containerName.getBytes(encoding); byte[] containerBytes = containerStore.get(dbKey); if (containerBytes == null) { throw new SCMException( - "Failed to delete container " + containerID + ", reason : " + + "Failed to delete container " + containerName + ", reason : " + "container doesn't exist.", SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); } @@ -269,17 +284,17 @@ public void deleteContainer(long containerID) throws IOException { */ @Override public HddsProtos.LifeCycleState updateContainerState( - long containerID, HddsProtos.LifeCycleEvent event) throws + String containerName, HddsProtos.LifeCycleEvent event) throws IOException { ContainerInfo containerInfo; lock.lock(); try { - byte[] dbKey = Longs.toByteArray(containerID); + byte[] dbKey = containerName.getBytes(encoding); byte[] containerBytes = containerStore.get(dbKey); if (containerBytes == null) { throw new SCMException( "Failed to update container state" - + containerID + + containerName + ", reason : container doesn't exist.", SCMException.ResultCodes.FAILED_TO_FIND_CONTAINER); } @@ -295,7 +310,7 @@ public void deleteContainer(long containerID) throws IOException { containerLeaseManager.acquire(containerInfo); // Register callback to be executed in case of timeout containerLease.registerCallBack(() -> { - updateContainerState(containerID, + updateContainerState(containerName, HddsProtos.LifeCycleEvent.TIMEOUT); return null; }); @@ -373,7 +388,7 @@ public void processContainerReports(ContainerReportsRequestProto reports) containerSupervisor.handleContainerReport(reports); for (StorageContainerDatanodeProtocolProtos.ContainerInfo datanodeState : containerInfos) { - byte[] dbKey = Longs.toByteArray(datanodeState.getContainerID()); + byte[] dbKey = datanodeState.getContainerNameBytes().toByteArray(); lock.lock(); try { byte[] containerBytes = containerStore.get(dbKey); @@ -394,14 +409,14 @@ public void processContainerReports(ContainerReportsRequestProto reports) // If the container is closed, then state is already written to SCM // DB.TODO: So can we can write only once to DB. if (closeContainerIfNeeded(newState)) { - LOG.info("Closing the Container: {}", newState.getContainerID()); + LOG.info("Closing the Container: {}", newState.getContainerName()); } } else { // Container not found in our container db. LOG.error("Error while processing container report from datanode :" + " {}, for container: {}, reason: container doesn't exist in" + "container database.", reports.getDatanodeDetails(), - datanodeState.getContainerID()); + datanodeState.getContainerName()); } } finally { lock.unlock(); @@ -421,7 +436,7 @@ public void processContainerReports(ContainerReportsRequestProto reports) HddsProtos.SCMContainerInfo knownState) { HddsProtos.SCMContainerInfo.Builder builder = HddsProtos.SCMContainerInfo.newBuilder(); - builder.setContainerID(knownState.getContainerID()); + builder.setContainerName(knownState.getContainerName()); builder.setPipeline(knownState.getPipeline()); // If used size is greater than allocated size, we will be updating // allocated size with used size. 
This update is done as a fallback @@ -458,7 +473,7 @@ private boolean closeContainerIfNeeded(HddsProtos.SCMContainerInfo newState) float containerUsedPercentage = 1.0f * newState.getUsedBytes() / this.size; - ContainerInfo scmInfo = getContainer(newState.getContainerID()); + ContainerInfo scmInfo = getContainer(newState.getContainerName()); if (containerUsedPercentage >= containerCloseThreshold && !isClosed(scmInfo)) { // We will call closer till get to the closed state. @@ -473,13 +488,13 @@ private boolean closeContainerIfNeeded(HddsProtos.SCMContainerInfo newState) // closed state from container reports. This state change should be // invoked once and only once. HddsProtos.LifeCycleState state = updateContainerState( - scmInfo.getContainerID(), + scmInfo.getContainerName(), HddsProtos.LifeCycleEvent.FINALIZE); if (state != HddsProtos.LifeCycleState.CLOSING) { LOG.error("Failed to close container {}, reason : Not able " + "to " + "update container state, current container state: {}.", - newState.getContainerID(), state); + newState.getContainerName(), state); return false; } return true; @@ -546,11 +561,11 @@ public void close() throws IOException { @VisibleForTesting public void flushContainerInfo() throws IOException { List containers = containerStateManager.getAllContainers(); - List failedContainers = new ArrayList<>(); + List failedContainers = new ArrayList<>(); for (ContainerInfo info : containers) { // even if some container updated failed, others can still proceed try { - byte[] dbKey = Longs.toByteArray(info.getContainerID()); + byte[] dbKey = info.getContainerName().getBytes(encoding); byte[] containerBytes = containerStore.get(dbKey); // TODO : looks like when a container is deleted, the container is // removed from containerStore but not containerStateManager, so it can @@ -562,6 +577,7 @@ public void flushContainerInfo() throws IOException { ContainerInfo oldInfo = ContainerInfo.fromProtobuf(oldInfoProto); ContainerInfo newInfo = new ContainerInfo.Builder() .setAllocatedBytes(info.getAllocatedBytes()) + .setContainerName(oldInfo.getContainerName()) .setNumberOfKeys(oldInfo.getNumberOfKeys()) .setOwner(oldInfo.getOwner()) .setPipeline(oldInfo.getPipeline()) @@ -572,10 +588,10 @@ public void flushContainerInfo() throws IOException { } else { LOG.debug("Container state manager has container {} but not found " + "in container store, a deleted container?", - info.getContainerID()); + info.getContainerName()); } } catch (IOException ioe) { - failedContainers.add(info.getContainerID()); + failedContainers.add(info.getContainerName()); } } if (!failedContainers.isEmpty()) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java index f11a50cf71f..227eca04e88 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/ContainerStateManager.java @@ -157,7 +157,8 @@ private void loadExistingContainers(Mapping containerMapping) { List containerList; try { - containerList = containerMapping.listContainer(0, Integer.MAX_VALUE); + containerList = containerMapping.listContainer(null, + null, Integer.MAX_VALUE); // if there are no container to load, let us return. 
if (containerList == null || containerList.size() == 0) { @@ -279,21 +280,24 @@ private void initializeStateMachine() { * @param selector -- Pipeline selector class. * @param type -- Replication type. * @param replicationFactor - Replication replicationFactor. + * @param containerName - Container Name. * @return Container Info. * @throws IOException on Failure. */ public ContainerInfo allocateContainer(PipelineSelector selector, HddsProtos .ReplicationType type, HddsProtos.ReplicationFactor replicationFactor, - String owner) throws IOException { + final String containerName, String owner) throws + IOException { Pipeline pipeline = selector.getReplicationPipeline(type, - replicationFactor); + replicationFactor, containerName); Preconditions.checkNotNull(pipeline, "Pipeline type=%s/" + "replication=%s couldn't be found for the new container. " + "Do you have enough nodes?", type, replicationFactor); ContainerInfo containerInfo = new ContainerInfo.Builder() + .setContainerName(containerName) .setState(HddsProtos.LifeCycleState.ALLOCATED) .setPipeline(pipeline) // This is bytes allocated for blocks inside container, not the @@ -328,7 +332,7 @@ public ContainerInfo updateContainerState(ContainerInfo String error = String.format("Failed to update container state %s, " + "reason: invalid state transition from state: %s upon " + "event: %s.", - info.getContainerID(), info.getState(), event); + info.getPipeline().getContainerName(), info.getState(), event); LOG.error(error); throw new SCMException(error, FAILED_TO_CHANGE_CONTAINER_STATE); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java index 61dee2b35d8..c949c6c4b54 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/Mapping.java @@ -31,57 +31,62 @@ */ public interface Mapping extends Closeable { /** - * Returns the ContainerInfo from the container ID. + * Returns the ContainerInfo from the container name. * - * @param containerID - ID of container. + * @param containerName - Name * @return - ContainerInfo such as creation state and the pipeline. * @throws IOException */ - ContainerInfo getContainer(long containerID) throws IOException; + ContainerInfo getContainer(String containerName) throws IOException; /** * Returns containers under certain conditions. - * Search container IDs from start ID(exclusive), - * The max size of the searching range cannot exceed the + * Search container names from start name(exclusive), + * and use prefix name to filter the result. The max + * size of the searching range cannot exceed the * value of count. * - * @param startContainerID start containerID, >=0, start searching at the head if 0. - * @param count count must be >= 0 + * @param startName start name, if null, start searching at the head. + * @param prefixName prefix name, if null, then filter is disabled. + * @param count count, if count < 0, the max size is unlimited.( * Usually the count will be replace with a very big - * value instead of being unlimited in case the db is very big. + * value instead of being unlimited in case the db is very big) * * @return a list of container. 
* @throws IOException */ - List listContainer(long startContainerID, int count) throws IOException; + List listContainer(String startName, String prefixName, + int count) throws IOException; /** * Allocates a new container for a given keyName and replication factor. * * @param replicationFactor - replication factor of the container. + * @param containerName - Name. * @param owner * @return - Container Info. * @throws IOException */ ContainerInfo allocateContainer(HddsProtos.ReplicationType type, - HddsProtos.ReplicationFactor replicationFactor, String owner) throws IOException; + HddsProtos.ReplicationFactor replicationFactor, + String containerName, String owner) throws IOException; /** * Deletes a container from SCM. * - * @param containerID - Container ID + * @param containerName - Container Name * @throws IOException */ - void deleteContainer(long containerID) throws IOException; + void deleteContainer(String containerName) throws IOException; /** * Update container state. - * @param containerID - Container ID + * @param containerName - Container Name * @param event - container life cycle event * @return - new container state * @throws IOException */ - HddsProtos.LifeCycleState updateContainerState(long containerID, + HddsProtos.LifeCycleState updateContainerState(String containerName, HddsProtos.LifeCycleEvent event) throws IOException; /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java index 75ec8e103c1..b5d4da9ed1f 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/closer/ContainerCloser.java @@ -51,7 +51,7 @@ private static final long MULTIPLIER = 3L; private static final int CLEANUP_WATER_MARK = 1000; private final NodeManager nodeManager; - private final Map commandIssued; + private final Map commandIssued; private final Configuration configuration; private final AtomicInteger mapCount; private final long reportInterval; @@ -93,12 +93,12 @@ public static int getCleanupWaterMark() { */ public void close(HddsProtos.SCMContainerInfo info) { - if (commandIssued.containsKey(info.getContainerID())) { + if (commandIssued.containsKey(info.getContainerName())) { // We check if we issued a close command in last 3 * reportInterval secs. - long commandQueueTime = commandIssued.get(info.getContainerID()); + long commandQueueTime = commandIssued.get(info.getContainerName()); long currentTime = TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow()); if (currentTime > commandQueueTime + (MULTIPLIER * reportInterval)) { - commandIssued.remove(info.getContainerID()); + commandIssued.remove(info.getContainerName()); mapCount.decrementAndGet(); } else { // Ignore this request, since we just issued a close command. 
We @@ -131,10 +131,10 @@ public void close(HddsProtos.SCMContainerInfo info) { pipeline.getPipelineChannel().getMembersList()) { nodeManager.addDatanodeCommand( DatanodeDetails.getFromProtoBuf(datanodeDetails).getUuid(), - new CloseContainerCommand(info.getContainerID())); + new CloseContainerCommand(info.getContainerName())); } - if (!commandIssued.containsKey(info.getContainerID())) { - commandIssued.put(info.getContainerID(), + if (!commandIssued.containsKey(info.getContainerName())) { + commandIssued.put(info.getContainerName(), TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow())); mapCount.incrementAndGet(); } @@ -150,7 +150,7 @@ private void runCleanerThreadIfNeeded() { Runnable entryCleaner = () -> { LOG.debug("Starting close container Hash map cleaner."); try { - for (Map.Entry entry : commandIssued.entrySet()) { + for (Map.Entry entry : commandIssued.entrySet()) { long commandQueueTime = entry.getValue(); if (commandQueueTime + (MULTIPLIER * reportInterval) > TimeUnit.MILLISECONDS.toSeconds(Time.monotonicNow())) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java index af878bf97f5..ddbd2134026 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/container/replication/InProgressPool.java @@ -61,7 +61,7 @@ private final NodeManager nodeManager; private final NodePoolManager poolManager; private final ExecutorService executorService; - private final Map containerCountMap; + private final Map containerCountMap; private final Map processedNodeSet; private final long startTime; private ProgressStatus status; @@ -258,12 +258,12 @@ private Runnable processContainerReport( for (ContainerInfo info : reports.getReportsList()) { containerProcessedCount.incrementAndGet(); LOG.debug("Total Containers processed: {} Container Name: {}", - containerProcessedCount.get(), info.getContainerID()); + containerProcessedCount.get(), info.getContainerName()); // Update the container map with count + 1 if the key exists or // update the map with 1. Since this is a concurrentMap the // computation and update is atomic. - containerCountMap.merge(info.getContainerID(), 1, Integer::sum); + containerCountMap.merge(info.getContainerName(), 1, Integer::sum); } } }; @@ -275,8 +275,8 @@ private Runnable processContainerReport( * @param predicate -- Predicate to filter by * @return A list of map entries. 
*/ - public List> filterContainer( - Predicate> predicate) { + public List> filterContainer( + Predicate> predicate) { return containerCountMap.entrySet().stream() .filter(predicate).collect(Collectors.toList()); } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java index faf330ea1d2..a4a6c51cdf4 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodePoolManager.java @@ -248,7 +248,7 @@ public String getNodePool(final DatanodeDetails datanodeDetails) throws SCMException { Preconditions.checkNotNull(datanodeDetails, "node is null"); try { - byte[] result = nodePoolStore.get( + byte[] result = nodePoolStore.get( datanodeDetails.getProtoBufMessage().toByteArray()); return result == null ? null : DFSUtil.bytes2String(result); } catch (IOException e) { diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java index 832fcc669a3..8e435289145 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineManager.java @@ -50,10 +50,11 @@ public PipelineManager() { * needed and based on the replication type in the request appropriate * Interface is invoked. * + * @param containerName Name of the container * @param replicationFactor - Replication Factor * @return a Pipeline. */ - public synchronized final Pipeline getPipeline( + public synchronized final Pipeline getPipeline(String containerName, ReplicationFactor replicationFactor, ReplicationType replicationType) throws IOException { /** @@ -73,17 +74,15 @@ public synchronized final Pipeline getPipeline( PipelineChannel pipelineChannel = allocatePipelineChannel(replicationFactor); if (pipelineChannel != null) { - LOG.debug("created new pipelineChannel:{} for container with " + - "replicationType:{} replicationFactor:{}", - pipelineChannel.getName(), replicationType, replicationFactor); + LOG.debug("created new pipelineChannel:{} for container:{}", + pipelineChannel.getName(), containerName); activePipelineChannels.add(pipelineChannel); } else { pipelineChannel = findOpenPipelineChannel(replicationType, replicationFactor); if (pipelineChannel != null) { - LOG.debug("re-used pipelineChannel:{} for container with " + - "replicationType:{} replicationFactor:{}", - pipelineChannel.getName(), replicationType, replicationFactor); + LOG.debug("re-used pipelineChannel:{} for container:{}", + pipelineChannel.getName(), containerName); } } if (pipelineChannel == null) { @@ -91,7 +90,7 @@ public synchronized final Pipeline getPipeline( "free nodes or operational pipelineChannel."); return null; } else { - return new Pipeline(pipelineChannel); + return new Pipeline(containerName, pipelineChannel); } } diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java index d29bb84c783..f0c9eea4416 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java +++ 
b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/PipelineSelector.java @@ -166,14 +166,14 @@ private PipelineManager getPipelineManager(ReplicationType replicationType) */ public Pipeline getReplicationPipeline(ReplicationType replicationType, - HddsProtos.ReplicationFactor replicationFactor) + HddsProtos.ReplicationFactor replicationFactor, String containerName) throws IOException { PipelineManager manager = getPipelineManager(replicationType); Preconditions.checkNotNull(manager, "Found invalid pipeline manager"); - LOG.debug("Getting replication pipeline forReplicationType {} : ReplicationFactor {}", - replicationType.toString(), replicationFactor.toString()); + LOG.debug("Getting replication pipeline for {} : Replication {}", + containerName, replicationFactor.toString()); return manager. - getPipeline(replicationFactor, replicationType); + getPipeline(containerName, replicationFactor, replicationType); } /** diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java index 70489b9253c..089a1374a4e 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/pipelines/ratis/RatisManagerImpl.java @@ -95,7 +95,7 @@ public PipelineChannel allocatePipelineChannel(ReplicationFactor factor) { PipelineSelector.newPipelineFromNodes(newNodesList, LifeCycleState.OPEN, ReplicationType.RATIS, factor, conduitName); Pipeline pipeline = - new Pipeline(pipelineChannel); + new Pipeline("setup", pipelineChannel); try (XceiverClientRatis client = XceiverClientRatis.newXceiverClientRatis(pipeline, conf)) { client.createPipeline(pipeline.getPipelineName(), newNodesList); diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java index 98fe9a11374..e0560a1c036 100644 --- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java +++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java @@ -29,6 +29,7 @@ import org.apache.hadoop.hdds.scm.ScmInfo; import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock; import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult; +import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline; import org.apache.hadoop.hdds.scm.exceptions.SCMException; import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol; import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB; @@ -36,7 +37,6 @@ import org.apache.hadoop.ipc.ProtobufRpcEngine; import org.apache.hadoop.ipc.RPC; import org.apache.hadoop.ozone.common.BlockGroup; -import org.apache.hadoop.hdds.client.BlockID; import org.apache.hadoop.ozone.common.DeleteBlockGroupResult; import org.apache.hadoop.ozone.protocolPB .ScmBlockLocationProtocolServerSideTranslatorPB; @@ -46,7 +46,9 @@ import java.io.IOException; import java.net.InetSocketAddress; import java.util.ArrayList; +import java.util.HashSet; import java.util.List; +import java.util.Set; import static org.apache.hadoop.hdds.scm.ScmConfigKeys .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY; @@ -137,6 +139,20 @@ public void join() throws InterruptedException { } @Override + public Set 
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
index 98fe9a11374..e0560a1c036 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMBlockProtocolServer.java
@@ -29,6 +29,7 @@
 import org.apache.hadoop.hdds.scm.ScmInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.hdds.scm.container.common.helpers.DeleteBlockResult;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.exceptions.SCMException;
 import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;
 import org.apache.hadoop.hdds.scm.protocolPB.ScmBlockLocationProtocolPB;
@@ -36,7 +37,6 @@
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ozone.common.BlockGroup;
-import org.apache.hadoop.hdds.client.BlockID;
 import org.apache.hadoop.ozone.common.DeleteBlockGroupResult;
 import org.apache.hadoop.ozone.protocolPB
     .ScmBlockLocationProtocolServerSideTranslatorPB;
@@ -46,7 +46,9 @@
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
+import java.util.HashSet;
 import java.util.List;
+import java.util.Set;

 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .OZONE_SCM_BLOCK_CLIENT_ADDRESS_KEY;
@@ -137,6 +139,20 @@ public void join() throws InterruptedException {
   }

   @Override
+  public Set<AllocatedBlock> getBlockLocations(Set<String> keys) throws
+      IOException {
+    Set<AllocatedBlock> locatedBlocks = new HashSet<>();
+    for (String key : keys) {
+      Pipeline pipeline = scm.getScmBlockManager().getBlock(key);
+      AllocatedBlock block = new AllocatedBlock.Builder().setKey(key)
+          .setPipeline(pipeline).build();
+      locatedBlocks.add(block);
+    }
+    return locatedBlocks;
+
+  }
+
+  @Override
   public AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType
       type, HddsProtos.ReplicationFactor factor, String owner)
       throws IOException {
@@ -186,7 +202,7 @@ public AllocatedBlock allocateBlock(long size, HddsProtos.ReplicationType
           .Result.unknownFailure;
     }
     List<DeleteBlockResult> blockResultList = new ArrayList<>();
-    for (BlockID blockKey : keyBlocks.getBlockIDList()) {
+    for (String blockKey : keyBlocks.getBlockIDList()) {
       blockResultList.add(new DeleteBlockResult(blockKey, resultCode));
     }
     results.add(new DeleteBlockGroupResult(keyBlocks.getGroupID(),
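A caller-side sketch of the String-keyed lookup added above. This assumes `getBlockLocations(Set<String>)` is declared on `ScmBlockLocationProtocol` in this state of the tree (the `@Override` in the hunk suggests so); the `blockClient` handle is hypothetical.

```java
import java.io.IOException;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Set;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.protocol.ScmBlockLocationProtocol;

public final class BlockLocationExample {
  private BlockLocationExample() { }

  public static void printLeaders(ScmBlockLocationProtocol blockClient)
      throws IOException {
    Set<String> keys = new HashSet<>(Arrays.asList("block-a", "block-b"));
    // Each AllocatedBlock pairs the requested key with the pipeline that
    // currently hosts it, as assembled in the server-side loop above.
    for (AllocatedBlock block : blockClient.getBlockLocations(keys)) {
      System.out.println(block.getKey() + " -> "
          + block.getPipeline().getLeader().getUuid());
    }
  }
}
```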
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
index 246d053abc8..42cce2f9557 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java
@@ -21,7 +21,6 @@
  */
 package org.apache.hadoop.hdds.scm.server;

-import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
@@ -138,31 +137,32 @@ public void join() throws InterruptedException {
   }

   @Override
-  public ContainerInfo allocateContainer(HddsProtos.ReplicationType
-      replicationType, HddsProtos.ReplicationFactor factor,
-      String owner) throws IOException {
-    getScm().checkAdminAccess();
+  public Pipeline allocateContainer(HddsProtos.ReplicationType
+      replicationType, HddsProtos.ReplicationFactor factor, String
+      containerName, String owner) throws IOException {
+    scm.checkAdminAccess();
     return scm.getScmContainerManager()
-        .allocateContainer(replicationType, factor, owner);
+        .allocateContainer(replicationType, factor, containerName, owner)
+        .getPipeline();
   }

   @Override
-  public ContainerInfo getContainer(long containerID) throws IOException {
+  public Pipeline getContainer(String containerName) throws IOException {
     return scm.getScmContainerManager()
-        .getContainer(containerID);
+        .getContainer(containerName).getPipeline();
   }

   @Override
-  public List<ContainerInfo> listContainer(long startContainerID,
-      int count) throws IOException {
-    return scm.getScmContainerManager().
-        listContainer(startContainerID, count);
+  public List<ContainerInfo> listContainer(String startName,
+      String prefixName, int count) throws IOException {
+    return scm.getScmContainerManager()
+        .listContainer(startName, prefixName, count);
   }

   @Override
-  public void deleteContainer(long containerID) throws IOException {
-    getScm().checkAdminAccess();
-    scm.getScmContainerManager().deleteContainer(containerID);
+  public void deleteContainer(String containerName) throws IOException {
+    scm.checkAdminAccess();
+    scm.getScmContainerManager().deleteContainer(containerName);
   }

@@ -193,12 +193,12 @@ public void deleteContainer(long containerID) throws IOException {
   @Override
   public void notifyObjectStageChange(StorageContainerLocationProtocolProtos
-      .ObjectStageChangeRequestProto.Type type, long id,
+      .ObjectStageChangeRequestProto.Type type, String name,
       StorageContainerLocationProtocolProtos.ObjectStageChangeRequestProto.Op
           op, StorageContainerLocationProtocolProtos
       .ObjectStageChangeRequestProto.Stage stage) throws IOException {

-    LOG.info("Object type {} id {} op {} new stage {}", type, id, op,
+    LOG.info("Object type {} name {} op {} new stage {}", type, name, op,
         stage);
     if (type == StorageContainerLocationProtocolProtos
         .ObjectStageChangeRequestProto.Type.container) {
@@ -206,10 +206,10 @@ public void notifyObjectStageChange(StorageContainerLocationProtocolProtos
           .ObjectStageChangeRequestProto.Op.create) {
         if (stage == StorageContainerLocationProtocolProtos
             .ObjectStageChangeRequestProto.Stage.begin) {
-          scm.getScmContainerManager().updateContainerState(id, HddsProtos
+          scm.getScmContainerManager().updateContainerState(name, HddsProtos
              .LifeCycleEvent.CREATE);
         } else {
-          scm.getScmContainerManager().updateContainerState(id, HddsProtos
+          scm.getScmContainerManager().updateContainerState(name, HddsProtos
              .LifeCycleEvent.CREATED);
         }
       } else {
@@ -217,10 +217,10 @@ public void notifyObjectStageChange(StorageContainerLocationProtocolProtos
           .ObjectStageChangeRequestProto.Op.close) {
         if (stage == StorageContainerLocationProtocolProtos
             .ObjectStageChangeRequestProto.Stage.begin) {
-          scm.getScmContainerManager().updateContainerState(id, HddsProtos
+          scm.getScmContainerManager().updateContainerState(name, HddsProtos
              .LifeCycleEvent.FINALIZE);
         } else {
-          scm.getScmContainerManager().updateContainerState(id, HddsProtos
+          scm.getScmContainerManager().updateContainerState(name, HddsProtos
              .LifeCycleEvent.CLOSE);
         }
       }
@@ -292,11 +292,6 @@ public ScmInfo getScmInfo() throws IOException {
     return resultList;
   }

-  @VisibleForTesting
-  public StorageContainerManager getScm() {
-    return scm;
-  }
-
   /**
    * Query the System for Nodes.
    *
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
index cb7a484f19e..e42b8870446 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMDatanodeProtocolServer.java
@@ -167,16 +167,17 @@ public SCMHeartbeatResponseProto sendHeartbeat(

   @Override
   public SCMRegisteredCmdResponseProto register(
-      HddsProtos.DatanodeDetailsProto datanodeDetails)
+      HddsProtos.DatanodeDetailsProto datanodeDetails, String[] scmAddresses)
       throws IOException {
     // TODO : Return the list of Nodes that forms the SCM HA.
     return getRegisteredResponse(scm.getScmNodeManager()
-        .register(datanodeDetails));
+        .register(datanodeDetails), null);
   }

   @VisibleForTesting
   public static SCMRegisteredCmdResponseProto getRegisteredResponse(
-      SCMCommand cmd) {
+      SCMCommand cmd,
+      StorageContainerDatanodeProtocolProtos.SCMNodeAddressList addressList) {
     Preconditions.checkState(cmd.getClass() == RegisteredCommand.class);
     RegisteredCommand rCmd = (RegisteredCommand) cmd;
     SCMCmdType type = cmd.getType();
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
index a7248bb85b1..af7dd3fac15 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/StorageContainerManager.java
@@ -87,7 +87,7 @@
  * create a container, which then can be used to store data.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "CBLOCK", "OZONE", "HBASE"})
-public class StorageContainerManager extends ServiceRuntimeInfoImpl
+public final class StorageContainerManager extends ServiceRuntimeInfoImpl
     implements SCMMXBean {

   private static final Logger LOG = LoggerFactory
@@ -168,7 +168,8 @@ private StorageContainerManager(OzoneConfiguration conf) throws IOException {
         cacheSize);

     scmBlockManager =
-        new BlockManagerImpl(conf, getScmNodeManager(), scmContainerManager);
+        new BlockManagerImpl(conf, getScmNodeManager(), scmContainerManager,
+            cacheSize);

     scmAdminUsernames = conf.getTrimmedStringCollection(OzoneConfigKeys
         .OZONE_ADMINISTRATORS);
@@ -458,9 +459,9 @@ private void unregisterMXBean() {
   }

   @VisibleForTesting
-  public ContainerInfo getContainerInfo(long containerID) throws
+  public ContainerInfo getContainerInfo(String containerName) throws
       IOException {
-    return scmContainerManager.getContainer(containerID);
+    return scmContainerManager.getContainer(containerName);
   }

   /**
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
index f3e42ea7316..0eff702cded 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestBlockManager.java
@@ -23,6 +23,7 @@
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
 import org.apache.hadoop.hdds.scm.container.MockNodeManager;
 import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.common.SCMTestUtils;
@@ -39,6 +40,7 @@
 import java.io.IOException;
 import java.nio.file.Paths;
 import java.util.Collections;
+import java.util.UUID;

 import static org.apache.hadoop.ozone.OzoneConsts.GB;
 import static org.apache.hadoop.ozone.OzoneConsts.MB;
@@ -74,7 +76,7 @@ public static void setUp() throws Exception {
     }
     nodeManager = new MockNodeManager(true, 10);
     mapping = new ContainerMapping(conf, nodeManager, 128);
-    blockManager = new BlockManagerImpl(conf, nodeManager, mapping);
+    blockManager = new BlockManagerImpl(conf, nodeManager, mapping, 128);
     if(conf.getBoolean(ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_RATIS_ENABLED_DEFAULT)){
       factor = HddsProtos.ReplicationFactor.THREE;
@@ -105,12 +107,32 @@ public void testAllocateBlock() throws Exception {
   }

   @Test
+  public void testGetAllocatedBlock() throws IOException {
+    AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
+        type, factor, containerOwner);
+    Assert.assertNotNull(block);
+    Pipeline pipeline = blockManager.getBlock(block.getKey());
+    Assert.assertEquals(pipeline.getLeader().getUuid(),
+        block.getPipeline().getLeader().getUuid());
+  }
+
+  @Test
   public void testDeleteBlock() throws Exception {
     AllocatedBlock block = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE,
         type, factor, containerOwner);
     Assert.assertNotNull(block);
-    blockManager.deleteBlocks(Collections.singletonList(
-        block.getBlockID()));
+    blockManager.deleteBlocks(Collections.singletonList(block.getKey()));
+
+    // Deleted block can not be retrieved
+    thrown.expectMessage("Specified block key does not exist.");
+    blockManager.getBlock(block.getKey());
+
+    // Tombstone of the deleted block can be retrieved if it has not been
+    // cleaned yet.
+    String deletedKeyName = blockManager.getDeletedKeyName(block.getKey());
+    Pipeline pipeline = blockManager.getBlock(deletedKeyName);
+    Assert.assertEquals(pipeline.getLeader().getUuid(),
+        block.getPipeline().getLeader().getUuid());
   }

   @Test
@@ -121,6 +143,12 @@ public void testAllocateOversizedBlock() throws IOException {
         type, factor, containerOwner);
   }

+  @Test
+  public void testGetNoneExistentContainer() throws IOException {
+    String nonExistBlockKey = UUID.randomUUID().toString();
+    thrown.expectMessage("Specified block key does not exist.");
+    blockManager.getBlock(nonExistBlockKey);
+  }

   @Test
   public void testChillModeAllocateBlockFails() throws IOException {
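A compact sketch of the block lifecycle these tests exercise: allocate, look up by key, delete, then read the tombstone. The `blockManager` argument is a hypothetical, already-constructed instance; method signatures follow the test code above.

```java
import java.io.IOException;
import java.util.Collections;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.block.BlockManager;
import org.apache.hadoop.hdds.scm.container.common.helpers.AllocatedBlock;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public final class BlockLifecycleExample {
  private BlockLifecycleExample() { }

  public static void run(BlockManager blockManager) throws IOException {
    AllocatedBlock block = blockManager.allocateBlock(128 * 1024 * 1024,
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE, "OZONE");

    // While the block is live, its key resolves to a pipeline.
    Pipeline live = blockManager.getBlock(block.getKey());

    // After deletion the key no longer resolves, but the tombstone entry
    // still does until the deleting service cleans it up.
    blockManager.deleteBlocks(Collections.singletonList(block.getKey()));
    String tombstone = blockManager.getDeletedKeyName(block.getKey());
    Pipeline fromTombstone = blockManager.getBlock(tombstone);
  }
}
```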
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
index f872e231590..77030cdd54f 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
@@ -82,22 +82,17 @@ public void tearDown() throws Exception {
     FileUtils.deleteDirectory(testDir);
   }

-  private Map<Long, List<Long>> generateData(int dataSize) {
-    Map<Long, List<Long>> blockMap = new HashMap<>();
+  private Map<String, List<String>> generateData(int dataSize) {
+    Map<String, List<String>> blockMap = new HashMap<>();
     Random random = new Random(1);
-    int continerIDBase = random.nextInt(100);
-    int localIDBase = random.nextInt(1000);
     for (int i = 0; i < dataSize; i++) {
-      //String containerName = "container-" + UUID.randomUUID().toString();
-      long containerID = continerIDBase + i;
-      List<Long> blocks = new ArrayList<>();
+      String containerName = "container-" + UUID.randomUUID().toString();
+      List<String> blocks = new ArrayList<>();
       int blockSize = random.nextInt(30) + 1;
       for (int j = 0; j < blockSize; j++)  {
-        //blocks.add("block-" + UUID.randomUUID().toString());
-        long localID = localIDBase + j;
-        blocks.add(localID);
+        blocks.add("block-" + UUID.randomUUID().toString());
       }
-      blockMap.put(containerID, blocks);
+      blockMap.put(containerName, blocks);
     }
     return blockMap;
   }
@@ -109,7 +104,7 @@ public void testGetTransactions() throws Exception {
     Assert.assertEquals(0, blocks.size());

     // Creates 40 TX in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(40).entrySet()){
+    for (Map.Entry<String, List<String>> entry : generateData(40).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }

@@ -148,7 +143,7 @@ public void testIncrementCount() throws Exception {
     int maxRetry = conf.getInt(OZONE_SCM_BLOCK_DELETION_MAX_RETRY, 20);

     // Create 30 TXs in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(30).entrySet()){
+    for (Map.Entry<String, List<String>> entry : generateData(30).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }

@@ -177,7 +172,7 @@ public void testIncrementCount() throws Exception {

   @Test
   public void testCommitTransactions() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
+    for (Map.Entry<String, List<String>> entry : generateData(50).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
     List<DeletedBlocksTransaction> blocks =
@@ -208,7 +203,7 @@ public void testRandomOperateTransactions() throws Exception {
     for (int i = 0; i < 100; i++) {
       int state = random.nextInt(4);
       if (state == 0) {
-        for (Map.Entry<Long, List<Long>> entry :
+        for (Map.Entry<String, List<String>> entry :
             generateData(10).entrySet()){
           deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
         }
@@ -239,7 +234,7 @@ public void testRandomOperateTransactions() throws Exception {

   @Test
   public void testPersistence() throws Exception {
-    for (Map.Entry<Long, List<Long>> entry : generateData(50).entrySet()){
+    for (Map.Entry<String, List<String>> entry : generateData(50).entrySet()){
       deletedBlockLog.addTransaction(entry.getKey(), entry.getValue());
     }
     // close db and reopen it again to make sure
@@ -262,10 +257,10 @@ public void testDeletedBlockTransactions() throws IOException {
     int txNum = 10;
     int maximumAllowedTXNum = 5;
     List<DeletedBlocksTransaction> blocks = null;
-    List<Long> containerIDs = new LinkedList<>();
+    List<String> containerNames = new LinkedList<>();

     int count = 0;
-    long containerID = 0L;
+    String containerName = null;
     DatanodeDetails dnDd1 = DatanodeDetails.newBuilder()
         .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
@@ -284,18 +279,18 @@ public void testDeletedBlockTransactions() throws IOException {
         .build();
     Mapping mappingService = mock(ContainerMapping.class);
     // Creates {TXNum} TX in the log.
-    for (Map.Entry<Long, List<Long>> entry : generateData(txNum)
+    for (Map.Entry<String, List<String>> entry : generateData(txNum)
         .entrySet()) {
       count++;
-      containerID = entry.getKey();
-      containerIDs.add(containerID);
-      deletedBlockLog.addTransaction(containerID, entry.getValue());
+      containerName = entry.getKey();
+      containerNames.add(containerName);
+      deletedBlockLog.addTransaction(containerName, entry.getValue());

       // make TX[1-6] for datanode1; TX[7-10] for datanode2
       if (count <= (maximumAllowedTXNum + 1)) {
-        mockContainerInfo(mappingService, containerID, dnDd1);
+        mockContainerInfo(mappingService, containerName, dnDd1);
       } else {
-        mockContainerInfo(mappingService, containerID, dnId2);
+        mockContainerInfo(mappingService, containerName, dnId2);
       }
     }

@@ -330,7 +325,7 @@ public void testDeletedBlockTransactions() throws IOException {
     DeletedBlocksTransaction.Builder builder =
         DeletedBlocksTransaction.newBuilder();
     builder.setTxID(11);
-    builder.setContainerID(containerID);
+    builder.setContainerName(containerName);
     builder.setCount(0);
     transactions.addTransaction(builder.build());

@@ -339,29 +334,30 @@ public void testDeletedBlockTransactions() throws IOException {
         transactions.getDatanodeTransactions(dnId2.getUuid()).size());

     // Add new TX in dnID2, then dnID2 will reach maximum value.
+ containerName = "newContainer"; builder = DeletedBlocksTransaction.newBuilder(); builder.setTxID(12); - builder.setContainerID(containerID); + builder.setContainerName(containerName); builder.setCount(0); - mockContainerInfo(mappingService, containerID, dnId2); + mockContainerInfo(mappingService, containerName, dnId2); transactions.addTransaction(builder.build()); // Since all node are full, then transactions is full. Assert.assertTrue(transactions.isFull()); } - private void mockContainerInfo(Mapping mappingService, long containerID, + private void mockContainerInfo(Mapping mappingService, String containerName, DatanodeDetails dd) throws IOException { PipelineChannel pipelineChannel = new PipelineChannel("fake", LifeCycleState.OPEN, ReplicationType.STAND_ALONE, ReplicationFactor.ONE, "fake"); pipelineChannel.addMember(dd); - Pipeline pipeline = new Pipeline(pipelineChannel); + Pipeline pipeline = new Pipeline(containerName, pipelineChannel); ContainerInfo.Builder builder = new ContainerInfo.Builder(); builder.setPipeline(pipeline); ContainerInfo conatinerInfo = builder.build(); Mockito.doReturn(conatinerInfo).when(mappingService) - .getContainer(containerID); + .getContainer(containerName); } } diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java index a27068bb339..200a611d0f8 100644 --- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java +++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java @@ -45,7 +45,6 @@ import java.util.ArrayList; import java.util.List; import java.util.NavigableSet; -import java.util.Random; import java.util.Set; import java.util.TreeSet; import java.util.UUID; @@ -60,7 +59,6 @@ private static File testDir; private static XceiverClientManager xceiverClientManager; private static String containerOwner = "OZONE"; - private static Random random; private static final long TIMEOUT = 10000; @@ -85,7 +83,6 @@ public static void setUp() throws Exception { nodeManager = new MockNodeManager(true, 10); mapping = new ContainerMapping(conf, nodeManager, 128); xceiverClientManager = new XceiverClientManager(conf); - random = new Random(); } @AfterClass @@ -106,7 +103,7 @@ public void testallocateContainer() throws Exception { ContainerInfo containerInfo = mapping.allocateContainer( xceiverClientManager.getType(), xceiverClientManager.getFactor(), - containerOwner); + UUID.randomUUID().toString(), containerOwner); Assert.assertNotNull(containerInfo); } @@ -123,7 +120,7 @@ public void testallocateContainerDistributesAllocation() throws Exception { ContainerInfo containerInfo = mapping.allocateContainer( xceiverClientManager.getType(), xceiverClientManager.getFactor(), - containerOwner); + UUID.randomUUID().toString(), containerOwner); Assert.assertNotNull(containerInfo); Assert.assertNotNull(containerInfo.getPipeline()); @@ -135,41 +132,59 @@ public void testallocateContainerDistributesAllocation() throws Exception { @Test public void testGetContainer() throws IOException { - ContainerInfo containerInfo = mapping.allocateContainer( + String containerName = UUID.randomUUID().toString(); + Pipeline pipeline = mapping.allocateContainer( xceiverClientManager.getType(), - xceiverClientManager.getFactor(), - containerOwner); - Pipeline pipeline = containerInfo.getPipeline(); + xceiverClientManager.getFactor(), containerName, 
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
index a27068bb339..200a611d0f8 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/TestContainerMapping.java
@@ -45,7 +45,6 @@
 import java.util.ArrayList;
 import java.util.List;
 import java.util.NavigableSet;
-import java.util.Random;
 import java.util.Set;
 import java.util.TreeSet;
 import java.util.UUID;
@@ -60,7 +59,6 @@
   private static File testDir;
   private static XceiverClientManager xceiverClientManager;
   private static String containerOwner = "OZONE";
-  private static Random random;

   private static final long TIMEOUT = 10000;

@@ -85,7 +83,6 @@ public static void setUp() throws Exception {
     nodeManager = new MockNodeManager(true, 10);
     mapping = new ContainerMapping(conf, nodeManager, 128);
     xceiverClientManager = new XceiverClientManager(conf);
-    random = new Random();
   }

   @AfterClass
@@ -106,7 +103,7 @@ public void testallocateContainer() throws Exception {
     ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
-        containerOwner);
+        UUID.randomUUID().toString(), containerOwner);
     Assert.assertNotNull(containerInfo);
   }

@@ -123,7 +120,7 @@ public void testallocateContainerDistributesAllocation() throws Exception {
       ContainerInfo containerInfo = mapping.allocateContainer(
           xceiverClientManager.getType(),
           xceiverClientManager.getFactor(),
-          containerOwner);
+          UUID.randomUUID().toString(), containerOwner);

       Assert.assertNotNull(containerInfo);
       Assert.assertNotNull(containerInfo.getPipeline());
@@ -135,41 +132,59 @@

   @Test
   public void testGetContainer() throws IOException {
-    ContainerInfo containerInfo = mapping.allocateContainer(
+    String containerName = UUID.randomUUID().toString();
+    Pipeline pipeline = mapping.allocateContainer(
         xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(),
-        containerOwner);
-    Pipeline pipeline = containerInfo.getPipeline();
+        xceiverClientManager.getFactor(), containerName,
+        containerOwner).getPipeline();
     Assert.assertNotNull(pipeline);
-    Pipeline newPipeline = mapping.getContainer(
-        containerInfo.getContainerID()).getPipeline();
+    Pipeline newPipeline = mapping.getContainer(containerName).getPipeline();
     Assert.assertEquals(pipeline.getLeader().getUuid(),
         newPipeline.getLeader().getUuid());
   }

   @Test
+  public void testDuplicateAllocateContainerFails() throws IOException {
+    String containerName = UUID.randomUUID().toString();
+    Pipeline pipeline = mapping.allocateContainer(
+        xceiverClientManager.getType(),
+        xceiverClientManager.getFactor(), containerName,
+        containerOwner).getPipeline();
+    Assert.assertNotNull(pipeline);
+    thrown.expectMessage("Specified container already exists.");
+    mapping.allocateContainer(xceiverClientManager.getType(),
+        xceiverClientManager.getFactor(), containerName,
+        containerOwner);
+  }
+
+  @Test
   public void testgetNoneExistentContainer() throws IOException {
+    String containerName = UUID.randomUUID().toString();
     thrown.expectMessage("Specified key does not exist.");
-    mapping.getContainer(random.nextLong());
+    mapping.getContainer(containerName);
   }

   @Test
   public void testChillModeAllocateContainerFails() throws IOException {
+    String containerName = UUID.randomUUID().toString();
     nodeManager.setChillmode(true);
     thrown.expectMessage("Unable to create container while in chill mode");
     mapping.allocateContainer(xceiverClientManager.getType(),
-        xceiverClientManager.getFactor(), containerOwner);
+        xceiverClientManager.getFactor(), containerName,
+        containerOwner);
   }

   @Test
   public void testContainerCreationLeaseTimeout() throws IOException,
       InterruptedException {
+    String containerName = UUID.randomUUID().toString();
     nodeManager.setChillmode(false);
     ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
+        containerName,
         containerOwner);
-    mapping.updateContainerState(containerInfo.getContainerID(),
+    mapping.updateContainerState(containerInfo.getContainerName(),
         HddsProtos.LifeCycleEvent.CREATE);
     Thread.sleep(TIMEOUT + 1000);
@@ -183,13 +198,14 @@ public void testContainerCreationLeaseTimeout() throws IOException,

     thrown.expect(IOException.class);
     thrown.expectMessage("Lease Exception");
-    mapping.updateContainerState(containerInfo.getContainerID(),
+    mapping.updateContainerState(containerInfo.getContainerName(),
         HddsProtos.LifeCycleEvent.CREATED);
   }

   @Test
   public void testFullContainerReport() throws IOException {
-    ContainerInfo info = createContainer();
+    String containerName = UUID.randomUUID().toString();
+    ContainerInfo info = createContainer(containerName);
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     ContainerReportsRequestProto.reportType reportType =
         ContainerReportsRequestProto.reportType.fullReport;
@@ -197,7 +213,9 @@ public void testFullContainerReport() throws IOException {
         new ArrayList<>();
     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-    ciBuilder.setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
+    ciBuilder.setContainerName(containerName)
+        //setting some random hash
+        .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
         .setSize(5368709120L)
         .setUsed(2000000000L)
         .setKeyCount(100000000L)
@@ -216,14 +234,15 @@ public void testFullContainerReport() throws IOException {

     mapping.processContainerReports(crBuilder.build());

-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
+    ContainerInfo updatedContainer =
+        mapping.getContainer(containerName);
     Assert.assertEquals(100000000L, updatedContainer.getNumberOfKeys());
     Assert.assertEquals(2000000000L, updatedContainer.getUsedBytes());
   }

   @Test
   public void testContainerCloseWithContainerReport() throws IOException {
-    ContainerInfo info = createContainer();
+    String containerName = UUID.randomUUID().toString();
+    ContainerInfo info = createContainer(containerName);
     DatanodeDetails datanodeDetails = TestUtils.getDatanodeDetails();
     ContainerReportsRequestProto.reportType reportType =
         ContainerReportsRequestProto.reportType.fullReport;
@@ -232,7 +251,9 @@ public void testContainerCloseWithContainerReport() throws IOException {

     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-    ciBuilder.setFinalhash("7c45eb4d7ed5e0d2e89aaab7759de02e")
+    ciBuilder.setContainerName(containerName)
+        //setting some random hash
+        .setFinalhash("7c45eb4d7ed5e0d2e89aaab7759de02e")
         .setSize(5368709120L)
         .setUsed(5368705120L)
         .setKeyCount(500000000L)
@@ -251,7 +272,7 @@ public void testContainerCloseWithContainerReport() throws IOException {

     mapping.processContainerReports(crBuilder.build());

-    ContainerInfo updatedContainer = mapping.getContainer(info.getContainerID());
+    ContainerInfo updatedContainer = mapping.getContainer(containerName);
     Assert.assertEquals(500000000L, updatedContainer.getNumberOfKeys());
     Assert.assertEquals(5368705120L, updatedContainer.getUsedBytes());
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
@@ -266,8 +287,9 @@ public void testContainerCloseWithContainerReport() throws IOException {

   @Test
   public void testCloseContainer() throws IOException {
-    ContainerInfo info = createContainer();
-    mapping.updateContainerState(info.getContainerID(),
+    String containerName = UUID.randomUUID().toString();
+    ContainerInfo info = createContainer(containerName);
+    mapping.updateContainerState(containerName,
         HddsProtos.LifeCycleEvent.FINALIZE);
     NavigableSet<ContainerID> pendingCloseContainers = mapping.getStateManager()
         .getMatchingContainerIDs(
@@ -276,7 +298,7 @@ public void testCloseContainer() throws IOException {
             xceiverClientManager.getFactor(),
             HddsProtos.LifeCycleState.CLOSING);
     Assert.assertTrue(pendingCloseContainers.contains(info.containerID()));
-    mapping.updateContainerState(info.getContainerID(),
+    mapping.updateContainerState(containerName,
         HddsProtos.LifeCycleEvent.CLOSE);
     NavigableSet<ContainerID> closeContainers = mapping.getStateManager()
         .getMatchingContainerIDs(
@@ -289,18 +311,21 @@

   /**
    * Creates a container with the given name in ContainerMapping.
+   * @param containerName
+   *          Name of the container
    * @throws IOException
    */
-  private ContainerInfo createContainer()
+  private ContainerInfo createContainer(String containerName)
       throws IOException {
     nodeManager.setChillmode(false);
     ContainerInfo containerInfo = mapping.allocateContainer(
         xceiverClientManager.getType(),
         xceiverClientManager.getFactor(),
+        containerName,
         containerOwner);
-    mapping.updateContainerState(containerInfo.getContainerID(),
+    mapping.updateContainerState(containerInfo.getContainerName(),
         HddsProtos.LifeCycleEvent.CREATE);
-    mapping.updateContainerState(containerInfo.getContainerID(),
+    mapping.updateContainerState(containerInfo.getContainerName(),
         HddsProtos.LifeCycleEvent.CREATED);
     return containerInfo;
   }
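A sketch of the name-keyed allocate/CREATE/CREATED sequence that `createContainer()` above performs. The `mapping` handle, name, and owner are illustrative; skipping either lifecycle event leaves the container stuck in its creating state, which is what the lease-timeout test checks.

```java
import java.io.IOException;
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
import org.apache.hadoop.hdds.scm.container.ContainerMapping;
import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;

public final class ContainerLifecycleExample {
  private ContainerLifecycleExample() { }

  public static ContainerInfo create(ContainerMapping mapping, String name)
      throws IOException {
    ContainerInfo info = mapping.allocateContainer(
        HddsProtos.ReplicationType.STAND_ALONE,
        HddsProtos.ReplicationFactor.ONE, name, "OZONE");
    // Walk the container through both creation events; the state machine
    // only considers it usable once CREATED has been recorded.
    mapping.updateContainerState(info.getContainerName(),
        HddsProtos.LifeCycleEvent.CREATE);
    mapping.updateContainerState(info.getContainerName(),
        HddsProtos.LifeCycleEvent.CREATED);
    return info;
  }
}
```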
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
index f3f37c72d1f..2fec2324215 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/closer/TestContainerCloser.java
@@ -18,6 +18,7 @@

 package org.apache.hadoop.hdds.scm.container.closer;

+import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.TestUtils;
@@ -91,13 +92,15 @@ public static void tearDown() throws Exception {

   @Test
   public void testClose() throws IOException {
+    String containerName = "container-" + RandomStringUtils.randomNumeric(5);
+
     ContainerInfo info = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.ONE, "ozone");
+        HddsProtos.ReplicationFactor.ONE, containerName, "ozone");

     //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(info.getContainerID(), CREATE);
-    mapping.updateContainerState(info.getContainerID(), CREATED);
+    mapping.updateContainerState(containerName, CREATE);
+    mapping.updateContainerState(containerName, CREATED);
     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
@@ -117,7 +120,7 @@ public void testClose() throws IOException {
     long newUsed = (long) (size * 0.91f);
     sendContainerReport(info, newUsed);

-    // with only one container the cleaner thread should not run.
+    // with only one container the cleaner thread should not run.
     Assert.assertEquals(runCount, mapping.getCloser().getThreadRunCount());

     // and close count will be one.
@@ -137,13 +140,14 @@ public void testRepeatedClose() throws IOException,
     configuration.setTimeDuration(OZONE_CONTAINER_REPORT_INTERVAL,
         1, TimeUnit.SECONDS);

+    String containerName = "container-" + RandomStringUtils.randomNumeric(5);
     ContainerInfo info = mapping.allocateContainer(
         HddsProtos.ReplicationType.STAND_ALONE,
-        HddsProtos.ReplicationFactor.ONE, "ozone");
+        HddsProtos.ReplicationFactor.ONE, containerName, "ozone");

     //Execute these state transitions so that we can close the container.
-    mapping.updateContainerState(info.getContainerID(), CREATE);
+    mapping.updateContainerState(containerName, CREATE);

     long currentCount = mapping.getCloser().getCloseCount();
     long runCount = mapping.getCloser().getThreadRunCount();
@@ -183,11 +187,12 @@ public void testCleanupThreadRuns() throws IOException,
     long runCount = mapping.getCloser().getThreadRunCount();

     for (int x = 0; x < ContainerCloser.getCleanupWaterMark() + 10; x++) {
+      String containerName = "container-" + RandomStringUtils.randomNumeric(7);
       ContainerInfo info = mapping.allocateContainer(
           HddsProtos.ReplicationType.STAND_ALONE,
-          HddsProtos.ReplicationFactor.ONE, "ozone");
-      mapping.updateContainerState(info.getContainerID(), CREATE);
-      mapping.updateContainerState(info.getContainerID(), CREATED);
+          HddsProtos.ReplicationFactor.ONE, containerName, "ozone");
+      mapping.updateContainerState(containerName, CREATE);
+      mapping.updateContainerState(containerName, CREATED);
       sendContainerReport(info, 5 * GIGABYTE);
     }

@@ -205,7 +210,7 @@ private void sendContainerReport(ContainerInfo info, long used) throws

     StorageContainerDatanodeProtocolProtos.ContainerInfo.Builder ciBuilder =
         StorageContainerDatanodeProtocolProtos.ContainerInfo.newBuilder();
-    ciBuilder.setContainerID(info.getContainerID())
+    ciBuilder.setContainerName(info.getContainerName())
         .setFinalhash("e16cc9d6024365750ed8dbd194ea46d2")
         .setSize(size)
         .setUsed(used)
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
index 6f994a98977..ad50d971b31 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/node/TestContainerPlacement.java
@@ -25,7 +25,6 @@
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.XceiverClientManager;
 import org.apache.hadoop.hdds.scm.container.ContainerMapping;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.hdds.scm.container.placement.algorithms
     .ContainerPlacementPolicy;
@@ -161,11 +160,13 @@ public void testContainerPlacementCapacity() throws IOException,

       assertTrue(nodeManager.isOutOfChillMode());

-      ContainerInfo containerInfo = containerManager.allocateContainer(
+      String container1 = UUID.randomUUID().toString();
+      Pipeline pipeline1 = containerManager.allocateContainer(
           xceiverClientManager.getType(),
-          xceiverClientManager.getFactor(), "OZONE");
+          xceiverClientManager.getFactor(), container1, "OZONE")
+          .getPipeline();
       assertEquals(xceiverClientManager.getFactor().getNumber(),
-          containerInfo.getPipeline().getMachines().size());
+          pipeline1.getMachines().size());
     } finally {
       IOUtils.closeQuietly(containerManager);
       IOUtils.closeQuietly(nodeManager);
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
index 0f9125baa14..d0839c52fbb 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
@@ -17,7 +17,6 @@
 package org.apache.hadoop.ozone.container.common;

 import org.apache.commons.codec.digest.DigestUtils;
-import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.scm.TestUtils;
@@ -203,12 +202,14 @@ public void testGetVersionAssertRpcTimeOut() throws Exception {

   @Test
   public void testRegister() throws Exception {
+    String[] scmAddressArray = new String[1];
+    scmAddressArray[0] = serverAddress.toString();
     DatanodeDetails nodeToRegister = getDatanodeDetails();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(
         SCMTestUtils.getConf(), serverAddress, 1000)) {
       SCMRegisteredCmdResponseProto responseProto = rpcEndPoint.getEndPoint()
-          .register(nodeToRegister.getProtoBufMessage());
+          .register(nodeToRegister.getProtoBufMessage(), scmAddressArray);
       Assert.assertNotNull(responseProto);
       Assert.assertEquals(nodeToRegister.getUuidString(),
           responseProto.getDatanodeUUID());
@@ -361,7 +362,7 @@ public void testHeartbeatTaskRpcTimeOut() throws Exception {
    * @return
    */
   ContainerReport getRandomContainerReport() {
-    return new ContainerReport(RandomUtils.nextLong(),
+    return new ContainerReport(UUID.randomUUID().toString(),
         DigestUtils.sha256Hex("Random"));
   }

@@ -435,8 +436,7 @@ private ContainerReportsRequestProto createContainerReport(int count) {
         reportsBuilder = StorageContainerDatanodeProtocolProtos
         .ContainerReportsRequestProto.newBuilder();
     for (int x = 0; x < count; x++) {
-      long containerID = RandomUtils.nextLong();
-      ContainerReport report = new ContainerReport(containerID,
+      ContainerReport report = new ContainerReport(UUID.randomUUID().toString(),
           DigestUtils.sha256Hex("Simulated"));
       report.setKeyCount(1000);
       report.setSize(OzoneConsts.GB * 5);
@@ -445,6 +445,7 @@ private ContainerReportsRequestProto createContainerReport(int count) {
       report.setReadBytes(OzoneConsts.GB * 1);
       report.setWriteCount(50);
       report.setWriteBytes(OzoneConsts.GB * 2);
+      report.setContainerID(1);

       reportsBuilder.addReports(report.getProtoBufMessage());
     }
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
index 01f70b1628e..8eb07e61c24 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/replication/TestContainerSupervisor.java
@@ -149,17 +149,17 @@ public void testAssertPoolsAreProcessed() {
    */
   public void testDetectSingleContainerReplica() throws TimeoutException,
       InterruptedException {
-    long singleNodeContainerID = 9001;
-    long threeNodeContainerID = 9003;
+    String singleNodeContainer = "SingleNodeContainer";
+    String threeNodeContainer = "ThreeNodeContainer";
     InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
     // Only single datanode reporting that "SingleNodeContainer" exists.
     List<ContainerReportsRequestProto> clist =
-        datanodeStateManager.getContainerReport(singleNodeContainerID,
+        datanodeStateManager.getContainerReport(singleNodeContainer,
             ppool.getPool().getPoolName(), 1);
     ppool.handleContainerReport(clist.get(0));

     // Three nodes are going to report that ThreeNodeContainer exists.
-    clist = datanodeStateManager.getContainerReport(threeNodeContainerID,
+    clist = datanodeStateManager.getContainerReport(threeNodeContainer,
         ppool.getPool().getPoolName(), 3);

     for (ContainerReportsRequestProto reportsProto : clist) {
@@ -169,10 +169,9 @@ public void testDetectSingleContainerReplica() throws TimeoutException,
         200, 1000);
     ppool.setDoneProcessing();

-    List<Map.Entry<Long, Integer>> containers = ppool.filterContainer(p -> p
+    List<Map.Entry<String, Integer>> containers = ppool.filterContainer(p -> p
         .getValue() == 1);
-    Assert.assertEquals(singleNodeContainerID,
-        containers.get(0).getKey().longValue());
+    Assert.assertEquals(singleNodeContainer, containers.get(0).getKey());
     int count = containers.get(0).getValue();
     Assert.assertEquals(1L, count);
   }
@@ -185,24 +184,24 @@ public void testDetectSingleContainerReplica() throws TimeoutException,
    */
   public void testDetectOverReplica() throws TimeoutException,
       InterruptedException {
-    long normalContainerID = 9000;
-    long overReplicatedContainerID = 9001;
-    long wayOverReplicatedContainerID = 9002;
+    String normalContainer = "NormalContainer";
+    String overReplicated = "OverReplicatedContainer";
+    String wayOverReplicated = "WayOverReplicated";
     InProgressPool ppool = containerSupervisor.getInProcessPoolList().get(0);
     List<ContainerReportsRequestProto> clist =
-        datanodeStateManager.getContainerReport(normalContainerID,
+        datanodeStateManager.getContainerReport(normalContainer,
             ppool.getPool().getPoolName(), 3);
     ppool.handleContainerReport(clist.get(0));

-    clist = datanodeStateManager.getContainerReport(overReplicatedContainerID,
+    clist = datanodeStateManager.getContainerReport(overReplicated,
         ppool.getPool().getPoolName(), 4);

     for (ContainerReportsRequestProto reportsProto : clist) {
       ppool.handleContainerReport(reportsProto);
     }

-    clist = datanodeStateManager.getContainerReport(wayOverReplicatedContainerID,
+    clist = datanodeStateManager.getContainerReport(wayOverReplicated,
         ppool.getPool().getPoolName(), 7);

     for (ContainerReportsRequestProto reportsProto : clist) {
@@ -216,7 +215,7 @@ public void testDetectOverReplica() throws TimeoutException,
         200, 1000);
     ppool.setDoneProcessing();

-    List<Map.Entry<Long, Integer>> containers = ppool.filterContainer(p -> p
+    List<Map.Entry<String, Integer>> containers = ppool.filterContainer(p -> p
         .getValue() > 3);
     Assert.assertEquals(2, containers.size());
   }
@@ -256,15 +255,14 @@ public void testAddingNewPoolWorks()
           logCapturer.getOutput().contains("PoolNew"),
           200, 15 * 1000);

-      long newContainerID = 7001;
       // Assert that we are able to send a container report to this new
       // pool and datanode.
       List<ContainerReportsRequestProto> clist =
-          datanodeStateManager.getContainerReport(newContainerID,
+          datanodeStateManager.getContainerReport("NewContainer1",
               "PoolNew", 1);
       containerSupervisor.handleContainerReport(clist.get(0));
       GenericTestUtils.waitFor(() ->
-          inProgressLog.getOutput().contains(Long.toString(newContainerID)) && inProgressLog
+          inProgressLog.getOutput().contains("NewContainer1") && inProgressLog
               .getOutput().contains(id.getUuidString()),
           200, 10 * 1000);
     } finally {
diff --git a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
index 50fd18f5655..26f35144975 100644
--- a/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
+++ b/hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/testutils/ReplicationDatanodeStateManager.java
@@ -16,7 +16,6 @@
  */
 package org.apache.hadoop.ozone.container.testutils;

-import com.google.common.primitives.Longs;
 import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.hdds.scm.node.NodeManager;
 import org.apache.hadoop.hdds.scm.node.NodePoolManager;
@@ -57,13 +56,13 @@ public ReplicationDatanodeStateManager(NodeManager nodeManager,

   /**
    * Get Container Report as if it is from a datanode in the cluster.
-   * @param containerID - Container ID.
+   * @param containerName - Container Name.
    * @param poolName - Pool Name.
    * @param dataNodeCount - Datanode Count.
    * @return List of Container Reports.
    */
   public List<ContainerReportsRequestProto> getContainerReport(
-      long containerID, String poolName, int dataNodeCount) {
+      String containerName, String poolName, int dataNodeCount) {
     List<ContainerReportsRequestProto> containerList = new LinkedList<>();
     List<DatanodeDetails> nodesInPool = poolManager.getNodes(poolName);

@@ -76,6 +75,7 @@ public ReplicationDatanodeStateManager(NodeManager nodeManager,
           "required container reports");
     }

+    int containerID = 1;
     while (containerList.size() < dataNodeCount && nodesInPool.size() > 0) {
       DatanodeDetails id = nodesInPool.get(r.nextInt(nodesInPool.size()));
       nodesInPool.remove(id);
@@ -83,9 +83,8 @@ public ReplicationDatanodeStateManager(NodeManager nodeManager,
       // We return container reports only for nodes that are healthy.
       if (nodeManager.getNodeState(id) == HEALTHY) {
         ContainerInfo info = ContainerInfo.newBuilder()
-            .setContainerID(containerID)
-            .setFinalhash(DigestUtils.sha256Hex(
-                Longs.toByteArray(containerID)))
+            .setContainerName(containerName)
+            .setFinalhash(DigestUtils.sha256Hex(containerName))
             .setContainerID(containerID)
             .build();
         ContainerReportsRequestProto containerReport =
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
index 4f3b14385cc..9a44525461b 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CloseContainerHandler.java
@@ -24,7 +24,7 @@
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.cli.SCMCLI;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
-import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
+import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

 import java.io.IOException;

@@ -34,32 +34,30 @@ public class CloseContainerHandler extends OzoneCommandHandler {

   public static final String CONTAINER_CLOSE = "close";
-  public static final String OPT_CONTAINER_ID = "c";
+  public static final String OPT_CONTAINER_NAME = "c";

   @Override
   public void execute(CommandLine cmd) throws IOException {
     if (!cmd.hasOption(CONTAINER_CLOSE)) {
       throw new IOException("Expecting container close");
     }
-    if (!cmd.hasOption(OPT_CONTAINER_ID)) {
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
       displayHelp();
       if (!cmd.hasOption(SCMCLI.HELP_OP)) {
-        throw new IOException("Expecting container id");
+        throw new IOException("Expecting container name");
       } else {
         return;
       }
     }
-    String containerID = cmd.getOptionValue(OPT_CONTAINER_ID);
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);

-    ContainerInfo container = getScmClient().
-        getContainer(Long.parseLong(containerID));
-    if (container == null) {
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    if (pipeline == null) {
       throw new IOException("Cannot close an non-exist container "
-          + containerID);
+          + containerName);
     }
-    logOut("Closing container : %s.", containerID);
-    getScmClient().closeContainer(container.getContainerID(),
-        container.getPipeline());
+    logOut("Closing container : %s.", containerName);
+    getScmClient().closeContainer(pipeline);
     logOut("Container closed.");
   }

@@ -74,8 +72,8 @@ public void displayHelp() {
   }

   public static void addOptions(Options options) {
-    Option containerNameOpt = new Option(OPT_CONTAINER_ID,
-        true, "Specify container ID");
+    Option containerNameOpt = new Option(OPT_CONTAINER_NAME,
+        true, "Specify container name");
     options.addOption(containerNameOpt);
   }
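The same lookup-then-close flow the CLI handler above performs, extracted from the command plumbing as a small sketch. The `scmClient` handle is a hypothetical, already-connected client; `getContainer(String)` and `closeContainer(Pipeline)` follow the signatures in the hunk.

```java
import java.io.IOException;
import org.apache.hadoop.hdds.scm.client.ScmClient;
import org.apache.hadoop.hdds.scm.container.common.helpers.Pipeline;

public final class CloseContainerExample {
  private CloseContainerExample() { }

  public static void close(ScmClient scmClient, String containerName)
      throws IOException {
    // Resolve the named container to its pipeline first; a null pipeline
    // means the container does not exist.
    Pipeline pipeline = scmClient.getContainer(containerName);
    if (pipeline == null) {
      throw new IOException("Cannot close a non-existent container "
          + containerName);
    }
    // With name-keyed containers, the pipeline alone identifies the close target.
    scmClient.closeContainer(pipeline);
  }
}
```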
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
index 428f179932d..980388f28ca 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/ContainerCommandHandler.java
@@ -119,6 +119,7 @@ private static void addCommandsOption(Options options) {
   public static void addOptions(Options options) {
     addCommandsOption(options);
     // for create container options.
+    CreateContainerHandler.addOptions(options);
     DeleteContainerHandler.addOptions(options);
     InfoContainerHandler.addOptions(options);
     ListContainerHandler.addOptions(options);
diff --git a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
index c0ff1f7a6d6..2961831e67c 100644
--- a/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
+++ b/hadoop-hdds/tools/src/main/java/org/apache/hadoop/hdds/scm/cli/container/CreateContainerHandler.java
@@ -19,6 +19,7 @@

 import org.apache.commons.cli.CommandLine;
 import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Option;
 import org.apache.commons.cli.Options;
 import org.apache.hadoop.hdds.scm.cli.OzoneCommandHandler;
 import org.apache.hadoop.hdds.scm.client.ScmClient;
@@ -34,6 +35,7 @@ public class CreateContainerHandler extends OzoneCommandHandler {

   public static final String CONTAINER_CREATE = "create";
+  public static final String OPT_CONTAINER_NAME = "c";
   public static final String CONTAINER_OWNER = "OZONE";
   // TODO Support an optional -p option to create
   // container on given datanodes.
@@ -47,17 +49,33 @@ public void execute(CommandLine cmd) throws IOException {
     if (!cmd.hasOption(CONTAINER_CREATE)) {
       throw new IOException("Expecting container create");
     }
+    if (!cmd.hasOption(OPT_CONTAINER_NAME)) {
+      displayHelp();
+      if (!cmd.hasOption(HELP_OP)) {
+        throw new IOException("Expecting container name");
+      } else {
+        return;
+      }
+    }
+    String containerName = cmd.getOptionValue(OPT_CONTAINER_NAME);

-    logOut("Creating container...");
-    getScmClient().createContainer(CONTAINER_OWNER);
+    logOut("Creating container : %s.", containerName);
+    getScmClient().createContainer(containerName, CONTAINER_OWNER);
     logOut("Container created.");
   }

   @Override
   public void displayHelp() {
     Options options = new Options();
+    addOptions(options);
     HelpFormatter helpFormatter = new HelpFormatter();
     helpFormatter.printHelp(CMD_WIDTH, "hdfs scm -container -create