diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
index de66e7525e1..e7046b2692a 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java
@@ -3495,8 +3495,6 @@ public static boolean isAclEnabled(Configuration conf) {
* One or more socket addresses for csi-adaptor.
* Multiple addresses are delimited by ",".
*/
- public static final String NM_CSI_ADAPTOR_ADDRESSES =
- NM_CSI_ADAPTOR_PREFIX + "addresses";
public static final String NM_CSI_DRIVER_NAMES =
NM_CSI_DRIVER_PREFIX + "names";
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java
index e1177053fdf..11485a1a7f1 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/util/csi/CsiConfigUtils.java
@@ -76,4 +76,26 @@ public static InetSocketAddress getCsiAdaptorAddressForDriver(
throw new YarnException(errorMessage);
}
}
+
+ /**
+ * Get plain TCP address of the csi-driver-adaptor for the given
+ * csi-driver. If the property is missing from conf, a YarnException
+ * is thrown.
+ * @param driverName name of the csi-driver
+ * @param conf configuration
+ * @return csi-driver-adaptor server address
+   * @throws YarnException if the adaptor address property is not defined
+ */
+ public static String getPlainCsiAdaptorAddressForDriver(
+ String driverName, Configuration conf) throws YarnException {
+ String configName = YarnConfiguration.NM_CSI_ADAPTOR_PREFIX
+ + driverName + YarnConfiguration.NM_CSI_ADAPTOR_ADDRESS_SUFFIX;
+ String address = conf.get(configName);
+ if (Strings.isNullOrEmpty(address)) {
+ throw new YarnException("Failed to load CSI adaptor address for driver "
+ + driverName + ", configuration property " + configName
+ + " is not defined.");
+ }
+ return address;
+ }
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
index 45894e9461f..fff1d11127d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/resources/yarn-default.xml
@@ -4080,18 +4080,6 @@
-
-
- CSI driver adaptor addresses on a node manager.
- This configuration will be loaded by the resource manager to initiate
- a client for each adaptor in order to communicate with CSI drivers.
- Note, these addresses should be mapped to the adaptor addresses which
- runs the controller plugin.
-
- yarn.nodemanager.csi-driver-adaptor.addresses
-
-
-
CSI driver names running on this node, multiple driver names need to
@@ -4104,7 +4092,7 @@
2nd property defines where the mapping csi-driver-adaptor's address is.
What's more, an optional csi-driver-adaptor class can be defined
for each csi-driver:
- "yarn.nodemanager.csi-driver.${NAME}.class"
+ "yarn.nodemanager.csi-driver-adaptor.${NAME}.class"
once given, the adaptor will be initiated with the given class instead
of the default implementation
org.apache.hadoop.yarn.csi.adaptor.DefaultCsiAdaptorImpl. User can plug
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/dev-support/findbugs-exclude.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/dev-support/findbugs-exclude.xml
index 6ab0a7fc711..51180d70851 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/dev-support/findbugs-exclude.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/dev-support/findbugs-exclude.xml
@@ -16,6 +16,6 @@
-->
-
+
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
index 44c2607a8e0..a93fa437959 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/pom.xml
@@ -177,7 +177,7 @@
org.apache.maven.plugins
maven-javadoc-plugin
- csi.v0
+ csi.v1
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
index a2035878bb7..e4620d39321 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/adaptor/DefaultCsiAdaptorImpl.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.adaptor;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.api.CsiAdaptorPlugin;
import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoRequest;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClient.java
index 837b667a5e6..6b878339850 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClient.java
@@ -18,8 +18,8 @@
package org.apache.hadoop.yarn.csi.client;
-import csi.v0.Csi;
-import csi.v0.Csi.GetPluginInfoResponse;
+import csi.v1.Csi;
+import csi.v1.Csi.GetPluginInfoResponse;
import java.io.IOException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClientImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClientImpl.java
index 0a107e16b5e..8550e6aab30 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClientImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiClientImpl.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.csi.client;
-import csi.v0.Csi;
-import csi.v0.Csi.GetPluginInfoRequest;
-import csi.v0.Csi.GetPluginInfoResponse;
+import csi.v1.Csi;
+import csi.v1.Csi.GetPluginInfoRequest;
+import csi.v1.Csi.GetPluginInfoResponse;
import org.apache.hadoop.yarn.csi.utils.GrpcHelper;
import java.io.IOException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
index 5dc1b3f794a..1969ee9ff01 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/client/CsiGrpcClient.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.csi.client;
-import csi.v0.ControllerGrpc;
-import csi.v0.IdentityGrpc;
-import csi.v0.NodeGrpc;
+import csi.v1.ControllerGrpc;
+import csi.v1.IdentityGrpc;
+import csi.v1.NodeGrpc;
import io.grpc.ManagedChannel;
import io.grpc.netty.NettyChannelBuilder;
import io.netty.channel.epoll.EpollDomainSocketChannel;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/GetPluginInfoResponseProtoTranslator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/GetPluginInfoResponseProtoTranslator.java
index bcf634addcb..da74e4f5d43 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/GetPluginInfoResponseProtoTranslator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/GetPluginInfoResponseProtoTranslator.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.translator;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodePublishVolumeRequestProtoTranslator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodePublishVolumeRequestProtoTranslator.java
index e86dd3fcc9f..3c809453a1d 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodePublishVolumeRequestProtoTranslator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodePublishVolumeRequestProtoTranslator.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.translator;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -52,8 +52,8 @@
builder.setVolumeId(messageA.getVolumeId());
builder.setTargetPath(messageA.getTargetPath());
builder.setReadonly(messageA.getReadOnly());
- builder.putAllNodePublishSecrets(messageA.getSecrets());
- builder.putAllPublishInfo(messageA.getPublishContext());
+ builder.putAllSecrets(messageA.getSecrets());
+ builder.putAllPublishContext(messageA.getPublishContext());
builder.setStagingTargetPath(messageA.getStagingPath());
return builder.build();
}
@@ -71,7 +71,7 @@ public NodePublishVolumeRequest convertFrom(
return NodePublishVolumeRequest.newInstance(
messageB.getVolumeId(), messageB.getReadonly(),
messageB.getTargetPath(), messageB.getStagingTargetPath(),
- cap, messageB.getPublishInfoMap(),
- messageB.getNodePublishSecretsMap());
+ cap, messageB.getPublishContextMap(),
+ messageB.getSecretsMap());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodeUnpublishVolumeRequestProtoTranslator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodeUnpublishVolumeRequestProtoTranslator.java
index 485237e3697..db08749f133 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodeUnpublishVolumeRequestProtoTranslator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/NodeUnpublishVolumeRequestProtoTranslator.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.translator;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeRequest;
import org.apache.hadoop.yarn.exceptions.YarnException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ProtoTranslatorFactory.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ProtoTranslatorFactory.java
index 1a7306f0bf2..c0b8e604fee 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ProtoTranslatorFactory.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ProtoTranslatorFactory.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.translator;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.yarn.api.protocolrecords.NodePublishVolumeRequest;
import org.apache.hadoop.yarn.api.protocolrecords.NodeUnpublishVolumeRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidateVolumeCapabilitiesRequestProtoTranslator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidateVolumeCapabilitiesRequestProtoTranslator.java
index a74c47a9f6e..6ecdbbb7e5f 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidateVolumeCapabilitiesRequestProtoTranslator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidateVolumeCapabilitiesRequestProtoTranslator.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.yarn.csi.translator;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest;
import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest.VolumeCapability;
import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesRequest.VolumeType;
@@ -42,10 +42,7 @@
Csi.ValidateVolumeCapabilitiesRequest.Builder buidler =
Csi.ValidateVolumeCapabilitiesRequest.newBuilder();
buidler.setVolumeId(request.getVolumeId());
- if (request.getVolumeCapabilities() != null
- && request.getVolumeCapabilities().size() > 0) {
- buidler.putAllVolumeAttributes(request.getVolumeAttributes());
- }
+ buidler.putAllParameters(request.getVolumeAttributes());
for (VolumeCapability cap :
request.getVolumeCapabilities()) {
Csi.VolumeCapability.AccessMode accessMode =
@@ -70,7 +67,7 @@
public ValidateVolumeCapabilitiesRequest convertFrom(
Csi.ValidateVolumeCapabilitiesRequest request) throws YarnException {
ValidateVolumeCapabilitiesRequest result = ValidateVolumeCapabilitiesRequest
- .newInstance(request.getVolumeId(), request.getVolumeAttributesMap());
+ .newInstance(request.getVolumeId(), request.getParametersMap());
for (Csi.VolumeCapability csiCap :
request.getVolumeCapabilitiesList()) {
ValidateVolumeCapabilitiesRequest.AccessMode mode =
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidationVolumeCapabilitiesResponseProtoTranslator.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidationVolumeCapabilitiesResponseProtoTranslator.java
index fd42712625e..ccf64799558 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidationVolumeCapabilitiesResponseProtoTranslator.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/java/org/apache/hadoop/yarn/csi/translator/ValidationVolumeCapabilitiesResponseProtoTranslator.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.translator;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.hadoop.yarn.api.protocolrecords.ValidateVolumeCapabilitiesResponse;
import org.apache.hadoop.yarn.exceptions.YarnException;
@@ -34,7 +34,9 @@
public Csi.ValidateVolumeCapabilitiesResponse convertTo(
ValidateVolumeCapabilitiesResponse response) throws YarnException {
return Csi.ValidateVolumeCapabilitiesResponse.newBuilder()
- .setSupported(response.isSupported())
+ .setConfirmed(
+ Csi.ValidateVolumeCapabilitiesResponse.Confirmed.newBuilder()
+ .getDefaultInstanceForType())
.setMessage(response.getResponseMessage())
.build();
}
@@ -43,6 +45,6 @@
public ValidateVolumeCapabilitiesResponse convertFrom(
Csi.ValidateVolumeCapabilitiesResponse response) throws YarnException {
return ValidateVolumeCapabilitiesResponse.newInstance(
- response.getSupported(), response.getMessage());
+ true, response.getMessage());
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/proto/csi.proto b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/proto/csi.proto
index e9301bc6bc9..688a84dd096 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/proto/csi.proto
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/main/proto/csi.proto
@@ -16,16 +16,24 @@
* limitations under the License.
*/
-// https://github.com/container-storage-interface/spec/blob/v0.3.0/csi.proto
-// ** v0.3 **
+// https://github.com/container-storage-interface/spec/blob/v1.0.0/csi.proto
+// ** v1.0 **
// Code generated by make; DO NOT EDIT.
syntax = "proto3";
-package csi.v0;
+package csi.v1;
+import "google/protobuf/descriptor.proto";
+import "google/protobuf/timestamp.proto";
import "google/protobuf/wrappers.proto";
option go_package = "csi";
+
+extend google.protobuf.FieldOptions {
+ // Indicates that a field MAY contain information that is sensitive
+ // and MUST be treated as such (e.g. not logged).
+ bool csi_secret = 1059;
+}
service Identity {
rpc GetPluginInfo(GetPluginInfoRequest)
returns (GetPluginInfoResponse) {}
@@ -85,20 +93,12 @@ service Node {
rpc NodeUnpublishVolume (NodeUnpublishVolumeRequest)
returns (NodeUnpublishVolumeResponse) {}
- // NodeGetId is being deprecated in favor of NodeGetInfo and will be
- // removed in CSI 1.0. Existing drivers, however, may depend on this
- // RPC call and hence this RPC call MUST be implemented by the CSI
- // plugin prior to v1.0.
- rpc NodeGetId (NodeGetIdRequest)
- returns (NodeGetIdResponse) {
- option deprecated = true;
- }
+ rpc NodeGetVolumeStats (NodeGetVolumeStatsRequest)
+ returns (NodeGetVolumeStatsResponse) {}
rpc NodeGetCapabilities (NodeGetCapabilitiesRequest)
returns (NodeGetCapabilitiesResponse) {}
- // Prior to CSI 1.0 - CSI plugins MUST implement both NodeGetId and
- // NodeGetInfo RPC calls.
rpc NodeGetInfo (NodeGetInfoRequest)
returns (NodeGetInfoResponse) {}
}
@@ -107,13 +107,13 @@ message GetPluginInfoRequest {
}
message GetPluginInfoResponse {
- // The name MUST follow reverse domain name notation format
- // (https://en.wikipedia.org/wiki/Reverse_domain_name_notation).
- // It SHOULD include the plugin's host company name and the plugin
- // name, to minimize the possibility of collisions. It MUST be 63
+ // The name MUST follow domain name notation format
+ // (https://tools.ietf.org/html/rfc1035#section-2.3.1). It SHOULD
+ // include the plugin's host company name and the plugin name,
+ // to minimize the possibility of collisions. It MUST be 63
// characters or less, beginning and ending with an alphanumeric
- // character ([a-z0-9A-Z]) with dashes (-), underscores (_),
- // dots (.), and alphanumerics between. This field is REQUIRED.
+ // character ([a-z0-9A-Z]) with dashes (-), dots (.), and
+ // alphanumerics between. This field is REQUIRED.
string name = 1;
// This field is REQUIRED. Value of this field is opaque to the CO.
@@ -129,7 +129,7 @@ message GetPluginCapabilitiesRequest {
message GetPluginCapabilitiesResponse {
// All the capabilities that the controller service supports. This
// field is OPTIONAL.
- repeated PluginCapability capabilities = 2;
+ repeated PluginCapability capabilities = 1;
}
// Specifies a capability of the plugin.
@@ -140,7 +140,7 @@ message PluginCapability {
// CONTROLLER_SERVICE indicates that the Plugin provides RPCs for
// the ControllerService. Plugins SHOULD provide this capability.
- // In rare cases certain plugins may wish to omit the
+ // In rare cases certain plugins MAY wish to omit the
// ControllerService entirely from their implementation, but such
// SHOULD NOT be the common case.
// The presence of this capability determines whether the CO will
@@ -148,13 +148,13 @@ message PluginCapability {
// as specific RPCs as indicated by ControllerGetCapabilities.
CONTROLLER_SERVICE = 1;
- // ACCESSIBILITY_CONSTRAINTS indicates that the volumes for this
- // plugin may not be equally accessible by all nodes in the
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS indicates that the volumes for
+ // this plugin MAY NOT be equally accessible by all nodes in the
// cluster. The CO MUST use the topology information returned by
// CreateVolumeRequest along with the topology information
// returned by NodeGetInfo to ensure that a given volume is
// accessible from a given node when scheduling workloads.
- ACCESSIBILITY_CONSTRAINTS = 2;
+ VOLUME_ACCESSIBILITY_CONSTRAINTS = 2;
}
Type type = 1;
}
@@ -195,37 +195,53 @@ message CreateVolumeRequest {
// The suggested name for the storage space. This field is REQUIRED.
// It serves two purposes:
// 1) Idempotency - This name is generated by the CO to achieve
- // idempotency. If `CreateVolume` fails, the volume may or may not
- // be provisioned. In this case, the CO may call `CreateVolume`
- // again, with the same name, to ensure the volume exists. The
- // Plugin should ensure that multiple `CreateVolume` calls for the
- // same name do not result in more than one piece of storage
- // provisioned corresponding to that name. If a Plugin is unable to
- // enforce idempotency, the CO's error recovery logic could result
- // in multiple (unused) volumes being provisioned.
+ // idempotency. The Plugin SHOULD ensure that multiple
+ // `CreateVolume` calls for the same name do not result in more
+ // than one piece of storage provisioned corresponding to that
+ // name. If a Plugin is unable to enforce idempotency, the CO's
+ // error recovery logic could result in multiple (unused) volumes
+ // being provisioned.
+ // In the case of error, the CO MUST handle the gRPC error codes
+ // per the recovery behavior defined in the "CreateVolume Errors"
+ // section below.
+ // The CO is responsible for cleaning up volumes it provisioned
+ // that it no longer needs. If the CO is uncertain whether a volume
+ // was provisioned or not when a `CreateVolume` call fails, the CO
+ // MAY call `CreateVolume` again, with the same name, to ensure the
+ // volume exists and to retrieve the volume's `volume_id` (unless
+ // otherwise prohibited by "CreateVolume Errors").
// 2) Suggested name - Some storage systems allow callers to specify
// an identifier by which to refer to the newly provisioned
// storage. If a storage system supports this, it can optionally
// use this name as the identifier for the new volume.
+ // Any Unicode string that conforms to the length limit is allowed
+ // except those containing the following banned characters:
+ // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+ // (These are control characters other than commonly used whitespace.)
string name = 1;
// This field is OPTIONAL. This allows the CO to specify the capacity
// requirement of the volume to be provisioned. If not specified, the
// Plugin MAY choose an implementation-defined capacity range. If
// specified it MUST always be honored, even when creating volumes
- // from a source; which may force some backends to internally extend
+ // from a source; which MAY force some backends to internally extend
// the volume after creating it.
-
CapacityRange capacity_range = 2;
- // The capabilities that the provisioned volume MUST have: the Plugin
- // MUST provision a volume that could satisfy ALL of the
- // capabilities specified in this list. The Plugin MUST assume that
- // the CO MAY use the provisioned volume later with ANY of the
- // capabilities specified in this list. This also enables the CO to do
- // early validation: if ANY of the specified volume capabilities are
- // not supported by the Plugin, the call SHALL fail. This field is
- // REQUIRED.
+ // The capabilities that the provisioned volume MUST have. SP MUST
+ // provision a volume that will satisfy ALL of the capabilities
+ // specified in this list. Otherwise SP MUST return the appropriate
+ // gRPC error code.
+ // The Plugin MUST assume that the CO MAY use the provisioned volume
+ // with ANY of the capabilities specified in this list.
+ // For example, a CO MAY specify two volume capabilities: one with
+ // access mode SINGLE_NODE_WRITER and another with access mode
+ // MULTI_NODE_READER_ONLY. In this case, the SP MUST verify that the
+ // provisioned volume can be used in either mode.
+ // This also enables the CO to do early validation: If ANY of the
+ // specified volume capabilities are not supported by the SP, the call
+ // MUST return the appropriate gRPC error code.
+ // This field is REQUIRED.
repeated VolumeCapability volume_capabilities = 3;
// Plugin specific parameters passed in as opaque key-value pairs.
@@ -236,7 +252,7 @@ message CreateVolumeRequest {
// Secrets required by plugin to complete volume creation request.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map controller_create_secrets = 5;
+ map secrets = 5 [(csi_secret) = true];
// If specified, the new volume will be pre-populated with data from
// this source. This field is OPTIONAL.
@@ -249,10 +265,10 @@ message CreateVolumeRequest {
// topological accessibility information supported by the SP.
// This field is OPTIONAL.
// This field SHALL NOT be specified unless the SP has the
- // ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
// If this field is not specified and the SP has the
- // ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY choose
- // where the provisioned volume is accessible from.
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability, the SP MAY
+ // choose where the provisioned volume is accessible from.
TopologyRequirement accessibility_requirements = 7;
}
@@ -264,11 +280,19 @@ message VolumeContentSource {
// This field is REQUIRED. Plugin is REQUIRED to support creating
// volume from snapshot if it supports the capability
// CREATE_DELETE_SNAPSHOT.
- string id = 1;
+ string snapshot_id = 1;
+ }
+
+ message VolumeSource {
+ // Contains identity information for the existing source volume.
+ // This field is REQUIRED. Plugins reporting CLONE_VOLUME
+ // capability MUST support creating a volume from another volume.
+ string volume_id = 1;
}
oneof type {
SnapshotSource snapshot = 1;
+ VolumeSource volume = 2;
}
}
@@ -355,7 +379,7 @@ message CapacityRange {
int64 limit_bytes = 2;
}
-// The information about a provisioned volume.
+// Information about a specific volume.
message Volume {
// The capacity of the volume in bytes. This field is OPTIONAL. If not
// set (value of 0), it indicates that the capacity of the volume is
@@ -363,20 +387,32 @@ message Volume {
// The value of this field MUST NOT be negative.
int64 capacity_bytes = 1;
- // Contains identity information for the created volume. This field is
- // REQUIRED. The identity information will be used by the CO in
- // subsequent calls to refer to the provisioned volume.
- string id = 2;
-
- // Attributes reflect static properties of a volume and MUST be passed
- // to volume validation and publishing calls.
- // Attributes SHALL be opaque to a CO. Attributes SHALL NOT be mutable
- // and SHALL be safe for the CO to cache. Attributes SHOULD NOT
- // contain sensitive information. Attributes MAY NOT uniquely identify
- // a volume. A volume uniquely identified by `id` SHALL always report
- // the same attributes. This field is OPTIONAL and when present MUST
- // be passed to volume validation and publishing calls.
- map attributes = 3;
+ // The identifier for this volume, generated by the plugin.
+ // This field is REQUIRED.
+ // This field MUST contain enough information to uniquely identify
+ // this specific volume vs all other volumes supported by this plugin.
+ // This field SHALL be used by the CO in subsequent calls to refer to
+ // this volume.
+ // The SP is NOT responsible for global uniqueness of volume_id across
+ // multiple SPs.
+ string volume_id = 2;
+
+ // Opaque static properties of the volume. SP MAY use this field to
+ // ensure subsequent volume validation and publishing calls have
+ // contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // A volume uniquely identified by `volume_id` SHALL always report the
+ // same volume_context.
+ // This field is OPTIONAL and when present MUST be passed to volume
+ // validation and publishing calls.
+ map volume_context = 3;
// If specified, indicates that the volume is not empty and is
// pre-populated with data from the specified source.
@@ -386,7 +422,7 @@ message Volume {
// Specifies where (regions, zones, racks, etc.) the provisioned
// volume is accessible from.
// A plugin that returns this field MUST also set the
- // ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
// An SP MAY specify multiple topologies to indicate the volume is
// accessible from multiple locations.
// COs MAY use this information along with the topology information
@@ -394,7 +430,7 @@ message Volume {
// from a given node when scheduling workloads.
// This field is OPTIONAL. If it is not specified, the CO MAY assume
// the volume is equally accessible from all nodes in the cluster and
- // may schedule workloads referencing the volume on any available
+ // MAY schedule workloads referencing the volume on any available
// node.
//
// Example 1:
@@ -548,15 +584,18 @@ message TopologyRequirement {
// A topological segment is a specific instance of a topological domain,
// like "zone3", "rack3", etc.
// For example {"com.company/zone": "Z1", "com.company/rack": "R3"}
-// Valid keys have two segments: an optional prefix and name, separated
+// Valid keys have two segments: an OPTIONAL prefix and name, separated
// by a slash (/), for example: "com.company.example/zone".
-// The key name segment is required. The prefix is optional.
-// Both the key name and the prefix MUST each be 63 characters or less,
-// begin and end with an alphanumeric character ([a-z0-9A-Z]) and
-// contain only dashes (-), underscores (_), dots (.), or alphanumerics
-// in between, for example "zone".
-// The key prefix MUST follow reverse domain name notation format
-// (https://en.wikipedia.org/wiki/Reverse_domain_name_notation).
+// The key name segment is REQUIRED. The prefix is OPTIONAL.
+// The key name MUST be 63 characters or less, begin and end with an
+// alphanumeric character ([a-z0-9A-Z]), and contain only dashes (-),
+// underscores (_), dots (.), or alphanumerics in between, for example
+// "zone".
+// The key prefix MUST be 63 characters or less, begin and end with a
+// lower-case alphanumeric character ([a-z0-9]), contain only
+// dashes (-), dots (.), or lower-case alphanumerics in between, and
+// follow domain name notation format
+// (https://tools.ietf.org/html/rfc1035#section-2.3.1).
// The key prefix SHOULD include the plugin's host company name and/or
// the plugin name, to minimize the possibility of collisions with keys
// from other plugins.
@@ -579,7 +618,7 @@ message DeleteVolumeRequest {
// Secrets required by plugin to complete volume deletion request.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map controller_delete_secrets = 2;
+ map secrets = 2 [(csi_secret) = true];
}
message DeleteVolumeResponse {
@@ -594,31 +633,44 @@ message ControllerPublishVolumeRequest {
// field to match the node ID returned by `NodeGetInfo`.
string node_id = 2;
- // The capability of the volume the CO expects the volume to have.
+ // Volume capability describing how the CO intends to use this volume.
+ // SP MUST ensure the CO can use the published volume as described.
+ // Otherwise SP MUST return the appropriate gRPC error code.
// This is a REQUIRED field.
VolumeCapability volume_capability = 3;
- // Whether to publish the volume in readonly mode. This field is
- // REQUIRED.
+ // Indicates SP MUST publish the volume in readonly mode.
+ // CO MUST set this field to false if SP does not have the
+ // PUBLISH_READONLY controller capability.
+ // This is a REQUIRED field.
bool readonly = 4;
// Secrets required by plugin to complete controller publish volume
// request. This field is OPTIONAL. Refer to the
// `Secrets Requirements` section on how to use this field.
- map controller_publish_secrets = 5;
+ map secrets = 5 [(csi_secret) = true];
- // Attributes of the volume to be used on a node. This field is
- // OPTIONAL and MUST match the attributes of the Volume identified
- // by `volume_id`.
- map volume_attributes = 6;
+ // Volume context as returned by CO in CreateVolumeRequest. This field
+ // is OPTIONAL and MUST match the volume_context of the volume
+ // identified by `volume_id`.
+ map volume_context = 6;
}
message ControllerPublishVolumeResponse {
- // The SP specific information that will be passed to the Plugin in
- // the subsequent `NodeStageVolume` or `NodePublishVolume` calls
- // for the given volume.
- // This information is opaque to the CO. This field is OPTIONAL.
- map publish_info = 1;
+ // Opaque static publish properties of the volume. SP MAY use this
+ // field to ensure subsequent `NodeStageVolume` or `NodePublishVolume`
+ // calls have contextual information.
+ // The contents of this field SHALL be opaque to a CO.
+ // The contents of this field SHALL NOT be mutable.
+ // The contents of this field SHALL be safe for the CO to cache.
+ // The contents of this field SHOULD NOT contain sensitive
+ // information.
+ // The contents of this field SHOULD NOT be used for uniquely
+ // identifying a volume. The `volume_id` alone SHOULD be sufficient to
+ // identify the volume.
+ // This field is OPTIONAL and when present MUST be passed to
+ // subsequent `NodeStageVolume` or `NodePublishVolume` calls.
+ map publish_context = 1;
}
message ControllerUnpublishVolumeRequest {
// The ID of the volume. This field is REQUIRED.
@@ -636,7 +688,7 @@ message ControllerUnpublishVolumeRequest {
// ControllerPublishVolume call for the specified volume.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map controller_unpublish_secrets = 3;
+ map secrets = 3 [(csi_secret) = true];
}
message ControllerUnpublishVolumeResponse {
@@ -646,30 +698,52 @@ message ValidateVolumeCapabilitiesRequest {
// The ID of the volume to check. This field is REQUIRED.
string volume_id = 1;
+ // Volume context as returned by CO in CreateVolumeRequest. This field
+ // is OPTIONAL and MUST match the volume_context of the volume
+ // identified by `volume_id`.
+ map volume_context = 2;
+
// The capabilities that the CO wants to check for the volume. This
- // call SHALL return "supported" only if all the volume capabilities
+ // call SHALL return "confirmed" only if all the volume capabilities
// specified below are supported. This field is REQUIRED.
- repeated VolumeCapability volume_capabilities = 2;
+ repeated VolumeCapability volume_capabilities = 3;
- // Attributes of the volume to check. This field is OPTIONAL and MUST
- // match the attributes of the Volume identified by `volume_id`.
- map volume_attributes = 3;
+ // See CreateVolumeRequest.parameters.
+ // This field is OPTIONAL.
+ map parameters = 4;
- // Specifies where (regions, zones, racks, etc.) the caller believes
- // the volume is accessible from.
- // A caller MAY specify multiple topologies to indicate they believe
- // the volume to be accessible from multiple locations.
- // This field is OPTIONAL. This field SHALL NOT be set unless the
- // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability.
- repeated Topology accessible_topology = 4;
+ // Secrets required by plugin to complete volume validation request.
+ // This field is OPTIONAL. Refer to the `Secrets Requirements`
+ // section on how to use this field.
+ map secrets = 5 [(csi_secret) = true];
}
message ValidateVolumeCapabilitiesResponse {
- // True if the Plugin supports the specified capabilities for the
- // given volume. This field is REQUIRED.
- bool supported = 1;
+ message Confirmed {
+ // Volume context validated by the plugin.
+ // This field is OPTIONAL.
+ map volume_context = 1;
+
+ // Volume capabilities supported by the plugin.
+ // This field is REQUIRED.
+ repeated VolumeCapability volume_capabilities = 2;
+
+ // The volume creation parameters validated by the plugin.
+ // This field is OPTIONAL.
+ map parameters = 3;
+ }
- // Message to the CO if `supported` above is false. This field is
+ // Confirmed indicates to the CO the set of capabilities that the
+ // plugin has validated. This field SHALL only be set to a non-empty
+ // value for successful validation responses.
+ // For successful validation responses, the CO SHALL compare the
+ // fields of this message to the originally requested capabilities in
+ // order to guard against an older plugin reporting "valid" for newer
+ // capability fields that it does not yet understand.
+ // This field is OPTIONAL.
+ Confirmed confirmed = 1;
+
+ // Message to the CO if `confirmed` above is empty. This field is
// OPTIONAL.
// An empty string is equal to an unspecified field value.
string message = 2;
@@ -726,7 +800,7 @@ message GetCapacityRequest {
// `accessible_topology`. This is the same as the
// `accessible_topology` the CO returns in a `CreateVolumeResponse`.
// This field is OPTIONAL. This field SHALL NOT be set unless the
- // plugin advertises the ACCESSIBILITY_CONSTRAINTS capability.
+ // plugin advertises the VOLUME_ACCESSIBILITY_CONSTRAINTS capability.
Topology accessible_topology = 3;
}
@@ -746,7 +820,7 @@ message ControllerGetCapabilitiesRequest {
message ControllerGetCapabilitiesResponse {
// All the capabilities that the controller service supports. This
// field is OPTIONAL.
- repeated ControllerServiceCapability capabilities = 2;
+ repeated ControllerServiceCapability capabilities = 1;
}
// Specifies a capability of the controller service.
@@ -763,11 +837,15 @@ message ControllerServiceCapability {
// CREATE_DELETE_SNAPSHOT MUST support creating volume from
// snapshot.
CREATE_DELETE_SNAPSHOT = 5;
- // LIST_SNAPSHOTS is NOT REQUIRED. For plugins that need to upload
- // a snapshot after it is being cut, LIST_SNAPSHOTS COULD be used
- // with the snapshot_id as the filter to query whether the
- // uploading process is complete or not.
LIST_SNAPSHOTS = 6;
+ // Plugins supporting volume cloning at the storage level MAY
+ // report this capability. The source volume MUST be managed by
+ // the same plugin. Not all volume source and parameter
+ // combinations MAY work.
+ CLONE_VOLUME = 7;
+ // Indicates the SP supports ControllerPublishVolume.readonly
+ // field.
+ PUBLISH_READONLY = 8;
}
Type type = 1;
@@ -785,12 +863,16 @@ message CreateSnapshotRequest {
// The suggested name for the snapshot. This field is REQUIRED for
// idempotency.
+ // Any Unicode string that conforms to the length limit is allowed
+ // except those containing the following banned characters:
+ // U+0000-U+0008, U+000B, U+000C, U+000E-U+001F, U+007F-U+009F.
+ // (These are control characters other than commonly used whitespace.)
string name = 2;
// Secrets required by plugin to complete snapshot creation request.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map create_snapshot_secrets = 3;
+ map secrets = 3 [(csi_secret) = true];
// Plugin specific parameters passed in as opaque key-value pairs.
// This field is OPTIONAL. The Plugin is responsible for parsing and
@@ -812,7 +894,7 @@ message CreateSnapshotResponse {
Snapshot snapshot = 1;
}
-// The information about a provisioned snapshot.
+// Information about a specific snapshot.
message Snapshot {
// This is the complete size of the snapshot in bytes. The purpose of
// this field is to give CO guidance on how much space is needed to
@@ -823,11 +905,16 @@ message Snapshot {
// zero means it is unspecified.
int64 size_bytes = 1;
- // Uniquely identifies a snapshot and is generated by the plugin. It
- // will not change over time. This field is REQUIRED. The identity
- // information will be used by the CO in subsequent calls to refer to
- // the provisioned snapshot.
- string id = 2;
+ // The identifier for this snapshot, generated by the plugin.
+ // This field is REQUIRED.
+ // This field MUST contain enough information to uniquely identify
+ // this specific snapshot vs all other snapshots supported by this
+ // plugin.
+ // This field SHALL be used by the CO in subsequent calls to refer to
+ // this snapshot.
+ // The SP is NOT responsible for global uniqueness of snapshot_id
+ // across multiple SPs.
+ string snapshot_id = 2;
// Identity information for the source volume. Note that creating a
// snapshot from a snapshot is not supported here so the source has to
@@ -835,43 +922,13 @@ message Snapshot {
string source_volume_id = 3;
// Timestamp when the point-in-time snapshot is taken on the storage
- // system. The format of this field should be a Unix nanoseconds time
- // encoded as an int64. On Unix, the command `date +%s%N` returns the
- // current time in nanoseconds since 1970-01-01 00:00:00 UTC. This
- // field is REQUIRED.
- int64 created_at = 4;
-
- // The status of a snapshot.
- SnapshotStatus status = 5;
-}
+ // system. This field is REQUIRED.
+ .google.protobuf.Timestamp creation_time = 4;
-// The status of a snapshot.
-message SnapshotStatus {
- enum Type {
- UNKNOWN = 0;
- // A snapshot is ready for use.
- READY = 1;
- // A snapshot is cut and is now being uploaded.
- // Some cloud providers and storage systems uploads the snapshot
- // to the cloud after the snapshot is cut. During this phase,
- // `thaw` can be done so the application can be running again if
- // `freeze` was done before taking the snapshot.
- UPLOADING = 2;
- // An error occurred during the snapshot uploading process.
- // This error status is specific for uploading because
- // `CreateSnaphot` is a blocking call before the snapshot is
- // cut and therefore it SHOULD NOT come back with an error
- // status when an error occurs. Instead a gRPC error code SHALL
- // be returned by `CreateSnapshot` when an error occurs before
- // a snapshot is cut.
- ERROR_UPLOADING = 3;
- }
- // This field is REQUIRED.
- Type type = 1;
-
- // Additional information to describe why a snapshot ended up in the
- // `ERROR_UPLOADING` status. This field is OPTIONAL.
- string details = 2;
+ // Indicates if a snapshot is ready to use as a
+ // `volume_content_source` in a `CreateVolumeRequest`. The default
+ // value is false. This field is REQUIRED.
+ bool ready_to_use = 5;
}
message DeleteSnapshotRequest {
// The ID of the snapshot to be deleted.
@@ -881,7 +938,7 @@ message DeleteSnapshotRequest {
// Secrets required by plugin to complete snapshot deletion request.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map delete_snapshot_secrets = 2;
+ map secrets = 2 [(csi_secret) = true];
}
message DeleteSnapshotResponse {}
@@ -911,7 +968,8 @@ message ListSnapshotsRequest {
// Identity information for a specific snapshot. This field is
// OPTIONAL. It can be used to list only a specific snapshot.
// ListSnapshots will return with current snapshot information
- // and will not block if the snapshot is being uploaded.
+ // and will not block if the snapshot is being processed after
+ // it is cut.
string snapshot_id = 4;
}
@@ -939,28 +997,34 @@ message NodeStageVolumeRequest {
// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
// left unset if the corresponding Controller Plugin does not have
// this capability. This is an OPTIONAL field.
- map publish_info = 2;
+ map publish_context = 2;
- // The path to which the volume will be published. It MUST be an
+ // The path to which the volume MAY be staged. It MUST be an
// absolute path in the root filesystem of the process serving this
- // request. The CO SHALL ensure that there is only one
- // staging_target_path per volume.
+ // request, and MUST be a directory. The CO SHALL ensure that there
+ // is only one `staging_target_path` per volume. The CO SHALL ensure
+ // that the path is a directory and that the process serving the
+ // request has `read` and `write` permission to that directory. The
+ // CO SHALL be responsible for creating the directory if it does not
+ // exist.
// This is a REQUIRED field.
string staging_target_path = 3;
- // The capability of the volume the CO expects the volume to have.
+ // Volume capability describing how the CO intends to use this volume.
+ // SP MUST ensure the CO can use the staged volume as described.
+ // Otherwise SP MUST return the appropriate gRPC error code.
// This is a REQUIRED field.
VolumeCapability volume_capability = 4;
// Secrets required by plugin to complete node stage volume request.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map node_stage_secrets = 5;
+ map secrets = 5 [(csi_secret) = true];
- // Attributes of the volume to publish. This field is OPTIONAL and
- // MUST match the attributes of the `Volume` identified by
- // `volume_id`.
- map volume_attributes = 6;
+ // Volume context as returned by CO in CreateVolumeRequest. This field
+ // is OPTIONAL and MUST match the volume_context of the volume
+ // identified by `volume_id`.
+ map volume_context = 6;
}
message NodeStageVolumeResponse {
@@ -970,7 +1034,7 @@ message NodeUnstageVolumeRequest {
// The ID of the volume. This field is REQUIRED.
string volume_id = 1;
- // The path at which the volume was published. It MUST be an absolute
+ // The path at which the volume was staged. It MUST be an absolute
// path in the root filesystem of the process serving this request.
// This is a REQUIRED field.
string staging_target_path = 2;
@@ -988,9 +1052,9 @@ message NodePublishVolumeRequest {
// has `PUBLISH_UNPUBLISH_VOLUME` controller capability, and SHALL be
// left unset if the corresponding Controller Plugin does not have
// this capability. This is an OPTIONAL field.
- map publish_info = 2;
+ map publish_context = 2;
- // The path to which the device was mounted by `NodeStageVolume`.
+ // The path to which the volume was staged by `NodeStageVolume`.
// It MUST be an absolute path in the root filesystem of the process
// serving this request.
// It MUST be set if the Node Plugin implements the
@@ -1001,28 +1065,36 @@ message NodePublishVolumeRequest {
// The path to which the volume will be published. It MUST be an
// absolute path in the root filesystem of the process serving this
// request. The CO SHALL ensure uniqueness of target_path per volume.
- // The CO SHALL ensure that the path exists, and that the process
- // serving the request has `read` and `write` permissions to the path.
+ // The CO SHALL ensure that the parent directory of this path exists
+ // and that the process serving the request has `read` and `write`
+ // permissions to that parent directory.
+ // For volumes with an access type of block, the SP SHALL place the
+ // block device at target_path.
+ // For volumes with an access type of mount, the SP SHALL place the
+ // mounted directory at target_path.
+ // Creation of target_path is the responsibility of the SP.
// This is a REQUIRED field.
string target_path = 4;
- // The capability of the volume the CO expects the volume to have.
+ // Volume capability describing how the CO intends to use this volume.
+ // SP MUST ensure the CO can use the published volume as described.
+ // Otherwise SP MUST return the appropriate gRPC error code.
// This is a REQUIRED field.
VolumeCapability volume_capability = 5;
- // Whether to publish the volume in readonly mode. This field is
- // REQUIRED.
+ // Indicates SP MUST publish the volume in readonly mode.
+ // This field is REQUIRED.
bool readonly = 6;
// Secrets required by plugin to complete node publish volume request.
// This field is OPTIONAL. Refer to the `Secrets Requirements`
// section on how to use this field.
- map node_publish_secrets = 7;
+ map secrets = 7 [(csi_secret) = true];
- // Attributes of the volume to publish. This field is OPTIONAL and
- // MUST match the attributes of the Volume identified by
- // `volume_id`.
- map volume_attributes = 8;
+ // Volume context as returned by CO in CreateVolumeRequest. This field
+ // is OPTIONAL and MUST match the volume_context of the volume
+ // identified by `volume_id`.
+ map volume_context = 8;
}
message NodePublishVolumeResponse {
@@ -1034,6 +1106,7 @@ message NodeUnpublishVolumeRequest {
// The path at which the volume was published. It MUST be an absolute
// path in the root filesystem of the process serving this request.
+ // The SP MUST delete the file or directory it created at this path.
// This is a REQUIRED field.
string target_path = 2;
}
@@ -1041,15 +1114,43 @@ message NodeUnpublishVolumeRequest {
message NodeUnpublishVolumeResponse {
// Intentionally empty.
}
-message NodeGetIdRequest {
- // Intentionally empty.
-}
+message NodeGetVolumeStatsRequest {
+ // The ID of the volume. This field is REQUIRED.
+ string volume_id = 1;
-message NodeGetIdResponse {
- // The ID of the node as understood by the SP which SHALL be used by
- // CO in subsequent `ControllerPublishVolume`.
+ // It can be any valid path where volume was previously
+ // staged or published.
+ // It MUST be an absolute path in the root filesystem of
+ // the process serving this request.
// This is a REQUIRED field.
- string node_id = 1;
+ string volume_path = 2;
+}
+
+message NodeGetVolumeStatsResponse {
+ // This field is OPTIONAL.
+ repeated VolumeUsage usage = 1;
+}
+
+message VolumeUsage {
+ enum Unit {
+ UNKNOWN = 0;
+ BYTES = 1;
+ INODES = 2;
+ }
+ // The available capacity in specified Unit. This field is OPTIONAL.
+ // The value of this field MUST NOT be negative.
+ int64 available = 1;
+
+ // The total capacity in specified Unit. This field is REQUIRED.
+ // The value of this field MUST NOT be negative.
+ int64 total = 2;
+
+ // The used capacity in specified Unit. This field is OPTIONAL.
+ // The value of this field MUST NOT be negative.
+ int64 used = 3;
+
+ // Units by which values are measured. This field is REQUIRED.
+ Unit unit = 4;
}
message NodeGetCapabilitiesRequest {
// Intentionally empty.
@@ -1067,6 +1168,10 @@ message NodeServiceCapability {
enum Type {
UNKNOWN = 0;
STAGE_UNSTAGE_VOLUME = 1;
+ // If Plugin implements GET_VOLUME_STATS capability
+ // then it MUST implement NodeGetVolumeStats RPC
+ // call for fetching volume statistics.
+ GET_VOLUME_STATS = 2;
}
Type type = 1;
@@ -1081,9 +1186,14 @@ message NodeGetInfoRequest {
}
message NodeGetInfoResponse {
- // The ID of the node as understood by the SP which SHALL be used by
- // CO in subsequent calls to `ControllerPublishVolume`.
- // This is a REQUIRED field.
+ // The identifier of the node as understood by the SP.
+ // This field is REQUIRED.
+ // This field MUST contain enough information to uniquely identify
+ // this specific node vs all other nodes supported by this plugin.
+ // This field SHALL be used by the CO in subsequent calls, including
+ // `ControllerPublishVolume`, to refer to this node.
+ // The SP is NOT responsible for global uniqueness of node_id across
+ // multiple SPs.
string node_id = 1;
// Maximum number of volumes that controller can publish to the node.
@@ -1096,7 +1206,7 @@ message NodeGetInfoResponse {
// Specifies where (regions, zones, racks, etc.) the node is
// accessible from.
// A plugin that returns this field MUST also set the
- // ACCESSIBILITY_CONSTRAINTS plugin capability.
+ // VOLUME_ACCESSIBILITY_CONSTRAINTS plugin capability.
// COs MAY use this information along with the topology information
// returned in CreateVolumeResponse to ensure that a given volume is
// accessible from a given node when scheduling workloads.
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java
index c415ced7488..4ee8f519c83 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/adaptor/TestCsiAdaptorService.java
@@ -19,7 +19,7 @@
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.net.NetUtils;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiIdentityService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiIdentityService.java
index 0ecdcbe3780..727d6b2e120 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiIdentityService.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/FakeCsiIdentityService.java
@@ -18,9 +18,9 @@
package org.apache.hadoop.yarn.csi.client;
-import csi.v0.Csi.GetPluginInfoRequest;
-import csi.v0.Csi.GetPluginInfoResponse;
-import csi.v0.IdentityGrpc;
+import csi.v1.Csi.GetPluginInfoRequest;
+import csi.v1.Csi.GetPluginInfoResponse;
+import csi.v1.IdentityGrpc;
import io.grpc.stub.StreamObserver;
/**
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/ICsiClientTest.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/ICsiClientTest.java
index 2f150cb8dfb..934cb0b0d12 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/ICsiClientTest.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/ICsiClientTest.java
@@ -17,7 +17,7 @@
*/
package org.apache.hadoop.yarn.csi.client;
-import csi.v0.Csi;
+import csi.v1.Csi;
import java.io.IOException;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
index 7eed98f5517..cfb13dc02bc 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-csi/src/test/java/org/apache/hadoop/yarn/csi/client/TestCsiClient.java
@@ -18,7 +18,7 @@
package org.apache.hadoop.yarn.csi.client;
-import csi.v0.Csi;
+import csi.v1.Csi;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.test.GenericTestUtils;
import org.junit.AfterClass;
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java
index 839d1bc61f3..872c98464ba 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/VolumeManagerImpl.java
@@ -18,7 +18,6 @@
package org.apache.hadoop.yarn.server.resourcemanager.volume.csi;
import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Strings;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.conf.Configuration;
@@ -26,15 +25,13 @@
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.service.AbstractService;
import org.apache.hadoop.yarn.api.CsiAdaptorProtocol;
-import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoRequest;
-import org.apache.hadoop.yarn.api.protocolrecords.GetPluginInfoResponse;
import org.apache.hadoop.yarn.client.NMProxy;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.ipc.YarnRPC;
import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.lifecycle.Volume;
import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.provisioner.VolumeProvisioningResults;
import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.provisioner.VolumeProvisioningTask;
+import org.apache.hadoop.yarn.util.csi.CsiConfigUtils;
import java.io.IOException;
import java.net.InetSocketAddress;
@@ -67,47 +64,41 @@ public VolumeManagerImpl() {
.newScheduledThreadPool(PROVISIONING_TASK_THREAD_POOL_SIZE);
}
- // Init the CSI adaptor cache according to the configuration.
+ // Init the CSI adaptor client cache according to the configuration.
// user only needs to configure a list of adaptor addresses,
- // this method extracts each address and init an adaptor client,
- // then proceed with a hand-shake by calling adaptor's getPluginInfo
- // method to retrieve the driver info. If the driver can be resolved,
- // it is then added to the cache. Note, we don't allow two drivers
- // specified with same driver-name even version is different.
+ // this method extracts each address and init an adaptor client.
+ // The csi-driver-adaptor client is responsible for contacting a
+ // csi-driver-adaptor to perform identity/controller service calls.
+ // Currently, in this initiation phase, the RM doesn't hand-shake
+ // with the adaptor service to verify that the address is valid,
+ // because doing so would make the RM's state depend on a NM service.
+ // Note, we don't allow two drivers specified with the same
+ // driver-name even if the version is different.
private void initCsiAdaptorCache(
final Map adaptorMap, Configuration conf)
throws IOException, YarnException {
LOG.info("Initializing cache for csi-driver-adaptors");
- String[] addresses =
- conf.getStrings(YarnConfiguration.NM_CSI_ADAPTOR_ADDRESSES);
- if (addresses != null && addresses.length > 0) {
- for (String addr : addresses) {
- LOG.info("Found csi-driver-adaptor socket address: " + addr);
- InetSocketAddress address = NetUtils.createSocketAddr(addr);
+ String[] driverNames = CsiConfigUtils.getCsiDriverNames(conf);
+ if (driverNames != null && driverNames.length > 0) {
+ for (String driverName : CsiConfigUtils.getCsiDriverNames(conf)) {
+ LOG.info("Found csi-driver : " + driverName);
+ String addressStr = CsiConfigUtils.
+ getPlainCsiAdaptorAddressForDriver(driverName, conf);
+ LOG.info("Csi-driver-adaptor server location: "
+ + addressStr + "(" + driverName + ")");
+ InetSocketAddress address = NetUtils.createSocketAddr(addressStr);
YarnRPC rpc = YarnRPC.create(conf);
UserGroupInformation currentUser =
UserGroupInformation.getCurrentUser();
CsiAdaptorProtocol adaptorClient = NMProxy
.createNMProxy(conf, CsiAdaptorProtocol.class, currentUser, rpc,
address);
- // Attempt to resolve the driver by contacting to
- // the diver's identity service on the given address.
- // If the call failed, the initialization is also failed
- // in order running into inconsistent state.
- LOG.info("Retrieving info from csi-driver-adaptor on address " + addr);
- GetPluginInfoResponse response =
- adaptorClient.getPluginInfo(GetPluginInfoRequest.newInstance());
- if (!Strings.isNullOrEmpty(response.getDriverName())) {
- String driverName = response.getDriverName();
- if (adaptorMap.containsKey(driverName)) {
- throw new YarnException(
- "Duplicate driver adaptor found," + " driver name: "
- + driverName);
- }
- adaptorMap.put(driverName, adaptorClient);
- LOG.info("CSI Adaptor added to the cache, adaptor name: " + driverName
- + ", driver version: " + response.getVersion());
+ if (adaptorMap.containsKey(driverName)) {
+ throw new YarnException(
+ "Duplicate driver adaptor found," + " driver name: "
+ + driverName);
}
+ adaptorMap.put(driverName, adaptorClient);
}
}
}
diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
index 814634ad314..0fccf1b0c76 100644
--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
+++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/volume/csi/processor/VolumeAMSProcessor.java
@@ -30,7 +30,6 @@
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.ResourceInformation;
import org.apache.hadoop.yarn.api.records.SchedulingRequest;
-import org.apache.hadoop.yarn.conf.YarnConfiguration;
import org.apache.hadoop.yarn.exceptions.YarnException;
import org.apache.hadoop.yarn.server.resourcemanager.RMContext;
import org.apache.hadoop.yarn.server.resourcemanager.volume.csi.VolumeManager;
@@ -152,11 +151,9 @@ private Volume checkAndGetVolume(VolumeMetaData metaData)
if (adaptor == null) {
throw new InvalidVolumeException("It seems for the driver name"
+ " specified in the volume " + metaData.getDriverName()
- + " ,there is no matched driver-adaptor can be found. "
- + "Is the driver probably registered? Please check if"
- + " adaptors service addresses defined in "
- + YarnConfiguration.NM_CSI_ADAPTOR_ADDRESSES
- + " are correct and services are started.");
+ + ", there is no matched driver-adaptor can be found. "
+ + "Is the csi-driver name, endpoint and csi-driver-adaptor's "
+ + "service address probably configured in yarn-site.xml.");
}
toAdd.setClient(adaptor);
return this.volumeManager.addOrGetVolume(toAdd);