Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -17,17 +17,21 @@

package org.apache.hadoop.hdds.scm;

import org.apache.hadoop.hdds.protocol.StorageType;

/**
 * Information about a pipeline request.
 */
public final class PipelineRequestInformation {
private final long size;
private final StorageType storageType;

/**
* Builder for PipelineRequestInformation.
*/
public static class Builder {
private long size;
private StorageType storageType;

public static Builder getBuilder() {
return new Builder();
Expand All @@ -43,16 +47,26 @@ public Builder setSize(long sz) {
return this;
}

/**
 * Sets the storage type requested for the pipeline.
 *
 * @param type the requested storage type
 * @return this builder, for call chaining
 */
public Builder setStorageType(StorageType type) {
storageType = type;
return this;
}

/**
 * Builds the {@link PipelineRequestInformation} from the values set on
 * this builder.
 *
 * @return a new instance carrying the configured size and storage type
 */
public PipelineRequestInformation build() {
// Diff artifact fixed: the stale pre-image return statement
// (size-only constructor call) was left next to the new one.
return new PipelineRequestInformation(size, storageType);
}
}

/**
 * Private; use {@link Builder} to create instances.
 *
 * @param size requested pipeline size
 * @param storageType requested storage type; may be null when the caller
 *                    did not constrain the type (Builder leaves it unset)
 */
private PipelineRequestInformation(long size, StorageType storageType) {
// Diff artifact fixed: the stale pre-image signature
// (size-only constructor) was left next to the new two-arg one.
this.size = size;
this.storageType = storageType;
}

/** @return the requested size for the pipeline. */
public long getSize() {
return size;
}

/**
 * @return the requested storage type; may be null when no type was set
 *         on the {@link Builder}
 */
public StorageType getStorageType() {
return storageType;
}
}
Original file line number Diff line number Diff line change
Expand Up @@ -445,6 +445,11 @@ public final class ScmConfigKeys {
public static final String OZONE_SCM_PIPELINE_SCRUB_INTERVAL_DEFAULT =
"150s";

public static final String OZONE_SCM_PIPELINE_CREATION_STORAGE_TYPE_AWARE =
"ozone.scm.pipeline.creation.storage-type-aware.enabled";
public static final boolean
OZONE_SCM_PIPELINE_CREATION_STORAGE_TYPE_AWARE_DEFAULT = false;

// Allow SCM to auto create factor ONE ratis pipeline.
public static final String OZONE_SCM_PIPELINE_AUTO_CREATE_FACTOR_ONE =
"ozone.scm.pipeline.creation.auto.factor.one";
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -556,7 +556,7 @@ public static void createRecoveringContainer(XceiverClientSpi client,
*/
public static void createContainer(XceiverClientSpi client, long containerID,
String encodedToken) throws IOException {
// Delegate with no explicit state, replica index 0, and no
// storage-type constraint.
// Diff artifact fixed: the stale pre-image call (five-arg overload) was
// left alongside the new six-arg call, which would create the container
// twice.
createContainer(client, containerID, encodedToken, null, 0, null);
}

/**
Expand All @@ -571,6 +571,24 @@ public static void createContainer(XceiverClientSpi client,
long containerID, String encodedToken,
ContainerProtos.ContainerDataProto.State state, int replicaIndex)
throws IOException {
createContainer(client, containerID, encodedToken, state, replicaIndex,
null);
}

/**
* createContainer call that creates a container on the datanode.
* @param client - client
* @param containerID - ID of container
* @param encodedToken - encodedToken if security is enabled
* @param state - state of the container
* @param replicaIndex - index position of the container replica
* @param storageType - storage type for volume selection on the datanode
*/
public static void createContainer(XceiverClientSpi client,
long containerID, String encodedToken,
ContainerProtos.ContainerDataProto.State state, int replicaIndex,
ContainerProtos.StorageTypeProto storageType)
throws IOException {
ContainerProtos.CreateContainerRequestProto.Builder createRequest =
ContainerProtos.CreateContainerRequestProto.newBuilder();
createRequest
Expand All @@ -581,6 +599,9 @@ public static void createContainer(XceiverClientSpi client,
if (replicaIndex > 0) {
createRequest.setReplicaIndex(replicaIndex);
}
if (storageType != null) {
createRequest.setStorageType(storageType);
}

String id = client.getPipeline().getFirstNode().getUuidString();
ContainerCommandRequestProto.Builder request =
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -289,6 +289,7 @@ public final class OzoneConsts {
public static final String MAX_NUM_OF_BUCKETS = "maxNumOfBuckets";
public static final String HAS_SNAPSHOT = "hasSnapshot";
public static final String STORAGE_TYPE = "storageType";
public static final String STORAGE_POLICY = "storagePolicy";
public static final String RESOURCE_TYPE = "resourceType";
public static final String IS_VERSION_ENABLED = "isVersionEnabled";
public static final String CREATION_TIME = "creationTime";
Expand Down
35 changes: 35 additions & 0 deletions hadoop-hdds/common/src/main/resources/ozone-default.xml
Original file line number Diff line number Diff line change
Expand Up @@ -1693,6 +1693,19 @@
If enabled, SCM will auto create RATIS factor ONE pipeline.
</description>
</property>
<property>
<name>ozone.scm.pipeline.creation.storage-type-aware.enabled</name>
<value>false</value>
<tag>OZONE, SCM, PIPELINE</tag>
<description>
If enabled, the background pipeline creator will proactively create
storage-type-constrained pipelines for each StorageType (SSD, DISK,
ARCHIVE) in addition to untyped pipelines. This ensures that
pipelines suitable for HOT/WARM/COLD storage policies are available
when containers need to be allocated. Only enable on clusters that
use storage tiering with mixed StorageType datanodes.
</description>
</property>
<property>
<name>hdds.scm.safemode.threshold.pct</name>
<value>0.99</value>
Expand Down Expand Up @@ -4195,6 +4208,28 @@
</description>
</property>

<property>
<name>ozone.storage.policy.enabled</name>
<value>false</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
When enabled, Ozone Manager will resolve and enforce storage policies
(HOT, WARM, COLD, ALL_SSD) on buckets and keys. When disabled, all
storage policy metadata is ignored and default placement is used.
</description>
</property>

<property>
<name>ozone.default.storage.policy</name>
<value>WARM</value>
<tag>OZONE, MANAGEMENT</tag>
<description>
Default storage policy used by Ozone Manager when a client does not
specify a storage policy. Supported values are HOT, WARM, COLD,
ALL_SSD.
</description>
</property>

<property>
<name>ozone.client.max.ec.stripe.write.retries</name>
<value>10</value>
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -53,6 +53,7 @@
import java.util.Set;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;
import org.apache.commons.io.FileUtils;
import org.apache.hadoop.fs.FileAlreadyExistsException;
import org.apache.hadoop.fs.FileUtil;
Expand Down Expand Up @@ -148,6 +149,23 @@ public void setCheckChunksFilePath(boolean bCheckChunksDirFilePath) {
/**
 * Creates this container on a datanode volume chosen by the given policy.
 * Delegates to the storage-type-aware overload with a null storage type,
 * meaning no volume filtering is applied and any available volume may be
 * chosen.
 */
@Override
public void create(VolumeSet volumeSet, VolumeChoosingPolicy
volumeChoosingPolicy, String clusterId) throws StorageContainerException {
create(volumeSet, volumeChoosingPolicy, clusterId, null);
}

/**
* Creates a container, filtering volumes by the requested StorageType
* before choosing a volume. If no volumes match the requested type,
* falls back to all available volumes.
*
* @param volumeSet the set of available volumes
* @param volumeChoosingPolicy policy for choosing among candidate volumes
* @param clusterId the cluster ID
* @param storageType the requested storage type, or null for no filtering
*/
public void create(VolumeSet volumeSet, VolumeChoosingPolicy
volumeChoosingPolicy, String clusterId,
org.apache.hadoop.hdds.protocol.StorageType storageType)
throws StorageContainerException {
Objects.requireNonNull(volumeChoosingPolicy, "VolumeChoosingPolicy == null");
Objects.requireNonNull(volumeSet, "volumeSet == null");
Objects.requireNonNull(clusterId, "clusterId == null");
Expand All @@ -159,6 +177,20 @@ public void create(VolumeSet volumeSet, VolumeChoosingPolicy
try {
List<HddsVolume> volumes
= StorageVolumeUtil.getHddsVolumesList(volumeSet.getVolumesList());
if (storageType != null) {
org.apache.hadoop.fs.StorageType fsStorageType =
org.apache.hadoop.fs.StorageType.valueOf(storageType.name());
List<HddsVolume> filtered = volumes.stream()
.filter(v -> v.getStorageType() == fsStorageType)
.collect(Collectors.toList());
if (!filtered.isEmpty()) {
volumes = filtered;
} else {
LOG.warn("No volumes found with storage type {}, falling back to" +
" all volumes for container {}", storageType,
containerData.getContainerID());
}
}
while (true) {
HddsVolume containerVolume;
String hddsVolumeDir;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -100,6 +100,7 @@
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
import org.apache.hadoop.hdds.conf.StorageUnit;
import org.apache.hadoop.hdds.protocol.DatanodeDetails;
import org.apache.hadoop.hdds.protocol.StorageType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ChecksumType;
import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.ContainerCommandRequestProto;
Expand Down Expand Up @@ -475,12 +476,20 @@ ContainerCommandResponseProto handleCreateContainer(
KeyValueContainer newContainer = new KeyValueContainer(
newContainerData, conf);

// Extract storageType for volume selection on heterogeneous nodes.
StorageType requestedStorageType = null;
if (request.getCreateContainer().hasStorageType()) {
requestedStorageType = StorageType.valueOf(
request.getCreateContainer().getStorageType().name());
}

boolean created = false;
Lock containerIdLock = containerCreationLocks.get(containerID);
containerIdLock.lock();
try {
if (containerSet.getContainer(containerID) == null) {
newContainer.create(volumeSet, volumeChoosingPolicy, clusterId);
newContainer.create(volumeSet, volumeChoosingPolicy, clusterId,
requestedStorageType);
if (RECOVERING == newContainer.getContainerState()) {
created = containerSet.addContainerByOverwriteMissingContainer(newContainer);
} else {
Expand Down
Loading