Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Original file line number Diff line number Diff line change
Expand Up @@ -310,11 +310,13 @@ public DatanodeStorageInfo[] getStorageInfos() {
}

public EnumSet<StorageType> getStorageTypes() {
EnumSet<StorageType> storageTypes = EnumSet.noneOf(StorageType.class);
for (DatanodeStorageInfo dsi : getStorageInfos()) {
storageTypes.add(dsi.getStorageType());
synchronized (storageMap) {
EnumSet<StorageType> storageTypes = EnumSet.noneOf(StorageType.class);
for (DatanodeStorageInfo dsi : storageMap.values()) {
storageTypes.add(dsi.getStorageType());
}
return storageTypes;
}
return storageTypes;
}

public StorageReport[] getStorageReports() {
Expand Down Expand Up @@ -1136,9 +1138,16 @@ public boolean isRegistered() {
}

/**
 * Checks whether this datanode has at least one storage of the given type.
 *
 * Iterates {@code storageMap.values()} directly under the lock instead of
 * calling {@code getStorageInfos()}, which acquires the lock a second time
 * and allocates a new array. On clusters with many storages per DataNode
 * this matters because hasStorageType() is called frequently (block
 * placement, heartbeat processing, topology updates) — HDFS-17639.
 *
 * @param type the storage type to look for
 * @return true if any attached storage has exactly this type
 */
public boolean hasStorageType(StorageType type) {
  synchronized (storageMap) {
    for (DatanodeStorageInfo dnStorage : storageMap.values()) {
      if (dnStorage.getStorageType() == type) {
        // Early exit: no need to scan remaining storages.
        return true;
      }
    }
  }
  return false;
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -23,7 +23,9 @@
import static org.junit.jupiter.api.Assertions.assertTrue;

import java.util.ArrayList;
import java.util.EnumSet;

import org.apache.hadoop.fs.StorageType;
import org.apache.hadoop.hdfs.DFSTestUtil;
import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo.AddBlockResult;
Expand Down Expand Up @@ -82,4 +84,48 @@ public void testBlocksCounter() throws Exception {
assertTrue(BlocksMap.removeBlock(dd, blk1));
assertEquals(0, dd.numBlocks());
}

/**
 * Regression test for HDFS-17639: hasStorageType() used to go through
 * getStorageInfos(), taking the storageMap lock and allocating a fresh
 * array per call; the fix walks storageMap.values() directly under the
 * lock. Here we only verify correctness — hasStorageType() and
 * getStorageTypes() must faithfully report which types exist after each
 * storage injection.
 */
@Test
public void testHasStorageTypeAndGetStorageTypes() {
  DatanodeDescriptor dd = DFSTestUtil.getLocalDatanodeDescriptor();

  // With no storages injected yet, every query must come back negative.
  assertFalse(dd.hasStorageType(StorageType.DISK),
      "No DISK storage expected before injection");
  assertFalse(dd.hasStorageType(StorageType.SSD),
      "No SSD storage expected before injection");
  assertTrue(dd.getStorageTypes().isEmpty(),
      "getStorageTypes() should be empty before injection");

  // Add a single DISK storage and confirm only DISK is reported.
  DatanodeStorageInfo disk = DFSTestUtil.createDatanodeStorageInfo(
      "storage-disk", "127.0.0.1", "/rack1", "host1",
      StorageType.DISK, null);
  dd.injectStorage(disk);

  assertTrue(dd.hasStorageType(StorageType.DISK),
      "DISK storage should be present after injection");
  assertFalse(dd.hasStorageType(StorageType.SSD),
      "SSD storage should still be absent");
  EnumSet<StorageType> observed = dd.getStorageTypes();
  assertTrue(observed.contains(StorageType.DISK));
  assertEquals(1, observed.size());

  // Add an SSD storage as well; both types must now be visible.
  DatanodeStorageInfo ssd = DFSTestUtil.createDatanodeStorageInfo(
      "storage-ssd", "127.0.0.1", "/rack1", "host1",
      StorageType.SSD, null);
  dd.injectStorage(ssd);

  assertTrue(dd.hasStorageType(StorageType.DISK));
  assertTrue(dd.hasStorageType(StorageType.SSD));
  assertEquals(2, dd.getStorageTypes().size());
}
}