Skip to content

Commit ea8762e

Browse files
szetszwo and adoroszlai
authored and committed
HDDS-12750. Move StorageTypeProto from ScmServerDatanodeHeartbeatProtocol.proto to hdds.proto (#8208)
(cherry picked from commit bba8a67)
1 parent 24060a5 commit ea8762e

File tree

19 files changed

+59
-115
lines changed

19 files changed

+59
-115
lines changed

hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/StorageLocationReport.java

Lines changed: 1 addition & 31 deletions
Original file line numberDiff line numberDiff line change
@@ -20,9 +20,9 @@
2020
import java.io.IOException;
2121
import org.apache.hadoop.fs.StorageType;
2222
import org.apache.hadoop.hdds.conf.ConfigurationSource;
23+
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
2324
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.MetadataStorageReportProto;
2425
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
25-
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
2626
import org.apache.hadoop.ozone.container.common.interfaces.StorageLocationReportMXBean;
2727
import org.apache.hadoop.ozone.container.common.volume.VolumeUsage;
2828

@@ -237,36 +237,6 @@ public static StorageLocationReport getFromProtobuf(StorageReportProto report)
237237
return builder.build();
238238
}
239239

240-
/**
241-
* Returns the StorageLocationReport from the protoBuf message.
242-
* @param report MetadataStorageReportProto
243-
* @return StorageLocationReport
244-
* @throws IOException in case of invalid storage type
245-
*/
246-
247-
public static StorageLocationReport getMetadataFromProtobuf(
248-
MetadataStorageReportProto report) throws IOException {
249-
StorageLocationReport.Builder builder = StorageLocationReport.newBuilder();
250-
builder.setStorageLocation(report.getStorageLocation());
251-
if (report.hasCapacity()) {
252-
builder.setCapacity(report.getCapacity());
253-
}
254-
if (report.hasScmUsed()) {
255-
builder.setScmUsed(report.getScmUsed());
256-
}
257-
if (report.hasStorageType()) {
258-
builder.setStorageType(getStorageType(report.getStorageType()));
259-
}
260-
if (report.hasRemaining()) {
261-
builder.setRemaining(report.getRemaining());
262-
}
263-
264-
if (report.hasFailed()) {
265-
builder.setFailed(report.getFailed());
266-
}
267-
return builder.build();
268-
}
269-
270240
/**
271241
* Returns StorageLocation.Builder instance.
272242
*

hadoop-hdds/interface-client/src/main/proto/hdds.proto

Lines changed: 11 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,17 @@ message KeyValue {
150150
optional string value = 2;
151151
}
152152

153+
/**
154+
* Types of storage media.
155+
*/
156+
enum StorageTypeProto {
157+
DISK = 1;
158+
SSD = 2;
159+
ARCHIVE = 3;
160+
RAM_DISK = 4;
161+
PROVIDED = 5;
162+
}
163+
153164
/**
154165
* Type of the node.
155166
*/

hadoop-hdds/interface-client/src/main/resources/proto.lock

Lines changed: 25 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1827,6 +1827,31 @@
18271827
}
18281828
]
18291829
},
1830+
{
1831+
"name": "StorageTypeProto",
1832+
"enum_fields": [
1833+
{
1834+
"name": "DISK",
1835+
"integer": 1
1836+
},
1837+
{
1838+
"name": "SSD",
1839+
"integer": 2
1840+
},
1841+
{
1842+
"name": "ARCHIVE",
1843+
"integer": 3
1844+
},
1845+
{
1846+
"name": "RAM_DISK",
1847+
"integer": 4
1848+
},
1849+
{
1850+
"name": "PROVIDED",
1851+
"integer": 5
1852+
}
1853+
]
1854+
},
18301855
{
18311856
"name": "NodeType",
18321857
"enum_fields": [

hadoop-hdds/interface-server/src/main/proto/ScmServerDatanodeHeartbeatProtocol.proto

Lines changed: 0 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -193,16 +193,6 @@ message MetadataStorageReportProto {
193193
optional bool failed = 6 [default = false];
194194
}
195195

196-
/**
197-
* Types of recognized storage media.
198-
*/
199-
enum StorageTypeProto {
200-
DISK = 1;
201-
SSD = 2;
202-
ARCHIVE = 3;
203-
RAM_DISK = 4;
204-
PROVIDED = 5;
205-
}
206196

207197
message ContainerReportsProto {
208198
repeated ContainerReplicaProto reports = 1;

hadoop-hdds/interface-server/src/main/resources/proto.lock

Lines changed: 0 additions & 25 deletions
Original file line numberDiff line numberDiff line change
@@ -805,31 +805,6 @@
805805
}
806806
]
807807
},
808-
{
809-
"name": "StorageTypeProto",
810-
"enum_fields": [
811-
{
812-
"name": "DISK",
813-
"integer": 1
814-
},
815-
{
816-
"name": "SSD",
817-
"integer": 2
818-
},
819-
{
820-
"name": "ARCHIVE",
821-
"integer": 3
822-
},
823-
{
824-
"name": "RAM_DISK",
825-
"integer": 4
826-
},
827-
{
828-
"name": "PROVIDED",
829-
"integer": 5
830-
}
831-
]
832-
},
833808
{
834809
"name": "ContainerReplicaProto.State",
835810
"enum_fields": [

hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java

Lines changed: 3 additions & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@
5656
import org.apache.hadoop.hdds.protocol.DatanodeID;
5757
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeOperationalState;
5858
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState;
59-
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
59+
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
6060
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandQueueReportProto;
6161
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.LayoutVersionProto;
6262
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.NodeReportProto;
@@ -1098,16 +1098,14 @@ public Map<String, Long> getNodeInfo() {
10981098
}
10991099
List<StorageReportProto> storageReportProtos = node.getStorageReports();
11001100
for (StorageReportProto reportProto : storageReportProtos) {
1101-
if (reportProto.getStorageType() ==
1102-
StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK) {
1101+
if (reportProto.getStorageType() == StorageTypeProto.DISK) {
11031102
nodeInfo.compute(keyPrefix + UsageMetrics.DiskCapacity.name(),
11041103
(k, v) -> v + reportProto.getCapacity());
11051104
nodeInfo.compute(keyPrefix + UsageMetrics.DiskRemaining.name(),
11061105
(k, v) -> v + reportProto.getRemaining());
11071106
nodeInfo.compute(keyPrefix + UsageMetrics.DiskUsed.name(),
11081107
(k, v) -> v + reportProto.getScmUsed());
1109-
} else if (reportProto.getStorageType() ==
1110-
StorageContainerDatanodeProtocolProtos.StorageTypeProto.SSD) {
1108+
} else if (reportProto.getStorageType() == StorageTypeProto.SSD) {
11111109
nodeInfo.compute(keyPrefix + UsageMetrics.SSDCapacity.name(),
11121110
(k, v) -> v + reportProto.getCapacity());
11131111
nodeInfo.compute(keyPrefix + UsageMetrics.SSDRemaining.name(),

hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/HddsTestUtils.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -39,6 +39,7 @@
3939
import org.apache.hadoop.hdds.protocol.MockDatanodeDetails;
4040
import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
4141
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
42+
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
4243
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ClosePipelineInfo;
4344
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatus;
4445
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.CommandStatusReportsProto;
@@ -51,7 +52,6 @@
5152
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReport;
5253
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.PipelineReportsProto;
5354
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageReportProto;
54-
import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto;
5555
import org.apache.hadoop.hdds.scm.container.ContainerID;
5656
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
5757
import org.apache.hadoop.hdds.scm.container.ContainerManager;

hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestSCMCommonPlacementPolicy.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,8 +17,8 @@
1717

1818
package org.apache.hadoop.hdds.scm;
1919

20+
import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto.DISK;
2021
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.ContainerReplicaProto.State.CLOSED;
21-
import static org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos.StorageTypeProto.DISK;
2222
import static org.assertj.core.api.Assertions.assertThat;
2323
import static org.junit.jupiter.api.Assertions.assertEquals;
2424
import static org.junit.jupiter.api.Assertions.assertFalse;

hadoop-ozone/common/src/main/java/org/apache/hadoop/hdds/protocol/StorageType.java

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,7 +17,7 @@
1717

1818
package org.apache.hadoop.hdds.protocol;
1919

20-
import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.StorageTypeProto;
20+
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
2121

2222
/**
2323
* Ozone specific storage types.

hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestBucketLayoutWithOlderClient.java

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -21,6 +21,7 @@
2121
import static org.junit.jupiter.api.Assertions.assertNotNull;
2222

2323
import java.util.UUID;
24+
import org.apache.hadoop.hdds.protocol.proto.HddsProtos.StorageTypeProto;
2425
import org.apache.hadoop.hdds.utils.IOUtils;
2526
import org.apache.hadoop.ozone.ClientVersion;
2627
import org.apache.hadoop.ozone.MiniOzoneCluster;
@@ -88,7 +89,7 @@ public void testCreateBucketWithOlderClient() throws Exception {
8889
OzoneManagerProtocolProtos.BucketInfo.newBuilder()
8990
.setVolumeName(volumeName).setBucketName(buckName)
9091
.setIsVersionEnabled(false).setStorageType(
91-
OzoneManagerProtocolProtos.StorageTypeProto.DISK)
92+
StorageTypeProto.DISK)
9293
.build())
9394
.build()).build();
9495
createBucketReq = createBucketReq.toBuilder()

0 commit comments

Comments (0)