Skip to content

Commit 8eaca4c

Browse files
HDDS-14010. [Recon] Endpoint to retrieve pending deletion metrics from DataNodes, SCM, and OM. (#9413)
1 parent 609399a commit 8eaca4c

16 files changed

Lines changed: 1194 additions & 77 deletions

hadoop-hdds/common/src/main/resources/ozone-default.xml

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -3504,6 +3504,24 @@
35043504
If the buffer overflows, task reinitialization will be triggered.
35053505
</description>
35063506
</property>
3507+
<property>
3508+
<name>ozone.recon.dn.metrics.collection.minimum.api.delay</name>
3509+
<value>30s</value>
3510+
<tag>OZONE, RECON, DN</tag>
3511+
<description>
3512+
Minimum delay enforced by the API before starting a new JMX collection task.
3513+
It behaves like a rate limiter to avoid unnecessary task creation.
3514+
</description>
3515+
</property>
3516+
<property>
3517+
<name>ozone.recon.dn.metrics.collection.timeout</name>
3518+
<value>10m</value>
3519+
<tag>OZONE, RECON, DN</tag>
3520+
<description>
3521+
Maximum time allowed for the API call to complete.
3522+
If this timeout is exceeded, pending tasks will be cancelled.
3523+
</description>
3524+
</property>
35073525
<property>
35083526
<name>ozone.scm.datanode.admin.monitor.interval</name>
35093527
<value>30s</value>

hadoop-ozone/dist/src/main/compose/ozone/docker-config

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -58,7 +58,7 @@ OZONE-SITE.XML_ozone.http.basedir=/tmp/ozone_http
5858
OZONE-SITE.XML_hdds.container.ratis.datastream.enabled=true
5959

6060
OZONE-SITE.XML_ozone.fs.hsync.enabled=true
61-
61+
OZONE-SITE.XML_ozone.recon.dn.metrics.collection.minimum.api.delay=5s
6262
OZONE_CONF_DIR=/etc/hadoop
6363
OZONE_LOG_DIR=/var/log/hadoop
6464

hadoop-ozone/integration-test-recon/src/test/java/org/apache/hadoop/ozone/recon/TestStorageDistributionEndpoint.java

Lines changed: 155 additions & 35 deletions
Original file line numberDiff line numberDiff line change
@@ -22,19 +22,23 @@
2222
import static org.apache.hadoop.hdds.HddsConfigKeys.HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT;
2323
import static org.apache.hadoop.hdds.client.ReplicationFactor.THREE;
2424
import static org.apache.hadoop.hdds.client.ReplicationType.RATIS;
25+
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL;
2526
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HA_RATIS_SNAPSHOT_GAP;
2627
import static org.apache.hadoop.hdds.scm.ScmConfigKeys.OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL;
2728
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
28-
import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
29+
import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_DIR_DELETING_SERVICE_INTERVAL;
2930
import static org.apache.hadoop.ozone.recon.TestReconEndpointUtil.getReconWebAddress;
3031
import static org.junit.jupiter.api.Assertions.assertDoesNotThrow;
3132
import static org.junit.jupiter.api.Assertions.assertEquals;
3233
import static org.junit.jupiter.api.Assertions.assertNotNull;
34+
import static org.junit.jupiter.api.Assertions.assertTrue;
3335

3436
import com.fasterxml.jackson.databind.ObjectMapper;
3537
import java.time.Duration;
3638
import java.util.Collections;
3739
import java.util.List;
40+
import java.util.Map;
41+
import java.util.Objects;
3842
import java.util.concurrent.TimeUnit;
3943
import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
4044
import org.apache.hadoop.fs.FSDataOutputStream;
@@ -46,6 +50,8 @@
4650
import org.apache.hadoop.hdds.client.ReplicationConfig;
4751
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
4852
import org.apache.hadoop.hdds.scm.ScmConfig;
53+
import org.apache.hadoop.hdds.scm.container.ContainerInfo;
54+
import org.apache.hadoop.hdds.scm.events.SCMEvents;
4955
import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
5056
import org.apache.hadoop.hdds.utils.IOUtils;
5157
import org.apache.hadoop.ozone.HddsDatanodeService;
@@ -66,6 +72,9 @@
6672
import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
6773
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
6874
import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfoGroup;
75+
import org.apache.hadoop.ozone.recon.api.DataNodeMetricsService;
76+
import org.apache.hadoop.ozone.recon.api.types.DataNodeMetricsServiceResponse;
77+
import org.apache.hadoop.ozone.recon.api.types.ScmPendingDeletion;
6978
import org.apache.hadoop.ozone.recon.api.types.StorageCapacityDistributionResponse;
7079
import org.apache.hadoop.ozone.recon.spi.impl.OzoneManagerServiceProviderImpl;
7180
import org.apache.ozone.test.GenericTestUtils;
@@ -100,6 +109,7 @@ public class TestStorageDistributionEndpoint {
100109
private static final ObjectMapper MAPPER = new ObjectMapper();
101110

102111
private static final String STORAGE_DIST_ENDPOINT = "/api/v1/storageDistribution";
112+
private static final String PENDING_DELETION_ENDPOINT = "/api/v1/pendingDeletion";
103113

104114
static List<Arguments> replicationConfigs() {
105115
return Collections.singletonList(
@@ -110,17 +120,14 @@ static List<Arguments> replicationConfigs() {
110120
@BeforeAll
111121
public static void setup() throws Exception {
112122
conf = new OzoneConfiguration();
113-
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100,
114-
TimeUnit.MILLISECONDS);
115-
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_TIMEOUT, 100,
116-
TimeUnit.MILLISECONDS);
117-
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL,
118-
100, TimeUnit.MILLISECONDS);
123+
conf.setTimeDuration(OZONE_DIR_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS);
124+
conf.setTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL, 100, TimeUnit.MILLISECONDS);
125+
conf.setTimeDuration(OZONE_SCM_HEARTBEAT_PROCESS_INTERVAL, 100, TimeUnit.MILLISECONDS);
119126
conf.setLong(OZONE_SCM_HA_RATIS_SNAPSHOT_GAP, 1L);
120-
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 50,
121-
TimeUnit.MILLISECONDS);
122-
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200,
123-
TimeUnit.MILLISECONDS);
127+
conf.setTimeDuration(HDDS_HEARTBEAT_INTERVAL, 50, TimeUnit.MILLISECONDS);
128+
conf.setTimeDuration(HDDS_CONTAINER_REPORT_INTERVAL, 200, TimeUnit.MILLISECONDS);
129+
conf.setTimeDuration(OZONE_SCM_HA_DBTRANSACTIONBUFFER_FLUSH_INTERVAL, 500, TimeUnit.MILLISECONDS);
130+
conf.set(ReconServerConfigKeys.OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY, "5s");
124131

125132
// Enhanced SCM configuration for faster block deletion processing
126133
ScmConfig scmConfig = conf.getObject(ScmConfig.class);
@@ -129,18 +136,9 @@ public static void setup() throws Exception {
129136
conf.set(HDDS_SCM_WAIT_TIME_AFTER_SAFE_MODE_EXIT, "0s");
130137

131138
// Enhanced DataNode configuration to move pending deletion from SCM to DN faster
132-
DatanodeConfiguration dnConf =
133-
conf.getObject(DatanodeConfiguration.class);
134-
dnConf.setBlockDeletionInterval(Duration.ofMillis(100));
135-
// Increase block delete queue limit to allow more queued commands on DN
136-
dnConf.setBlockDeleteQueueLimit(50);
137-
// Reduce the interval for delete command worker processing
138-
dnConf.setBlockDeleteCommandWorkerInterval(Duration.ofMillis(100));
139-
// Increase blocks deleted per interval to speed up deletion
140-
dnConf.setBlockDeletionLimit(5000);
139+
DatanodeConfiguration dnConf = conf.getObject(DatanodeConfiguration.class);
140+
dnConf.setBlockDeletionInterval(Duration.ofMillis(30000));
141141
conf.setFromObject(dnConf);
142-
// Increase DN delete threads for faster parallel processing
143-
conf.setInt("ozone.datanode.block.delete.threads.max", 10);
144142

145143
recon = new ReconService(conf);
146144
cluster = MiniOzoneCluster.newBuilder(conf)
@@ -190,19 +188,133 @@ public void testStorageDistributionEndpoint(ReplicationConfig replicationConfig)
190188
}
191189
}
192190
waitForKeysCreated(replicationConfig);
193-
Thread.sleep(10000);
194-
StringBuilder urlBuilder = new StringBuilder();
195-
urlBuilder.append(getReconWebAddress(conf))
196-
.append(STORAGE_DIST_ENDPOINT);
197-
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
198-
StorageCapacityDistributionResponse storageResponse =
199-
MAPPER.readValue(response, StorageCapacityDistributionResponse.class);
200-
201-
assertEquals(20, storageResponse.getGlobalNamespace().getTotalKeys());
202-
assertEquals(60, storageResponse.getGlobalNamespace().getTotalUsedSpace());
203-
assertEquals(0, storageResponse.getUsedSpaceBreakDown().getOpenKeyBytes());
204-
assertEquals(60, storageResponse.getUsedSpaceBreakDown().getCommittedKeyBytes());
205-
assertEquals(3, storageResponse.getDataNodeUsage().size());
191+
GenericTestUtils.waitFor(this::verifyStorageDistributionAfterKeyCreation, 1000, 30000);
192+
closeAllContainers();
193+
fs.delete(dir1, true);
194+
GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionOm, 1000, 30000);
195+
GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionScm, 2000, 30000);
196+
GenericTestUtils.waitFor(() ->
197+
Objects.requireNonNull(scm.getClientProtocolServer().getDeletedBlockSummary()).getTotalBlockCount() == 0,
198+
1000, 30000);
199+
GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionDn, 2000, 60000);
200+
GenericTestUtils.waitFor(this::verifyPendingDeletionClearsAtDn, 2000, 60000);
201+
cluster.getHddsDatanodes().get(0).stop();
202+
GenericTestUtils.waitFor(this::verifyPendingDeletionAfterKeyDeletionOnDnFailure, 2000, 60000);
203+
}
204+
205+
private boolean verifyStorageDistributionAfterKeyCreation() {
206+
try {
207+
StringBuilder urlBuilder = new StringBuilder();
208+
urlBuilder.append(getReconWebAddress(conf)).append(STORAGE_DIST_ENDPOINT);
209+
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
210+
StorageCapacityDistributionResponse storageResponse =
211+
MAPPER.readValue(response, StorageCapacityDistributionResponse.class);
212+
213+
assertEquals(20, storageResponse.getGlobalNamespace().getTotalKeys());
214+
assertEquals(60, storageResponse.getGlobalNamespace().getTotalUsedSpace());
215+
assertEquals(0, storageResponse.getUsedSpaceBreakDown().getOpenKeyBytes());
216+
assertEquals(60, storageResponse.getUsedSpaceBreakDown().getCommittedKeyBytes());
217+
assertEquals(3, storageResponse.getDataNodeUsage().size());
218+
219+
return true;
220+
} catch (Exception e) {
221+
LOG.debug("Waiting for storage distribution assertions to pass", e);
222+
return false;
223+
}
224+
}
225+
226+
private boolean verifyPendingDeletionAfterKeyDeletionOm() {
227+
try {
228+
syncDataFromOM();
229+
StringBuilder urlBuilder = new StringBuilder();
230+
urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=om");
231+
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
232+
Map<String, Number> pendingDeletionMap = MAPPER.readValue(response, Map.class);
233+
assertEquals(30L, pendingDeletionMap.get("totalSize").longValue());
234+
assertEquals(30L, pendingDeletionMap.get("pendingDirectorySize").longValue() +
235+
pendingDeletionMap.get("pendingKeySize").longValue());
236+
return true;
237+
} catch (Exception e) {
238+
LOG.debug("Waiting for storage distribution assertions to pass", e);
239+
return false;
240+
}
241+
}
242+
243+
private boolean verifyPendingDeletionAfterKeyDeletionScm() {
244+
try {
245+
StringBuilder urlBuilder = new StringBuilder();
246+
urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=scm");
247+
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
248+
ScmPendingDeletion pendingDeletion = MAPPER.readValue(response, ScmPendingDeletion.class);
249+
assertEquals(30, pendingDeletion.getTotalReplicatedBlockSize());
250+
assertEquals(10, pendingDeletion.getTotalBlocksize());
251+
assertEquals(10, pendingDeletion.getTotalBlocksCount());
252+
return true;
253+
} catch (Throwable e) {
254+
LOG.debug("Waiting for storage distribution assertions to pass", e);
255+
return false;
256+
}
257+
}
258+
259+
private boolean verifyPendingDeletionAfterKeyDeletionDn() {
260+
try {
261+
scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
262+
StringBuilder urlBuilder = new StringBuilder();
263+
urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=dn");
264+
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
265+
DataNodeMetricsServiceResponse pendingDeletion = MAPPER.readValue(response, DataNodeMetricsServiceResponse.class);
266+
assertNotNull(pendingDeletion);
267+
assertEquals(30, pendingDeletion.getTotalPendingDeletionSize());
268+
assertEquals(DataNodeMetricsService.MetricCollectionStatus.FINISHED, pendingDeletion.getStatus());
269+
assertEquals(pendingDeletion.getTotalNodesQueried(), pendingDeletion.getPendingDeletionPerDataNode().size());
270+
assertEquals(0, pendingDeletion.getTotalNodeQueryFailures());
271+
pendingDeletion.getPendingDeletionPerDataNode().forEach(dn -> {
272+
assertEquals(10, dn.getPendingBlockSize());
273+
});
274+
return true;
275+
} catch (Throwable e) {
276+
LOG.debug("Waiting for storage distribution assertions to pass", e);
277+
return false;
278+
}
279+
}
280+
281+
private boolean verifyPendingDeletionClearsAtDn() {
282+
try {
283+
scm.getScmHAManager().asSCMHADBTransactionBuffer().flush();
284+
StringBuilder urlBuilder = new StringBuilder();
285+
urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=dn");
286+
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
287+
DataNodeMetricsServiceResponse pendingDeletion = MAPPER.readValue(response, DataNodeMetricsServiceResponse.class);
288+
assertNotNull(pendingDeletion);
289+
assertEquals(0, pendingDeletion.getTotalPendingDeletionSize());
290+
assertEquals(DataNodeMetricsService.MetricCollectionStatus.FINISHED, pendingDeletion.getStatus());
291+
assertEquals(pendingDeletion.getTotalNodesQueried(), pendingDeletion.getPendingDeletionPerDataNode().size());
292+
assertEquals(0, pendingDeletion.getTotalNodeQueryFailures());
293+
pendingDeletion.getPendingDeletionPerDataNode().forEach(dn -> {
294+
assertEquals(0, dn.getPendingBlockSize());
295+
});
296+
return true;
297+
} catch (Throwable e) {
298+
LOG.debug("Waiting for storage distribution assertions to pass", e);
299+
return false;
300+
}
301+
}
302+
303+
private boolean verifyPendingDeletionAfterKeyDeletionOnDnFailure() {
304+
try {
305+
StringBuilder urlBuilder = new StringBuilder();
306+
urlBuilder.append(getReconWebAddress(conf)).append(PENDING_DELETION_ENDPOINT).append("?component=dn");
307+
String response = TestReconEndpointUtil.makeHttpCall(conf, urlBuilder);
308+
DataNodeMetricsServiceResponse pendingDeletion = MAPPER.readValue(response, DataNodeMetricsServiceResponse.class);
309+
assertNotNull(pendingDeletion);
310+
assertEquals(1, pendingDeletion.getTotalNodeQueryFailures());
311+
assertTrue(pendingDeletion.getPendingDeletionPerDataNode()
312+
.stream()
313+
.anyMatch(dn -> dn.getPendingBlockSize() == -1));
314+
return true;
315+
} catch (Throwable e) {
316+
return false;
317+
}
206318
}
207319

208320
private void verifyBlocksCreated(
@@ -286,4 +398,12 @@ public static void tear() {
286398
cluster.shutdown();
287399
}
288400
}
401+
402+
private static void closeAllContainers() {
403+
for (ContainerInfo container :
404+
scm.getContainerManager().getContainers()) {
405+
scm.getEventQueue().fireEvent(SCMEvents.CLOSE_CONTAINER,
406+
container.containerID());
407+
}
408+
}
289409
}

hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/MetricsServiceProviderFactory.java

Lines changed: 29 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -17,12 +17,20 @@
1717

1818
package org.apache.hadoop.ozone.recon;
1919

20+
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT;
21+
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT;
22+
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT;
23+
import static org.apache.hadoop.ozone.recon.ReconServerConfigKeys.OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT;
24+
25+
import java.util.concurrent.TimeUnit;
2026
import javax.inject.Inject;
2127
import javax.inject.Singleton;
2228
import org.apache.commons.lang3.StringUtils;
2329
import org.apache.hadoop.hdds.conf.OzoneConfiguration;
2430
import org.apache.hadoop.hdds.recon.ReconConfigKeys;
31+
import org.apache.hadoop.hdfs.web.URLConnectionFactory;
2532
import org.apache.hadoop.ozone.recon.spi.MetricsServiceProvider;
33+
import org.apache.hadoop.ozone.recon.spi.impl.JmxServiceProviderImpl;
2634
import org.apache.hadoop.ozone.recon.spi.impl.PrometheusServiceProviderImpl;
2735
import org.slf4j.Logger;
2836
import org.slf4j.LoggerFactory;
@@ -39,12 +47,23 @@ public class MetricsServiceProviderFactory {
3947

4048
private OzoneConfiguration configuration;
4149
private ReconUtils reconUtils;
50+
private URLConnectionFactory connectionFactory;
4251

4352
@Inject
4453
public MetricsServiceProviderFactory(OzoneConfiguration configuration,
4554
ReconUtils reconUtils) {
4655
this.configuration = configuration;
4756
this.reconUtils = reconUtils;
57+
int connectionTimeout = (int) configuration.getTimeDuration(
58+
OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT,
59+
OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT,
60+
TimeUnit.MILLISECONDS);
61+
int connectionRequestTimeout = (int) configuration.getTimeDuration(
62+
OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT,
63+
OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT,
64+
TimeUnit.MILLISECONDS);
65+
connectionFactory = URLConnectionFactory.newDefaultURLConnectionFactory(connectionTimeout,
66+
connectionRequestTimeout, configuration);
4867
}
4968

5069
/**
@@ -62,11 +81,20 @@ public MetricsServiceProvider getMetricsServiceProvider() {
6281
String.format("Choosing Prometheus as Metrics service provider " +
6382
"with configured endpoint: %s", prometheusEndpoint));
6483
}
65-
return new PrometheusServiceProviderImpl(configuration, reconUtils);
84+
return new PrometheusServiceProviderImpl(configuration, reconUtils, connectionFactory);
6685
}
6786
return null;
6887
}
6988

89+
/**
90+
* Returns the configured MetricsServiceProvider implementation for Jmx.
91+
* @param endpoint
92+
* @return MetricsServiceProvider instance for Jmx
93+
*/
94+
public MetricsServiceProvider getJmxMetricsServiceProvider(String endpoint) {
95+
return new JmxServiceProviderImpl(reconUtils, endpoint, connectionFactory);
96+
}
97+
7098
/**
7199
* Returns the Prometheus endpoint if configured. Otherwise returns null.
72100
*

hadoop-ozone/recon/src/main/java/org/apache/hadoop/ozone/recon/ReconServerConfigKeys.java

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -123,14 +123,14 @@ public final class ReconServerConfigKeys {
123123

124124
public static final String
125125
OZONE_RECON_METRICS_HTTP_CONNECTION_TIMEOUT_DEFAULT =
126-
"10s";
126+
"30s";
127127

128128
public static final String
129129
OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT =
130130
"ozone.recon.metrics.http.connection.request.timeout";
131131

132132
public static final String
133-
OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "10s";
133+
OZONE_RECON_METRICS_HTTP_CONNECTION_REQUEST_TIMEOUT_DEFAULT = "60s";
134134

135135
public static final String OZONE_RECON_SCM_CONTAINER_THRESHOLD =
136136
"ozone.recon.scm.container.threshold";
@@ -213,6 +213,14 @@ public final class ReconServerConfigKeys {
213213
public static final int
214214
OZONE_RECON_SCM_CLIENT_FAILOVER_MAX_RETRY_DEFAULT = 3;
215215

216+
public static final String OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY =
217+
"ozone.recon.dn.metrics.collection.minimum.api.delay";
218+
public static final String OZONE_RECON_DN_METRICS_COLLECTION_MINIMUM_API_DELAY_DEFAULT = "30s";
219+
220+
public static final String OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT =
221+
"ozone.recon.dn.metrics.collection.timeout";
222+
public static final String OZONE_RECON_DN_METRICS_COLLECTION_TIMEOUT_DEFAULT = "10m";
223+
216224
/**
217225
* Private constructor for utility class.
218226
*/

0 commit comments

Comments
 (0)