This is an automated email from the ASF dual-hosted git repository.
adoroszlai pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/ozone.git
The following commit(s) were added to refs/heads/master by this push:
new 789c7f098a HDDS-10152. findbugs: Field should be static (#6168)
789c7f098a is described below
commit 789c7f098af18941b85ca92b09015ffb93b4ccb9
Author: Sarveksha Yeshavantha Raju <[email protected]>
AuthorDate: Tue Feb 6 22:16:38 2024 +0530
HDDS-10152. findbugs: Field should be static (#6168)
---
.../hdds/scm/storage/BlockDataStreamOutput.java | 6 +-
.../container/common/TestDatanodeStateMachine.java | 4 +-
.../hadoop/hdds/utils/TestRDBSnapshotProvider.java | 16 ++--
.../ozone/rocksdiff/RocksDBCheckpointDiffer.java | 6 +-
.../rocksdiff/TestRocksDBCheckpointDiffer.java | 46 +++++------
.../hadoop/hdds/scm/node/SCMNodeManager.java | 2 +-
.../ozone/om/protocolPB/TestS3GrpcOmTransport.java | 8 +-
.../apache/hadoop/ozone/MiniOzoneChaosCluster.java | 4 +-
.../apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java | 16 ++--
.../hadoop/hdds/upgrade/TestHDDSUpgrade.java | 10 +--
.../om/TestOzoneManagerListVolumesSecure.java | 96 +++++++++++-----------
.../org/apache/hadoop/fs/ozone/OzoneFsShell.java | 4 +-
12 files changed, 109 insertions(+), 109 deletions(-)
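For context on the warning being fixed: findbugs flags a final instance field whose initializer is a compile-time constant (the SS_SHOULD_BE_STATIC pattern, assuming that is the pattern behind this title), because every instance then carries its own copy of a value that can never vary. The fix applied throughout this patch is mechanical: promote the field to static final and rename it to UPPER_SNAKE_CASE per the Java constant naming convention. A minimal before/after sketch, with illustrative class and field names rather than code from the diff:

    class Before {
      private final int retryCount = 3; // findbugs: field should be static
    }

    class After {
      private static final int RETRY_COUNT = 3; // one copy shared by all instances
    }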
diff --git a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
index bfef03e87d..d5423d4ec0 100644
--- a/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
+++ b/hadoop-hdds/client/src/main/java/org/apache/hadoop/hdds/scm/storage/BlockDataStreamOutput.java
@@ -134,7 +134,7 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
private final DataStreamOutput out;
private CompletableFuture<DataStreamReply> dataStreamCloseReply;
private List<CompletableFuture<DataStreamReply>> futures = new ArrayList<>();
- private final long syncSize = 0; // TODO: disk sync is disabled for now
+ private static final long SYNC_SIZE = 0; // TODO: disk sync is disabled for now
private long syncPosition = 0;
private StreamBuffer currentBuffer;
private XceiverClientMetrics metrics;
@@ -630,9 +630,9 @@ public class BlockDataStreamOutput implements ByteBufferStreamOutput {
}
private boolean needSync(long position) {
- if (syncSize > 0) {
+ if (SYNC_SIZE > 0) {
// TODO: or position >= fileLength
- if (position - syncPosition >= syncSize) {
+ if (position - syncPosition >= SYNC_SIZE) {
syncPosition = position;
return true;
}
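A side note on this first hunk: SYNC_SIZE is a constant variable in the JLS sense (a static final primitive with a constant initializer), so the guard SYNC_SIZE > 0 in needSync folds to false at compile time and javac emits no bytecode for the guarded block; the language explicitly permits if on constant conditions for this kind of conditional compilation. A minimal standalone sketch of the idiom, with illustrative names rather than code from this patch:

    class SyncGate {
      private static final long SYNC_SIZE = 0; // sync disabled, as in the patch
      private long syncPosition = 0;

      boolean needSync(long position) {
        if (SYNC_SIZE > 0) { // compile-time false: block is compiled away
          if (position - syncPosition >= SYNC_SIZE) {
            syncPosition = position;
            return true;
          }
        }
        return false;
      }
    }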
diff --git a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
index 4f5b87dd3e..5738f5c110 100644
--- a/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
+++ b/hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestDatanodeStateMachine.java
@@ -68,7 +68,7 @@ public class TestDatanodeStateMachine {
LoggerFactory.getLogger(TestDatanodeStateMachine.class);
// Changing it to 1, as current code checks for multiple scm directories,
// and fail if exists
- private final int scmServerCount = 1;
+ private static final int SCM_SERVER_COUNT = 1;
private List<String> serverAddresses;
private List<RPC.Server> scmServers;
private List<ScmTestMock> mockServers;
@@ -91,7 +91,7 @@ public class TestDatanodeStateMachine {
serverAddresses = new ArrayList<>();
scmServers = new ArrayList<>();
mockServers = new ArrayList<>();
- for (int x = 0; x < scmServerCount; x++) {
+ for (int x = 0; x < SCM_SERVER_COUNT; x++) {
int port = SCMTestUtils.getReuseableAddress().getPort();
String address = "127.0.0.1";
serverAddresses.add(address + ":" + port);
diff --git a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
index baf39bd257..9edbe4b3fc 100644
--- a/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
+++ b/hadoop-hdds/framework/src/test/java/org/apache/hadoop/hdds/utils/TestRDBSnapshotProvider.java
@@ -81,8 +81,8 @@ public class TestRDBSnapshotProvider {
private Set<TableConfig> configSet;
private RDBSnapshotProvider rdbSnapshotProvider;
private File testDir;
- private final int numUsedCF = 3;
- private final String leaderId = "leaderNode-1";
+ private static final int NUM_USED_CF = 3;
+ private static final String LEADER_ID = "leaderNode-1";
private final AtomicReference<DBCheckpoint> latestCK =
new AtomicReference<>(null);
@@ -109,7 +109,7 @@ public class TestRDBSnapshotProvider {
public void downloadSnapshot(String leaderNodeID, File targetFile)
throws IOException {
for (int i = 0; i < 10; i++) {
- insertDataToDB(numUsedCF);
+ insertDataToDB(NUM_USED_CF);
}
DBCheckpoint dbCheckpoint = rdbStore.getCheckpoint(true);
latestCK.set(dbCheckpoint);
@@ -151,30 +151,30 @@ public class TestRDBSnapshotProvider {
assertEquals(0, before);
// Get first snapshot
- checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(leaderId);
+ checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID);
File checkpointDir = checkpoint.getCheckpointLocation().toFile();
assertEquals(candidateDir, checkpointDir);
int first = HAUtils.getExistingSstFiles(
rdbSnapshotProvider.getCandidateDir()).size();
// Get second snapshot
- checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(leaderId);
+ checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID);
int second = HAUtils.getExistingSstFiles(
rdbSnapshotProvider.getCandidateDir()).size();
assertThat(second).withFailMessage("The second snapshot should have more SST files")
.isGreaterThan(first);
DBCheckpoint latestCheckpoint = latestCK.get();
compareDB(latestCheckpoint.getCheckpointLocation().toFile(),
- checkpoint.getCheckpointLocation().toFile(), numUsedCF);
+ checkpoint.getCheckpointLocation().toFile(), NUM_USED_CF);
// Get third snapshot
- checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(leaderId);
+ checkpoint = rdbSnapshotProvider.downloadDBSnapshotFromLeader(LEADER_ID);
int third = HAUtils.getExistingSstFiles(
rdbSnapshotProvider.getCandidateDir()).size();
assertThat(third).withFailMessage("The third snapshot should have more SST files")
.isGreaterThan(second);
compareDB(latestCK.get().getCheckpointLocation().toFile(),
- checkpoint.getCheckpointLocation().toFile(), numUsedCF);
+ checkpoint.getCheckpointLocation().toFile(), NUM_USED_CF);
// Test cleanup candidateDB
rdbSnapshotProvider.init();
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
index 5e612d8b20..97d015fb23 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/main/java/org/apache/ozone/rocksdiff/RocksDBCheckpointDiffer.java
@@ -172,7 +172,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
private ColumnFamilyHandle snapshotInfoTableCFHandle;
private final AtomicInteger tarballRequestCount;
- private final String dagPruningServiceName = "CompactionDagPruningService";
+ private static final String DAG_PRUNING_SERVICE_NAME = "CompactionDagPruningService";
private AtomicBoolean suspended;
private ColumnFamilyHandle compactionLogTableCFHandle;
@@ -230,7 +230,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
TimeUnit.MILLISECONDS);
if (pruneCompactionDagDaemonRunIntervalInMs > 0) {
- this.scheduler = new Scheduler(dagPruningServiceName,
+ this.scheduler = new Scheduler(DAG_PRUNING_SERVICE_NAME,
true, 1);
this.scheduler.scheduleWithFixedDelay(
@@ -307,7 +307,7 @@ public class RocksDBCheckpointDiffer implements AutoCloseable,
if (!closed) {
closed = true;
if (scheduler != null) {
- LOG.info("Shutting down {}.", dagPruningServiceName);
+ LOG.info("Shutting down {}.", DAG_PRUNING_SERVICE_NAME);
scheduler.close();
}
}
diff --git a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
index 6dded6cb57..b01e4cc2e3 100644
--- a/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
+++ b/hadoop-hdds/rocksdb-checkpoint-differ/src/test/java/org/apache/ozone/rocksdiff/TestRocksDBCheckpointDiffer.java
@@ -126,10 +126,10 @@ public class TestRocksDBCheckpointDiffer {
private final List<List<ColumnFamilyHandle>> colHandles = new ArrayList<>();
- private final String activeDbDirName = "./rocksdb-data";
- private final String metadataDirName = "./metadata";
- private final String compactionLogDirName = "compaction-log";
- private final String sstBackUpDirName = "compaction-sst-backup";
+ private static final String ACTIVE_DB_DIR_NAME = "./rocksdb-data";
+ private static final String METADATA_DIR_NAME = "./metadata";
+ private static final String COMPACTION_LOG_DIR_NAME = "compaction-log";
+ private static final String SST_BACK_UP_DIR_NAME = "compaction-sst-backup";
private File activeDbDir;
private File metadataDirDir;
private File compactionLogDir;
@@ -150,17 +150,17 @@ public class TestRocksDBCheckpointDiffer {
// Test class log level. Set to DEBUG for verbose output
GenericTestUtils.setLogLevel(TestRocksDBCheckpointDiffer.LOG, Level.INFO);
- activeDbDir = new File(activeDbDirName);
- createDir(activeDbDir, activeDbDirName);
+ activeDbDir = new File(ACTIVE_DB_DIR_NAME);
+ createDir(activeDbDir, ACTIVE_DB_DIR_NAME);
- metadataDirDir = new File(metadataDirName);
- createDir(metadataDirDir, metadataDirName);
+ metadataDirDir = new File(METADATA_DIR_NAME);
+ createDir(metadataDirDir, METADATA_DIR_NAME);
- compactionLogDir = new File(metadataDirName, compactionLogDirName);
- createDir(compactionLogDir, metadataDirName + "/" + compactionLogDirName);
+ compactionLogDir = new File(METADATA_DIR_NAME, COMPACTION_LOG_DIR_NAME);
+ createDir(compactionLogDir, METADATA_DIR_NAME + "/" + COMPACTION_LOG_DIR_NAME);
- sstBackUpDir = new File(metadataDirName, sstBackUpDirName);
- createDir(sstBackUpDir, metadataDirName + "/" + sstBackUpDirName);
+ sstBackUpDir = new File(METADATA_DIR_NAME, SST_BACK_UP_DIR_NAME);
+ createDir(sstBackUpDir, METADATA_DIR_NAME + "/" + SST_BACK_UP_DIR_NAME);
config = mock(ConfigurationSource.class);
@@ -174,10 +174,10 @@ public class TestRocksDBCheckpointDiffer {
OZONE_OM_SNAPSHOT_PRUNE_COMPACTION_DAG_DAEMON_RUN_INTERVAL_DEFAULT,
TimeUnit.MILLISECONDS)).thenReturn(0L);
- rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(metadataDirName,
- sstBackUpDirName,
- compactionLogDirName,
- activeDbDirName,
+ rocksDBCheckpointDiffer = new RocksDBCheckpointDiffer(METADATA_DIR_NAME,
+ SST_BACK_UP_DIR_NAME,
+ COMPACTION_LOG_DIR_NAME,
+ ACTIVE_DB_DIR_NAME,
config);
ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
@@ -189,7 +189,7 @@ public class TestRocksDBCheckpointDiffer {
.setCreateMissingColumnFamilies(true);
rocksDBCheckpointDiffer.setRocksDBForCompactionTracking(dbOptions);
- activeRocksDB = RocksDB.open(dbOptions, activeDbDirName, cfDescriptors,
+ activeRocksDB = RocksDB.open(dbOptions, ACTIVE_DB_DIR_NAME, cfDescriptors,
cfHandles);
keyTableCFHandle = cfHandles.get(1);
directoryTableCFHandle = cfHandles.get(2);
@@ -518,7 +518,7 @@ public class TestRocksDBCheckpointDiffer {
@Test
void testDifferWithDB() throws Exception {
writeKeysAndCheckpointing();
- readRocksDBInstance(activeDbDirName, activeRocksDB, null,
+ readRocksDBInstance(ACTIVE_DB_DIR_NAME, activeRocksDB, null,
rocksDBCheckpointDiffer);
if (LOG.isDebugEnabled()) {
@@ -614,7 +614,7 @@ public class TestRocksDBCheckpointDiffer {
}
cpDirList.add(dir);
- createCheckPoint(activeDbDirName, cpPath, rocksDB);
+ createCheckPoint(ACTIVE_DB_DIR_NAME, cpPath, rocksDB);
final UUID snapshotId = UUID.randomUUID();
List<ColumnFamilyHandle> colHandle = new ArrayList<>();
colHandles.add(colHandle);
@@ -1273,7 +1273,7 @@ public class TestRocksDBCheckpointDiffer {
if (compactionLogs != null) {
for (int i = 0; i < compactionLogs.size(); i++) {
- String compactionFileName = metadataDirName + "/" + compactionLogDirName
+ String compactionFileName = METADATA_DIR_NAME + "/" + COMPACTION_LOG_DIR_NAME
+ "/0000" + i + COMPACTION_LOG_FILE_NAME_SUFFIX;
File compactionFile = new File(compactionFileName);
Files.write(compactionFile.toPath(),
@@ -1491,8 +1491,8 @@ public class TestRocksDBCheckpointDiffer {
Path compactionLogFilePath = null;
if (compactionLog != null) {
- String compactionLogFileName = metadataDirName + "/" +
- compactionLogDirName + "/compaction_log" +
+ String compactionLogFileName = METADATA_DIR_NAME + "/" +
+ COMPACTION_LOG_DIR_NAME + "/compaction_log" +
COMPACTION_LOG_FILE_NAME_SUFFIX;
compactionLogFilePath = new File(compactionLogFileName).toPath();
createFileWithContext(compactionLogFileName, compactionLog);
@@ -1512,7 +1512,7 @@ public class TestRocksDBCheckpointDiffer {
Set<String> actualFileSetAfterPruning;
try (Stream<Path> pathStream = Files.list(
- Paths.get(metadataDirName + "/" + sstBackUpDirName))
+ Paths.get(METADATA_DIR_NAME + "/" + SST_BACK_UP_DIR_NAME))
.filter(e -> e.toString().toLowerCase()
.endsWith(SST_FILE_EXTENSION))
.sorted()) {
diff --git a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
index 152c8fd659..a149998db8 100644
--- a/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
+++ b/hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
@@ -272,7 +272,7 @@ public class SCMNodeManager implements NodeManager {
* Returns the Number of Datanodes by State they are in. Passing null for
* either of the states acts like a wildcard for that state.
*
- * @parem nodeOpState - The Operational State of the node
+ * @param nodeOpState - The Operational State of the node
* @param health - The health of the node
* @return count
*/
diff --git a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
index 10bb155d70..3d73a42e69 100644
--- a/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
+++ b/hadoop-ozone/common/src/test/java/org/apache/hadoop/ozone/om/protocolPB/TestS3GrpcOmTransport.java
@@ -60,13 +60,13 @@ public class TestS3GrpcOmTransport {
private static final Logger LOG =
LoggerFactory.getLogger(TestS3GrpcOmTransport.class);
- private final String leaderOMNodeId = "TestOM";
+ private static final String LEADER_OM_NODE_ID = "TestOM";
private final OMResponse omResponse = OMResponse.newBuilder()
.setSuccess(true)
.setStatus(org.apache.hadoop.ozone.protocol
.proto.OzoneManagerProtocolProtos.Status.OK)
- .setLeaderOMNodeId(leaderOMNodeId)
+ .setLeaderOMNodeId(LEADER_OM_NODE_ID)
.setCmdType(Type.AllocateBlock)
.build();
@@ -167,7 +167,7 @@ public class TestS3GrpcOmTransport {
final OMResponse resp = client.submitRequest(omRequest);
assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
.proto.OzoneManagerProtocolProtos.Status.OK);
- assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
+ assertEquals(resp.getLeaderOMNodeId(), LEADER_OM_NODE_ID);
}
@Test
@@ -191,7 +191,7 @@ public class TestS3GrpcOmTransport {
final OMResponse resp = client.submitRequest(omRequest);
assertEquals(resp.getStatus(), org.apache.hadoop.ozone.protocol
.proto.OzoneManagerProtocolProtos.Status.OK);
- assertEquals(resp.getLeaderOMNodeId(), leaderOMNodeId);
+ assertEquals(resp.getLeaderOMNodeId(), LEADER_OM_NODE_ID);
}
@Test
diff --git a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
index 1ffed5323a..f212570fc5 100644
--- a/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
+++ b/hadoop-ozone/fault-injection-test/mini-chaos-tests/src/test/java/org/apache/hadoop/ozone/MiniOzoneChaosCluster.java
@@ -63,7 +63,7 @@ public class MiniOzoneChaosCluster extends MiniOzoneHAClusterImpl {
private final FailureManager failureManager;
- private final int waitForClusterToBeReadyTimeout = 120000; // 2 min
+ private static final int WAIT_FOR_CLUSTER_TO_BE_READY_TIMEOUT = 120000; // 2 min
private final Set<OzoneManager> failedOmSet;
private final Set<StorageContainerManager> failedScmSet;
@@ -158,7 +158,7 @@ public class MiniOzoneChaosCluster extends MiniOzoneHAClusterImpl {
}
}
return true;
- }, 1000, waitForClusterToBeReadyTimeout);
+ }, 1000, WAIT_FOR_CLUSTER_TO_BE_READY_TIMEOUT);
}
/**
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
index 3ade5b62c3..47dc9ac0c3 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFsHAURLs.java
@@ -81,16 +81,16 @@ public class TestOzoneFsHAURLs {
private String bucketName;
private String rootPath;
- private final String o3fsImplKey =
+ private static final String O3FS_IMPL_KEY =
"fs." + OzoneConsts.OZONE_URI_SCHEME + ".impl";
- private final String o3fsImplValue =
+ private static final String O3FS_IMPL_VALUE =
"org.apache.hadoop.fs.ozone.OzoneFileSystem";
private static OzoneClient client;
- private final String ofsImplKey =
+ private static final String OFS_IMPL_KEY =
"fs." + OzoneConsts.OZONE_OFS_URI_SCHEME + ".impl";
- private final String ofsImplValue =
+ private static final String OFS_IMPL_VALUE =
"org.apache.hadoop.fs.ozone.RootedOzoneFileSystem";
@@ -204,7 +204,7 @@ public class TestOzoneFsHAURLs {
public void testWithQualifiedDefaultFS() throws Exception {
OzoneConfiguration clientConf = new OzoneConfiguration(conf);
clientConf.setQuietMode(false);
- clientConf.set(o3fsImplKey, o3fsImplValue);
+ clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE);
// fs.defaultFS = o3fs://bucketName.volumeName.omServiceId/
clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
rootPath);
@@ -299,7 +299,7 @@ public class TestOzoneFsHAURLs {
private void testWithDefaultFS(String defaultFS) throws Exception {
OzoneConfiguration clientConf = new OzoneConfiguration(conf);
clientConf.setQuietMode(false);
- clientConf.set(o3fsImplKey, o3fsImplValue);
+ clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE);
// fs.defaultFS = file:///
clientConf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
defaultFS);
@@ -344,8 +344,8 @@ public class TestOzoneFsHAURLs {
public void testIncorrectAuthorityInURI() throws Exception {
OzoneConfiguration clientConf = new OzoneConfiguration(conf);
clientConf.setQuietMode(false);
- clientConf.set(o3fsImplKey, o3fsImplValue);
- clientConf.set(ofsImplKey, ofsImplValue);
+ clientConf.set(O3FS_IMPL_KEY, O3FS_IMPL_VALUE);
+ clientConf.set(OFS_IMPL_KEY, OFS_IMPL_VALUE);
FsShell shell = new FsShell(clientConf);
String incorrectSvcId = "dummy";
String o3fsPathWithCorrectSvcId =
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
index caf9cadb16..57e807b7c7 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/hdds/upgrade/TestHDDSUpgrade.java
@@ -115,7 +115,7 @@ public class TestHDDSUpgrade {
private StorageContainerManager scm;
private ContainerManager scmContainerManager;
private PipelineManager scmPipelineManager;
- private final int numContainersCreated = 1;
+ private static final int NUM_CONTAINERS_CREATED = 1;
private HDDSLayoutVersionManager scmVersionManager;
private AtomicBoolean testPassed = new AtomicBoolean(true);
private static
@@ -316,7 +316,7 @@ public class TestHDDSUpgrade {
// Verify Post-Upgrade conditions on the SCM.
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM(
cluster.getStorageContainerManagersList(),
- numContainersCreated, NUM_DATA_NODES);
+ NUM_CONTAINERS_CREATED, NUM_DATA_NODES);
// All datanodes on the SCM should have moved to HEALTHY-READONLY state.
TestHddsUpgradeUtils.testDataNodesStateOnSCM(
@@ -327,7 +327,7 @@ public class TestHDDSUpgrade {
// In the happy path case, no containers should have been quasi closed as
// a result of the upgrade.
TestHddsUpgradeUtils.testPostUpgradeConditionsDataNodes(
- cluster.getHddsDatanodes(), numContainersCreated, CLOSED);
+ cluster.getHddsDatanodes(), NUM_CONTAINERS_CREATED, CLOSED);
// Test that we can use a pipeline after upgrade.
// Will fail with exception if there are no pipelines.
@@ -871,7 +871,7 @@ public class TestHDDSUpgrade {
// Verify Post-Upgrade conditions on the SCM.
// With failure injection
TestHddsUpgradeUtils.testPostUpgradeConditionsSCM(
- cluster.getStorageContainerManagersList(), numContainersCreated,
+ cluster.getStorageContainerManagersList(), NUM_CONTAINERS_CREATED,
NUM_DATA_NODES);
// All datanodes on the SCM should have moved to HEALTHY-READONLY state.
@@ -898,7 +898,7 @@ public class TestHDDSUpgrade {
// Verify the SCM has driven all the DataNodes through Layout Upgrade.
TestHddsUpgradeUtils.testPostUpgradeConditionsDataNodes(
- cluster.getHddsDatanodes(), numContainersCreated);
+ cluster.getHddsDatanodes(), NUM_CONTAINERS_CREATED);
// Verify that new pipeline can be created with upgraded datanodes.
try {
diff --git a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java
index 1c751bc99a..41f1c14f37 100644
--- a/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java
+++ b/hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOzoneManagerListVolumesSecure.java
@@ -83,7 +83,7 @@ public class TestOzoneManagerListVolumesSecure {
private OzoneManager om;
private static final String OM_CERT_SERIAL_ID = "9879877970576";
- private final String adminUser = "om";
+ private static final String ADMIN_USER = "om";
private String adminPrincipal;
private String adminPrincipalInOtherHost;
private File adminKeytab;
@@ -91,8 +91,8 @@ public class TestOzoneManagerListVolumesSecure {
private UserGroupInformation adminUGI;
private UserGroupInformation adminInOtherHostUGI;
- private final String user1 = "user1";
- private final String user2 = "user2";
+ private static final String USER_1 = "user1";
+ private static final String USER_2 = "user2";
private String userPrincipal1;
private String userPrincipal2;
private File userKeytab1;
@@ -142,18 +142,18 @@ public class TestOzoneManagerListVolumesSecure {
String host = InetAddress.getLocalHost()
.getCanonicalHostName().toLowerCase();
String hostAndRealm = host + "@" + this.realm;
- this.adminPrincipal = adminUser + "/" + hostAndRealm;
- this.adminPrincipalInOtherHost = adminUser + "/otherhost@" + this.realm;
- this.adminKeytab = new File(workDir, adminUser + ".keytab");
- this.adminKeytabInOtherHost = new File(workDir, adminUser +
+ this.adminPrincipal = ADMIN_USER + "/" + hostAndRealm;
+ this.adminPrincipalInOtherHost = ADMIN_USER + "/otherhost@" + this.realm;
+ this.adminKeytab = new File(workDir, ADMIN_USER + ".keytab");
+ this.adminKeytabInOtherHost = new File(workDir, ADMIN_USER +
"InOtherHost.keytab");
createPrincipal(this.adminKeytab, adminPrincipal);
createPrincipal(this.adminKeytabInOtherHost, adminPrincipalInOtherHost);
- this.userPrincipal1 = this.user1 + "/" + hostAndRealm;
- this.userPrincipal2 = this.user2 + "/" + hostAndRealm;
- this.userKeytab1 = new File(workDir, this.user1 + ".keytab");
- this.userKeytab2 = new File(workDir, this.user2 + ".keytab");
+ this.userPrincipal1 = USER_1 + "/" + hostAndRealm;
+ this.userPrincipal2 = USER_2 + "/" + hostAndRealm;
+ this.userKeytab1 = new File(workDir, USER_1 + ".keytab");
+ this.userKeytab2 = new File(workDir, USER_2 + ".keytab");
createPrincipal(this.userKeytab1, userPrincipal1);
createPrincipal(this.userKeytab2, userPrincipal2);
}
@@ -212,12 +212,12 @@ public class TestOzoneManagerListVolumesSecure {
String aclUser1All = "user:user1:a";
String aclUser2All = "user:user2:a";
String aclWorldAll = "world::a";
- createVolumeWithOwnerAndAcl(omClient, "volume1", user1, aclUser1All);
- createVolumeWithOwnerAndAcl(omClient, "volume2", user2, aclUser2All);
- createVolumeWithOwnerAndAcl(omClient, "volume3", user1, aclUser2All);
- createVolumeWithOwnerAndAcl(omClient, "volume4", user2, aclUser1All);
- createVolumeWithOwnerAndAcl(omClient, "volume5", user1, aclWorldAll);
- createVolumeWithOwnerAndAcl(omClient, "volume6", adminUser, null);
+ createVolumeWithOwnerAndAcl(omClient, "volume1", USER_1, aclUser1All);
+ createVolumeWithOwnerAndAcl(omClient, "volume2", USER_2, aclUser2All);
+ createVolumeWithOwnerAndAcl(omClient, "volume3", USER_1, aclUser2All);
+ createVolumeWithOwnerAndAcl(omClient, "volume4", USER_2, aclUser1All);
+ createVolumeWithOwnerAndAcl(omClient, "volume5", USER_1, aclWorldAll);
+ createVolumeWithOwnerAndAcl(omClient, "volume6", ADMIN_USER, null);
omClient.close();
}
@@ -226,7 +226,7 @@ public class TestOzoneManagerListVolumesSecure {
String ownerName, String aclString) throws IOException {
// Create volume use adminUgi
OmVolumeArgs.Builder builder =
- OmVolumeArgs.newBuilder().setVolume(volumeName).setAdminName(adminUser);
+ OmVolumeArgs.newBuilder().setVolume(volumeName).setAdminName(ADMIN_USER);
if (!Strings.isNullOrEmpty(ownerName)) {
builder.setOwnerName(ownerName);
}
@@ -315,9 +315,9 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user1, list other users' volumes
doAs(userUGI1, () -> {
- checkUser(user2, Arrays.asList("volume2", "volume3", "volume4",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4",
"volume5"), true);
- checkUser(adminUser, Arrays
+ checkUser(ADMIN_USER, Arrays
.asList("volume1", "volume2", "volume3", "volume4", "volume5",
"volume6", "s3v"), true);
return true;
@@ -325,9 +325,9 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user2, list other users' volumes
doAs(userUGI2, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume4",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4",
"volume5"), true);
- checkUser(adminUser, Arrays
+ checkUser(ADMIN_USER, Arrays
.asList("volume1", "volume2", "volume3", "volume4", "volume5",
"volume6", "s3v"), true);
return true;
@@ -335,18 +335,18 @@ public class TestOzoneManagerListVolumesSecure {
// Login as admin, list other users' volumes
doAs(adminUGI, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume4",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4",
"volume5"), true);
- checkUser(user2, Arrays.asList("volume2", "volume3", "volume4",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4",
"volume5"), true);
return true;
});
// Login as admin in other host, list other users' volumes
doAs(adminInOtherHostUGI, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3",
"volume4", "volume5"), true);
- checkUser(user2, Arrays.asList("volume2", "volume3",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3",
"volume4", "volume5"), true);
return true;
});
@@ -363,18 +363,18 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user1, list other users' volumes, expect failure
doAs(userUGI1, () -> {
- checkUser(user2, Arrays.asList("volume2", "volume3", "volume4",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4",
"volume5"), false);
- checkUser(adminUser, Arrays.asList("volume1", "volume2", "volume3",
+ checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2", "volume3",
"volume4", "volume5", "volume6", "s3v"), false);
return true;
});
// Login as user2, list other users' volumes, expect failure
doAs(userUGI2, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume4",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4",
"volume5"), false);
- checkUser(adminUser,
+ checkUser(ADMIN_USER,
Arrays.asList("volume1", "volume2", "volume3",
"volume4", "volume5", "volume6", "s3v"), false);
return true;
@@ -382,18 +382,18 @@ public class TestOzoneManagerListVolumesSecure {
// While admin should be able to list volumes just fine.
doAs(adminUGI, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume4",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4",
"volume5"), true);
- checkUser(user2, Arrays.asList("volume2", "volume3", "volume4",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4",
"volume5"), true);
return true;
});
// While admin in other host should be able to list volumes just fine.
doAs(adminInOtherHostUGI, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3",
"volume4", "volume5"), true);
- checkUser(user2, Arrays.asList("volume2", "volume3",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3",
"volume4", "volume5"), true);
return true;
});
@@ -405,28 +405,28 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user1, list their own volumes
doAs(userUGI1, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume4",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4",
"volume5"), true);
return true;
});
// Login as user2, list their own volumes
doAs(userUGI2, () -> {
- checkUser(user2, Arrays.asList("volume2", "volume3", "volume4",
+ checkUser(USER_2, Arrays.asList("volume2", "volume3", "volume4",
"volume5"), true);
return true;
});
// Login as admin, list their own volumes
doAs(adminUGI, () -> {
- checkUser(adminUser, Arrays.asList("volume1", "volume2", "volume3",
+ checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2", "volume3",
"volume4", "volume5", "volume6", "s3v"), true);
return true;
});
// Login as admin in other host, list their own volumes
doAs(adminInOtherHostUGI, () -> {
- checkUser(adminUser, Arrays.asList("volume1", "volume2",
+ checkUser(ADMIN_USER, Arrays.asList("volume1", "volume2",
"volume3", "volume4", "volume5", "volume6", "s3v"), true);
return true;
});
@@ -438,12 +438,12 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user1, list their own volumes
doAs(userUGI1, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume4",
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume4",
"volume5"), false);
return true;
});
- // Login as user2, list their own volumes
+ // Login as USER_2, list their own volumes
doAs(userUGI2, () -> {
checkUser(userPrincipal2, Arrays.asList("volume2", "volume3",
"volume4", "volume5"), false);
@@ -473,26 +473,26 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user1, list their own volumes
doAs(userUGI1, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume5"),
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume5"),
true);
return true;
});
// Login as user2, list their own volumes
doAs(userUGI2, () -> {
- checkUser(user2, Arrays.asList("volume2", "volume4"),
+ checkUser(USER_2, Arrays.asList("volume2", "volume4"),
true);
return true;
});
doAs(adminUGI, () -> {
- checkUser(adminUser, Arrays.asList("volume6", "s3v"), true);
+ checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"), true);
return true;
});
// Login as admin in other host, list their own volumes
doAs(adminInOtherHostUGI, () -> {
- checkUser(adminUser, Arrays.asList("volume6", "s3v"),
+ checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"),
true);
return true;
});
@@ -504,26 +504,26 @@ public class TestOzoneManagerListVolumesSecure {
// Login as user1, list their own volumes
doAs(userUGI1, () -> {
- checkUser(user1, Arrays.asList("volume1", "volume3", "volume5"),
+ checkUser(USER_1, Arrays.asList("volume1", "volume3", "volume5"),
true);
return true;
});
// Login as user2, list their own volumes
doAs(userUGI2, () -> {
- checkUser(user2, Arrays.asList("volume2", "volume4"),
+ checkUser(USER_2, Arrays.asList("volume2", "volume4"),
true);
return true;
});
doAs(adminUGI, () -> {
- checkUser(adminUser, Arrays.asList("volume6", "s3v"), true);
+ checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"), true);
return true;
});
// Login as admin in other host, list their own volumes
doAs(adminInOtherHostUGI, () -> {
- checkUser(adminUser, Arrays.asList("volume6", "s3v"),
+ checkUser(ADMIN_USER, Arrays.asList("volume6", "s3v"),
true);
return true;
});
diff --git a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
index 1cde5fecb9..a8b963a7ab 100644
--- a/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
+++ b/hadoop-ozone/ozonefs-common/src/main/java/org/apache/hadoop/fs/ozone/OzoneFsShell.java
@@ -31,7 +31,7 @@ import org.apache.hadoop.util.ToolRunner;
@InterfaceAudience.Private
public class OzoneFsShell extends FsShell {
- private final String ozoneUsagePrefix = "Usage: ozone fs [generic options]";
+ private static final String OZONE_USAGE_PREFIX = "Usage: ozone fs [generic options]";
/**
* Default ctor with no configuration. Be sure to invoke
@@ -66,7 +66,7 @@ public class OzoneFsShell extends FsShell {
@Override
protected String getUsagePrefix() {
- return ozoneUsagePrefix;
+ return OZONE_USAGE_PREFIX;
}
/**
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]