peterxcli commented on code in PR #8251:
URL: https://github.com/apache/ozone/pull/8251#discussion_r2048182925
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -230,43 +230,55 @@ public void join() throws InterruptedException {
public ContainerWithPipeline allocateContainer(HddsProtos.ReplicationType
replicationType, HddsProtos.ReplicationFactor factor,
String owner) throws IOException {
- if (scm.getScmContext().isInSafeMode()) {
- throw new SCMException("SafeModePrecheck failed for allocateContainer",
- ResultCodes.SAFE_MODE_EXCEPTION);
+
+ Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("replicationType", String.valueOf(replicationType));
+ auditMap.put("factor", String.valueOf(factor));
+ auditMap.put("owner", String.valueOf(owner));
+
+ try {
+ if (scm.getScmContext().isInSafeMode()) {
+ throw new SCMException("SafeModePrecheck failed for allocateContainer",
+ ResultCodes.SAFE_MODE_EXCEPTION);
+ }
+ getScm().checkAdminAccess(getRemoteUser(), false);
+ final ContainerInfo container = scm.getContainerManager()
+ .allocateContainer(
+ ReplicationConfig.fromProtoTypeAndFactor(replicationType,
factor),
+ owner);
+ final Pipeline pipeline = scm.getPipelineManager()
+ .getPipeline(container.getPipelineID());
+ ContainerWithPipeline cp = new ContainerWithPipeline(container,
pipeline);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.ALLOCATE_CONTAINER, auditMap)
+ );
+ return cp;
+ } catch (Exception ex) {
Review Comment:
nit
```suggestion
} catch (IOException ex) {
```
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -605,16 +643,21 @@ public List<HddsProtos.Node> queryNode(
.addNodeOperationalStates(ns.getOperationalState())
.build());
} catch (NodeNotFoundException e) {
- throw new IOException(
- "An unexpected error occurred querying the NodeStatus", e);
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.QUERY_NODE, auditMap, e));
+ throw new IOException("An unexpected error occurred querying the
NodeStatus", e);
}
}
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.QUERY_NODE, auditMap));
Review Comment:
same question.
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -815,33 +902,29 @@ public ScmInfo getScmInfo() {
.setClusterId(scm.getScmStorageConfig().getClusterID())
.setScmId(scm.getScmStorageConfig().getScmId())
.setPeerRoles(scm.getScmHAManager().getRatisServer().getRatisRoles());
- return builder.build();
+ ScmInfo info = builder.build();
Review Comment:
please remove the above `boolean auditSuccess = true;` (I can't comment
there)
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -627,52 +670,78 @@ public HddsProtos.Node queryNode(UUID uuid)
.build();
}
} catch (NodeNotFoundException e) {
- throw new IOException(
+ IOException ex = new IOException(
"An unexpected error occurred querying the NodeStatus", e);
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.QUERY_NODE, auditMap, ex));
+ throw ex;
}
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.QUERY_NODE, auditMap));
return result;
}
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> nodes,
boolean force)
throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
+ auditMap.put("force", String.valueOf(force));
+
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ List<DatanodeAdminError> result =
scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.DECOMMISSION_NODES, auditMap));
+ return result;
} catch (Exception ex) {
- LOG.error("Failed to decommission nodes", ex);
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(
+ SCMAction.DECOMMISSION_NODES, auditMap, ex));
throw ex;
}
}
@Override
public List<DatanodeAdminError> recommissionNodes(List<String> nodes)
throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager().recommissionNodes(nodes);
+ List<DatanodeAdminError> result =
scm.getScmDecommissionManager().recommissionNodes(nodes);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.DECOMMISSION_NODES, auditMap));
+ return result;
} catch (Exception ex) {
Review Comment:
ditto
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -605,16 +643,21 @@ public List<HddsProtos.Node> queryNode(
.addNodeOperationalStates(ns.getOperationalState())
.build());
} catch (NodeNotFoundException e) {
- throw new IOException(
- "An unexpected error occurred querying the NodeStatus", e);
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.QUERY_NODE, auditMap, e));
+ throw new IOException("An unexpected error occurred querying the
NodeStatus", e);
Review Comment:
Should this be:
```suggestion
IOException ex = new IOException("An unexpected error occurred
querying the NodeStatus", e);
AUDIT.logReadFailure(buildAuditMessageForFailure(
SCMAction.QUERY_NODE, auditMap, ex));
          throw ex;
```
or just keep it?
I do see different behaviour on this.
https://github.com/apache/ozone/pull/8251/files#diff-02d31f948c607eae697c655f6ec08e36eb5aefec42f57dc53c50751aaadda2e4R672-R680
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -326,22 +336,35 @@ public ContainerWithPipeline
getContainerWithPipeline(long containerID)
public List<HddsProtos.SCMContainerReplicaProto> getContainerReplicas(
long containerId, int clientVersion) throws IOException {
List<HddsProtos.SCMContainerReplicaProto> results = new ArrayList<>();
+ Map<String, String> auditMap = new HashMap<>();
+ auditMap.put("containerId", String.valueOf(containerId));
+ auditMap.put("clientVersion", String.valueOf(clientVersion));
- Set<ContainerReplica> replicas = getScm().getContainerManager()
- .getContainerReplicas(ContainerID.valueOf(containerId));
- for (ContainerReplica r : replicas) {
- results.add(
- HddsProtos.SCMContainerReplicaProto.newBuilder()
- .setContainerID(containerId)
- .setState(r.getState().toString())
-
.setDatanodeDetails(r.getDatanodeDetails().toProto(clientVersion))
- .setBytesUsed(r.getBytesUsed())
- .setPlaceOfBirth(r.getOriginDatanodeId().toString())
- .setKeyCount(r.getKeyCount())
- .setSequenceID(r.getSequenceId())
- .setReplicaIndex(r.getReplicaIndex()).build()
- );
+ try {
+ Set<ContainerReplica> replicas = getScm().getContainerManager()
+ .getContainerReplicas(ContainerID.valueOf(containerId));
+ for (ContainerReplica r : replicas) {
+ results.add(
+ HddsProtos.SCMContainerReplicaProto.newBuilder()
+ .setContainerID(containerId)
+ .setState(r.getState().toString())
+
.setDatanodeDetails(r.getDatanodeDetails().toProto(clientVersion))
+ .setBytesUsed(r.getBytesUsed())
+ .setPlaceOfBirth(r.getOriginDatanodeId().toString())
+ .setKeyCount(r.getKeyCount())
+ .setSequenceID(r.getSequenceId())
+ .setReplicaIndex(r.getReplicaIndex()).build()
+ );
+ }
+ } catch (Exception ex) {
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.GET_CONTAINER_REPLICAS,
+ auditMap, ex));
+ throw ex;
}
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.GET_CONTAINER_WITH_PIPELINE_BATCH,
+ auditMap));
Review Comment:
Should we also put this at the end of the try block, just like the others?
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -627,52 +670,78 @@ public HddsProtos.Node queryNode(UUID uuid)
.build();
}
} catch (NodeNotFoundException e) {
- throw new IOException(
+ IOException ex = new IOException(
"An unexpected error occurred querying the NodeStatus", e);
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.QUERY_NODE, auditMap, ex));
+ throw ex;
}
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.QUERY_NODE, auditMap));
return result;
}
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> nodes,
boolean force)
throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
+ auditMap.put("force", String.valueOf(force));
+
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ List<DatanodeAdminError> result =
scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.DECOMMISSION_NODES, auditMap));
+ return result;
} catch (Exception ex) {
Review Comment:
```suggestion
} catch (IOException ex) {
```
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -737,17 +807,34 @@ public Pipeline
createReplicationPipeline(HddsProtos.ReplicationType type,
}
@Override
- public List<Pipeline> listPipelines() {
- AUDIT.logReadSuccess(
- buildAuditMessageForSuccess(SCMAction.LIST_PIPELINE, null));
- return scm.getPipelineManager().getPipelines();
+ public List<Pipeline> listPipelines() throws IOException {
Review Comment:
This whole process doesn't throw any error.
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -815,33 +902,29 @@ public ScmInfo getScmInfo() {
.setClusterId(scm.getScmStorageConfig().getClusterID())
.setScmId(scm.getScmStorageConfig().getScmId())
.setPeerRoles(scm.getScmHAManager().getRatisServer().getRatisRoles());
- return builder.build();
+ ScmInfo info = builder.build();
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.GET_SCM_INFO, null));
+ return info;
} catch (Exception ex) {
- auditSuccess = false;
- AUDIT.logReadFailure(
- buildAuditMessageForFailure(SCMAction.GET_SCM_INFO, null, ex)
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
Review Comment:
And I think this doesn't throw, either.
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -627,52 +670,78 @@ public HddsProtos.Node queryNode(UUID uuid)
.build();
}
} catch (NodeNotFoundException e) {
- throw new IOException(
+ IOException ex = new IOException(
"An unexpected error occurred querying the NodeStatus", e);
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.QUERY_NODE, auditMap, ex));
+ throw ex;
}
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.QUERY_NODE, auditMap));
return result;
}
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> nodes,
boolean force)
throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
+ auditMap.put("force", String.valueOf(force));
+
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ List<DatanodeAdminError> result =
scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.DECOMMISSION_NODES, auditMap));
+ return result;
} catch (Exception ex) {
- LOG.error("Failed to decommission nodes", ex);
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(
+ SCMAction.DECOMMISSION_NODES, auditMap, ex));
throw ex;
}
}
@Override
public List<DatanodeAdminError> recommissionNodes(List<String> nodes)
throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager().recommissionNodes(nodes);
+ List<DatanodeAdminError> result =
scm.getScmDecommissionManager().recommissionNodes(nodes);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.DECOMMISSION_NODES, auditMap));
+ return result;
} catch (Exception ex) {
- LOG.error("Failed to recommission nodes", ex);
+ AUDIT.logWriteFailure(buildAuditMessageForFailure(
+ SCMAction.DECOMMISSION_NODES, auditMap, ex));
throw ex;
}
}
@Override
public List<DatanodeAdminError> startMaintenanceNodes(List<String> nodes,
int endInHours, boolean force) throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
+ auditMap.put("endInHours", String.valueOf(endInHours));
+ auditMap.put("force", String.valueOf(force));
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager()
+ List<DatanodeAdminError> result = scm.getScmDecommissionManager()
Review Comment:
`scm.getScmDecommissionManager().startMaintenanceNodes` doesn't throw, either.
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -627,52 +670,78 @@ public HddsProtos.Node queryNode(UUID uuid)
.build();
}
} catch (NodeNotFoundException e) {
- throw new IOException(
+ IOException ex = new IOException(
"An unexpected error occurred querying the NodeStatus", e);
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.QUERY_NODE, auditMap, ex));
+ throw ex;
}
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.QUERY_NODE, auditMap));
return result;
}
@Override
public List<DatanodeAdminError> decommissionNodes(List<String> nodes,
boolean force)
throws IOException {
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("nodes", String.valueOf(nodes));
+ auditMap.put("force", String.valueOf(force));
+
try {
getScm().checkAdminAccess(getRemoteUser(), false);
- return scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ List<DatanodeAdminError> result =
scm.getScmDecommissionManager().decommissionNodes(nodes, force);
+ AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
+ SCMAction.DECOMMISSION_NODES, auditMap));
+ return result;
Review Comment:
`scm.getScmDecommissionManager().decommissionNodes` doesn't throw an exception.
```suggestion
      AUDIT.logWriteSuccess(buildAuditMessageForSuccess(
          SCMAction.DECOMMISSION_NODES, auditMap));
      return scm.getScmDecommissionManager().decommissionNodes(nodes, force);
```
##########
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/server/SCMClientProtocolServer.java:
##########
@@ -737,17 +807,34 @@ public Pipeline
createReplicationPipeline(HddsProtos.ReplicationType type,
}
@Override
- public List<Pipeline> listPipelines() {
- AUDIT.logReadSuccess(
- buildAuditMessageForSuccess(SCMAction.LIST_PIPELINE, null));
- return scm.getPipelineManager().getPipelines();
+ public List<Pipeline> listPipelines() throws IOException {
+ try {
+ List<Pipeline> pipelines = scm.getPipelineManager().getPipelines();
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.LIST_PIPELINE, null));
+ return pipelines;
+ } catch (Exception ex) {
+ AUDIT.logReadFailure(buildAuditMessageForFailure(
+ SCMAction.LIST_PIPELINE, null, ex));
+ throw ex;
+ }
}
@Override
public Pipeline getPipeline(HddsProtos.PipelineID pipelineID)
throws IOException {
- return scm.getPipelineManager().getPipeline(
- PipelineID.getFromProtobuf(pipelineID));
+ final Map<String, String> auditMap = Maps.newHashMap();
+ auditMap.put("pipelineID", pipelineID.getId());
+ try {
+ Pipeline pipeline =
scm.getPipelineManager().getPipeline(PipelineID.getFromProtobuf(pipelineID));
+ AUDIT.logReadSuccess(buildAuditMessageForSuccess(
+ SCMAction.GET_PIPELINE, auditMap));
+ return pipeline;
+ } catch (Exception ex) {
Review Comment:
```suggestion
} catch (PipelineNotFoundException ex) {
```
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]