tkalkirill commented on code in PR #1599:
URL: https://github.com/apache/ignite-3/pull/1599#discussion_r1091735042
##########
modules/table/src/main/java/org/apache/ignite/internal/table/distributed/TableManager.java:
##########
@@ -1952,50 +1952,56 @@ public void onError(Throwable e) {
metaStorageMgr.registerPrefixWatch(ByteArray.fromString(STABLE_ASSIGNMENTS_PREFIX),
new WatchListener() {
@Override
public void onUpdate(WatchEvent evt) {
- if (!busyLock.enterBusy()) {
- throw new IgniteInternalException(new
NodeStoppingException());
- }
-
- try {
- assert evt.single();
+ inBusyLock(busyLock, () -> {
+ assert evt.single() : evt;
Entry stableAssignmentsWatchEvent =
evt.entryEvent().newEntry();
if (stableAssignmentsWatchEvent.value() == null) {
return;
}
- int part =
extractPartitionNumber(stableAssignmentsWatchEvent.key());
- UUID tblId =
extractTableId(stableAssignmentsWatchEvent.key(), STABLE_ASSIGNMENTS_PREFIX);
+ int partitionId =
extractPartitionNumber(stableAssignmentsWatchEvent.key());
+ UUID tableId =
extractTableId(stableAssignmentsWatchEvent.key(), STABLE_ASSIGNMENTS_PREFIX);
- TablePartitionId replicaGrpId = new
TablePartitionId(tblId, part);
+ TablePartitionId replicaGrpId = new
TablePartitionId(tableId, partitionId);
Set<Assignment> stableAssignments =
ByteUtils.fromBytes(stableAssignmentsWatchEvent.value());
- byte[] pendingFromMetastorage =
metaStorageMgr.get(pendingPartAssignmentsKey(replicaGrpId),
-
stableAssignmentsWatchEvent.revision()).join().value();
+ byte[] pendingAssignmentsFromMetaStorage =
metaStorageMgr.get(
+ pendingPartAssignmentsKey(replicaGrpId),
+ stableAssignmentsWatchEvent.revision()
+ ).join().value();
- Set<Assignment> pendingAssignments =
pendingFromMetastorage == null
+ Set<Assignment> pendingAssignments =
pendingAssignmentsFromMetaStorage == null
? Set.of()
- : ByteUtils.fromBytes(pendingFromMetastorage);
+ :
ByteUtils.fromBytes(pendingAssignmentsFromMetaStorage);
String localMemberName =
clusterService.topologyService().localMember().name();
boolean shouldStopLocalServices =
Stream.concat(stableAssignments.stream(), pendingAssignments.stream())
.noneMatch(assignment ->
assignment.consistentId().equals(localMemberName));
if (shouldStopLocalServices) {
+ TableImpl table = tablesByIdVv.latest().get(tableId);
+
+ CompletableFuture<Void> destroyMvPartitionStorageFuture
+ =
table.internalTable().storage().destroyPartition(partitionId);
+
+
table.internalTable().txStateStorage().destroyTxStateStorage(partitionId);
+
+ // Necessary evil.
Review Comment:
The methods `TxStateTableStorage#destroyTxStateStorage` and
`MvTableStorage#destroyPartition` both close the storages and clean up their data.
Thus, if we only closed the storages, we would then have to recreate them in order
to clear them, which would not look good.
In fact, destroying a storage is currently almost O(1): for persistent storages we
simply close the storage and delete its files, while for volatile storages the
cleanup tasks are submitted to separate pools.
I can describe this in the code comments so that fewer questions arise.
##########
modules/table/src/main/java/org/apache/ignite/internal/table/distributed/TableManager.java:
##########
@@ -1952,50 +1952,56 @@ public void onError(Throwable e) {
metaStorageMgr.registerPrefixWatch(ByteArray.fromString(STABLE_ASSIGNMENTS_PREFIX),
new WatchListener() {
@Override
public void onUpdate(WatchEvent evt) {
- if (!busyLock.enterBusy()) {
- throw new IgniteInternalException(new
NodeStoppingException());
- }
-
- try {
- assert evt.single();
+ inBusyLock(busyLock, () -> {
+ assert evt.single() : evt;
Entry stableAssignmentsWatchEvent =
evt.entryEvent().newEntry();
if (stableAssignmentsWatchEvent.value() == null) {
return;
}
- int part =
extractPartitionNumber(stableAssignmentsWatchEvent.key());
- UUID tblId =
extractTableId(stableAssignmentsWatchEvent.key(), STABLE_ASSIGNMENTS_PREFIX);
+ int partitionId =
extractPartitionNumber(stableAssignmentsWatchEvent.key());
+ UUID tableId =
extractTableId(stableAssignmentsWatchEvent.key(), STABLE_ASSIGNMENTS_PREFIX);
- TablePartitionId replicaGrpId = new
TablePartitionId(tblId, part);
+ TablePartitionId replicaGrpId = new
TablePartitionId(tableId, partitionId);
Set<Assignment> stableAssignments =
ByteUtils.fromBytes(stableAssignmentsWatchEvent.value());
- byte[] pendingFromMetastorage =
metaStorageMgr.get(pendingPartAssignmentsKey(replicaGrpId),
-
stableAssignmentsWatchEvent.revision()).join().value();
+ byte[] pendingAssignmentsFromMetaStorage =
metaStorageMgr.get(
+ pendingPartAssignmentsKey(replicaGrpId),
+ stableAssignmentsWatchEvent.revision()
+ ).join().value();
- Set<Assignment> pendingAssignments =
pendingFromMetastorage == null
+ Set<Assignment> pendingAssignments =
pendingAssignmentsFromMetaStorage == null
? Set.of()
- : ByteUtils.fromBytes(pendingFromMetastorage);
+ :
ByteUtils.fromBytes(pendingAssignmentsFromMetaStorage);
String localMemberName =
clusterService.topologyService().localMember().name();
boolean shouldStopLocalServices =
Stream.concat(stableAssignments.stream(), pendingAssignments.stream())
.noneMatch(assignment ->
assignment.consistentId().equals(localMemberName));
if (shouldStopLocalServices) {
+ TableImpl table = tablesByIdVv.latest().get(tableId);
+
+ CompletableFuture<Void> destroyMvPartitionStorageFuture
+ =
table.internalTable().storage().destroyPartition(partitionId);
+
+
table.internalTable().txStateStorage().destroyTxStateStorage(partitionId);
+
+ // Necessary evil.
Review Comment:
The methods `TxStateTableStorage#destroyTxStateStorage` and
`MvTableStorage#destroyPartition` both close the storages and clean up their data.
Thus, if we only closed the storages, we would then have to recreate them in order
to clear them, which would not look good.
In fact, destroying a storage is currently almost O(1): for persistent storages we
simply close the storage and delete its files, while for volatile storages the
cleanup tasks are submitted to separate pools.
I can describe this in the code comments so that fewer questions arise.
--
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.
To unsubscribe, e-mail: [email protected]
For queries about this service, please contact Infrastructure at:
[email protected]