ptupitsyn commented on code in PR #6779: URL: https://github.com/apache/ignite-3/pull/6779#discussion_r2523049448
########## modules/client/src/main/java/org/apache/ignite/internal/client/table/MapFunction.java: ########## @@ -0,0 +1,32 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one or more + * contributor license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright ownership. + * The ASF licenses this file to You under the Apache License, Version 2.0 + * (the "License"); you may not use this file except in compliance with + * the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.ignite.internal.client.table; + +import java.util.Collection; +import java.util.concurrent.CompletableFuture; +import org.apache.ignite.internal.lang.IgniteTriFunction; + +/** + * Defines alias for batch map function. + * + * @param <K> Key type. + * @param <R> Result type. + */ +@FunctionalInterface +interface MapFunction<K, R> extends IgniteTriFunction<Collection<K>, PartitionAwarenessProvider, Boolean, CompletableFuture<R>> { Review Comment: It is difficult to understand the purpose of every function parameter. Let's remove `extends IgniteTriFunction` and add the function to the interface with explanatory parameter names and javadoc? ########## modules/client/src/main/java/org/apache/ignite/internal/client/table/ClientTable.java: ########## @@ -839,25 +868,202 @@ <E> CompletableFuture<List<E>> split( idx++; } - List<CompletableFuture<List<E>>> res = new ArrayList<>(aff.size()); - List<Batch<E>> batches = new ArrayList<>(); + CompletableFuture<List<E>> resFut = new CompletableFuture<>(); + mapAndRetry(fun, keys, txns, mapped, new long[1], resFut, log); + return resFut; + }); + } + + private static <R, E> void mapAndRetry( Review Comment: I think we can move those new methods to a separate class dedicated to batch tx mapping. `ClientTable` is already too long. ########## modules/client/src/main/java/org/apache/ignite/internal/client/table/ClientTable.java: ########## @@ -839,25 +868,202 @@ <E> CompletableFuture<List<E>> split( idx++; } - List<CompletableFuture<List<E>>> res = new ArrayList<>(aff.size()); - List<Batch<E>> batches = new ArrayList<>(); + CompletableFuture<List<E>> resFut = new CompletableFuture<>(); + mapAndRetry(fun, keys, txns, mapped, new long[1], resFut, log); + return resFut; + }); + } + + private static <R, E> void mapAndRetry( + MapFunction<E, R> mapFun, + @Nullable R initialValue, Reducer<R> reducer, + List<Transaction> txns, + Map<Integer, List<E>> mapped, + long[] startTs, + CompletableFuture<R> resFut, + IgniteLogger log + ) { + if (startTs[0] == 0) { + startTs[0] = System.nanoTime(); Review Comment: Should we use `observableTimestamp` instead? 
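To make the `MapFunction` suggestion above concrete, a possible shape without `extends IgniteTriFunction` could look roughly like the sketch below. Parameter names, Javadoc, and the meaning of the boolean flag are only guesses here: the flag is inferred from the `mapped.size() > 1` argument at the call sites, not from the author's intent.

```java
package org.apache.ignite.internal.client.table;

import java.util.Collection;
import java.util.concurrent.CompletableFuture;

/**
 * Maps one partition-local batch of keys to an asynchronous result.
 *
 * @param <K> Key type.
 * @param <R> Result type.
 */
@FunctionalInterface
interface MapFunction<K, R> {
    /**
     * Runs the operation for a single batch.
     *
     * @param batch Keys routed to one partition.
     * @param provider Partition awareness provider for the target partition.
     * @param multiBatch Whether the original request was split into more than one batch
     *     (name and meaning inferred from the {@code mapped.size() > 1} call sites).
     * @return Future with the per-batch result.
     */
    CompletableFuture<R> apply(Collection<K> batch, PartitionAwarenessProvider provider, boolean multiBatch);
}
```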
########## modules/client/src/main/java/org/apache/ignite/internal/client/table/ClientTable.java: ########## @@ -839,25 +868,202 @@ <E> CompletableFuture<List<E>> split( idx++; } - List<CompletableFuture<List<E>>> res = new ArrayList<>(aff.size()); - List<Batch<E>> batches = new ArrayList<>(); + CompletableFuture<List<E>> resFut = new CompletableFuture<>(); + mapAndRetry(fun, keys, txns, mapped, new long[1], resFut, log); + return resFut; + }); + } + + private static <R, E> void mapAndRetry( + MapFunction<E, R> mapFun, + @Nullable R initialValue, Reducer<R> reducer, + List<Transaction> txns, + Map<Integer, List<E>> mapped, + long[] startTs, + CompletableFuture<R> resFut, + IgniteLogger log + ) { + if (startTs[0] == 0) { + startTs[0] = System.nanoTime(); + } + + List<CompletableFuture<R>> res = new ArrayList<>(); + + for (Entry<Integer, List<E>> entry : mapped.entrySet()) { + res.add(mapFun.apply(entry.getValue(), PartitionAwarenessProvider.of(entry.getKey()), mapped.size() > 1)); + } + + CompletableFutures.allOf(res).handle((ignored, err) -> { + List<CompletableFuture<Void>> waitCommitFuts = List.of(); + if (!txns.isEmpty()) { + boolean allRetryableExceptions = true; + + for (int i = 0; i < res.size(); i++) { + CompletableFuture<R> fut0 = res.get(i); + if (fut0.isCompletedExceptionally()) { + try { + fut0.join(); + } catch (CompletionException e) { + allRetryableExceptions = ExceptionUtils.matchAny(unwrapCause(e), ACQUIRE_LOCK_ERR); + } + } + Transaction tx0 = txns.get(i); + tx0.rollbackAsync().whenComplete((r, e) -> { + if (e != null) { + log.error("Failed to rollback a transactional batch: [tx=" + tx0 + ']', e); + } + }); + } + + if (err != null) { + // Check if we can retry. + long nowRelative = System.nanoTime(); + if (allRetryableExceptions && nowRelative - startTs[0] < DEFAULT_IMPLICIT_GET_ALL_TIMEOUT_NANOS) { + startTs[0] = nowRelative; + txns.clear(); // This collection is re-filled on next map attempt. + + mapAndRetry(mapFun, initialValue, reducer, txns, mapped, startTs, resFut, log); - for (Entry<Integer, Batch<E>> entry : mapped.entrySet()) { - res.add(fun.apply(entry.getValue().batch, PartitionAwarenessProvider.of(entry.getKey()))); - batches.add(entry.getValue()); + return null; } - return CompletableFuture.allOf(res.toArray(new CompletableFuture[0])).thenApply(ignored -> { - var in = new ArrayList<E>(Collections.nCopies(keys.size(), null)); + resFut.completeExceptionally(err); + + return null; + } + + waitCommitFuts = unlockFragments(txns, log); + } else { + if (err != null) { + resFut.completeExceptionally(err); + + return null; + } + } + + R in = initialValue; + + for (CompletableFuture<R> val : res) { + in = reducer.reduce(in, val.getNow(null)); + } + + if (waitCommitFuts.isEmpty()) { + resFut.complete(in); + } else { + R finalIn = in; + CompletableFutures.allOf(waitCommitFuts).whenComplete((r, e) -> { + // Ignore errors. 
+ resFut.complete(finalIn); + }); + } - for (int i = 0; i < res.size(); i++) { - CompletableFuture<List<E>> f = res.get(i); - reduceWithKeepOrder(in, f.getNow(null), batches.get(i).originalIndices); + return null; + }); + } + + private static <E> void mapAndRetry( + MapFunction<E, List<E>> mapFun, + Collection<E> keys, + List<Transaction> txns, + Map<Integer, Batch<E>> mapped, + long[] startTs, + CompletableFuture<List<E>> resFut, + IgniteLogger log + ) { + if (startTs[0] == 0) { + startTs[0] = System.nanoTime(); + } + + List<CompletableFuture<List<E>>> res = new ArrayList<>(mapped.size()); + List<Batch<E>> batches = new ArrayList<>(); + + for (Entry<Integer, Batch<E>> entry : mapped.entrySet()) { + res.add(mapFun.apply(entry.getValue().batch, PartitionAwarenessProvider.of(entry.getKey()), mapped.size() > 1)); + batches.add(entry.getValue()); + } + + CompletableFutures.allOf(res).handle((ignored, err) -> { + // TODO remove copy paste + List<CompletableFuture<Void>> waitCommitFuts = List.of(); + if (!txns.isEmpty()) { + boolean allRetryableExceptions = true; + + for (int i = 0; i < res.size(); i++) { + CompletableFuture<?> fut0 = res.get(i); + if (fut0.isCompletedExceptionally()) { + try { + fut0.join(); + } catch (CompletionException e) { + allRetryableExceptions = ExceptionUtils.matchAny(unwrapCause(e), ACQUIRE_LOCK_ERR); } + } + txns.get(i).rollbackAsync(); + } - return in; - }); + if (err != null) { + // Check if we can retry. + long nowRelative = System.nanoTime(); + if (allRetryableExceptions && nowRelative - startTs[0] < DEFAULT_IMPLICIT_GET_ALL_TIMEOUT_NANOS) { + startTs[0] = nowRelative; + txns.clear(); // This collection is re-filled on next map attempt. + + mapAndRetry(mapFun, keys, txns, mapped, startTs, resFut, log); + + return null; + } + + resFut.completeExceptionally(err); + + return null; + } + + waitCommitFuts = unlockFragments(txns, log); + } else { + if (err != null) { + resFut.completeExceptionally(err); + + return null; + } + } + + var in = new ArrayList<E>(Collections.nCopies(keys.size(), null)); + + for (int i = 0; i < res.size(); i++) { + CompletableFuture<List<E>> f = res.get(i); + reduceWithKeepOrder(in, f.getNow(null), batches.get(i).originalIndices); + } + + if (waitCommitFuts.isEmpty()) { + resFut.complete(in); + } else { + CompletableFutures.allOf(waitCommitFuts).whenComplete((r, e) -> { + // Ignore errors. 
+ resFut.complete(in); }); + } + + return null; + }); + } + + @NotNull + private static List<CompletableFuture<Void>> unlockFragments(List<Transaction> txns, IgniteLogger log) { + List<CompletableFuture<Void>> waitCommitFuts = new ArrayList<>(); + + for (Transaction txn : txns) { + // ClientTransaction tx0 = (ClientTransaction) txn; TODO FIXME investigate error handling Review Comment: TODO without ticket ########## modules/client/src/main/java/org/apache/ignite/internal/client/table/ClientTable.java: ########## @@ -839,25 +868,202 @@ <E> CompletableFuture<List<E>> split( idx++; } - List<CompletableFuture<List<E>>> res = new ArrayList<>(aff.size()); - List<Batch<E>> batches = new ArrayList<>(); + CompletableFuture<List<E>> resFut = new CompletableFuture<>(); + mapAndRetry(fun, keys, txns, mapped, new long[1], resFut, log); + return resFut; + }); + } + + private static <R, E> void mapAndRetry( + MapFunction<E, R> mapFun, + @Nullable R initialValue, Reducer<R> reducer, + List<Transaction> txns, + Map<Integer, List<E>> mapped, + long[] startTs, + CompletableFuture<R> resFut, + IgniteLogger log + ) { + if (startTs[0] == 0) { + startTs[0] = System.nanoTime(); + } + + List<CompletableFuture<R>> res = new ArrayList<>(); + + for (Entry<Integer, List<E>> entry : mapped.entrySet()) { + res.add(mapFun.apply(entry.getValue(), PartitionAwarenessProvider.of(entry.getKey()), mapped.size() > 1)); + } + + CompletableFutures.allOf(res).handle((ignored, err) -> { + List<CompletableFuture<Void>> waitCommitFuts = List.of(); + if (!txns.isEmpty()) { + boolean allRetryableExceptions = true; + + for (int i = 0; i < res.size(); i++) { + CompletableFuture<R> fut0 = res.get(i); + if (fut0.isCompletedExceptionally()) { + try { + fut0.join(); + } catch (CompletionException e) { + allRetryableExceptions = ExceptionUtils.matchAny(unwrapCause(e), ACQUIRE_LOCK_ERR); + } + } + Transaction tx0 = txns.get(i); + tx0.rollbackAsync().whenComplete((r, e) -> { Review Comment: Looks like we roll back even if `err == null`? ########## modules/client/src/main/java/org/apache/ignite/internal/client/table/ClientTable.java: ########## @@ -762,16 +796,24 @@ private static <E> void reduceWithKeepOrder(List<E> agg, List<E> cur, List<Integ } } - <R, E> CompletableFuture<R> split( - Transaction tx, + <R, E> CompletableFuture<R> splitAndRun( Collection<E> keys, - BiFunction<Collection<E>, PartitionAwarenessProvider, CompletableFuture<R>> fun, + MapFunction<E, R> fun, @Nullable R initialValue, Reducer<R> reducer, BiFunction<ClientSchema, E, Integer> hashFunc ) { - assert tx != null; + return splitAndRun(keys, fun, initialValue, reducer, hashFunc, List.of()); Review Comment: Should we skip the splitting when there is only one active client connection (or only a few)? It looks like splitting might reduce performance in that case.
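To make the last point above concrete, the pre-check could be as small as a helper along these lines. This is a toy sketch, not the real `ClientTable` API: `connectionCount` is a placeholder for whatever the client channel actually exposes, and `partitionCount` stands for the number of distinct target partitions after hashing.

```java
// Toy decision helper (hypothetical names): skip the split when it cannot pay off.
static boolean shouldSplit(int keyCount, int partitionCount, int connectionCount) {
    // Splitting only helps when the keys fan out to more than one partition
    // and there is more than one connection to spread the batches over.
    return keyCount > 1 && partitionCount > 1 && connectionCount > 1;
}
```

If the check returns `false`, the call could simply go through the existing non-split path instead of building per-partition batches.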
########## modules/client/src/main/java/org/apache/ignite/internal/client/table/ClientTable.java: ########## @@ -839,25 +868,202 @@ <E> CompletableFuture<List<E>> split( idx++; } - List<CompletableFuture<List<E>>> res = new ArrayList<>(aff.size()); - List<Batch<E>> batches = new ArrayList<>(); + CompletableFuture<List<E>> resFut = new CompletableFuture<>(); + mapAndRetry(fun, keys, txns, mapped, new long[1], resFut, log); + return resFut; + }); + } + + private static <R, E> void mapAndRetry( + MapFunction<E, R> mapFun, + @Nullable R initialValue, Reducer<R> reducer, + List<Transaction> txns, + Map<Integer, List<E>> mapped, + long[] startTs, + CompletableFuture<R> resFut, + IgniteLogger log + ) { + if (startTs[0] == 0) { + startTs[0] = System.nanoTime(); + } + + List<CompletableFuture<R>> res = new ArrayList<>(); + + for (Entry<Integer, List<E>> entry : mapped.entrySet()) { + res.add(mapFun.apply(entry.getValue(), PartitionAwarenessProvider.of(entry.getKey()), mapped.size() > 1)); + } + + CompletableFutures.allOf(res).handle((ignored, err) -> { + List<CompletableFuture<Void>> waitCommitFuts = List.of(); + if (!txns.isEmpty()) { + boolean allRetryableExceptions = true; + + for (int i = 0; i < res.size(); i++) { + CompletableFuture<R> fut0 = res.get(i); + if (fut0.isCompletedExceptionally()) { + try { + fut0.join(); + } catch (CompletionException e) { + allRetryableExceptions = ExceptionUtils.matchAny(unwrapCause(e), ACQUIRE_LOCK_ERR); + } + } + Transaction tx0 = txns.get(i); + tx0.rollbackAsync().whenComplete((r, e) -> { + if (e != null) { + log.error("Failed to rollback a transactional batch: [tx=" + tx0 + ']', e); + } + }); + } + + if (err != null) { + // Check if we can retry. + long nowRelative = System.nanoTime(); + if (allRetryableExceptions && nowRelative - startTs[0] < DEFAULT_IMPLICIT_GET_ALL_TIMEOUT_NANOS) { + startTs[0] = nowRelative; + txns.clear(); // This collection is re-filled on next map attempt. + + mapAndRetry(mapFun, initialValue, reducer, txns, mapped, startTs, resFut, log); - for (Entry<Integer, Batch<E>> entry : mapped.entrySet()) { - res.add(fun.apply(entry.getValue().batch, PartitionAwarenessProvider.of(entry.getKey()))); - batches.add(entry.getValue()); + return null; } - return CompletableFuture.allOf(res.toArray(new CompletableFuture[0])).thenApply(ignored -> { - var in = new ArrayList<E>(Collections.nCopies(keys.size(), null)); + resFut.completeExceptionally(err); + + return null; + } + + waitCommitFuts = unlockFragments(txns, log); + } else { + if (err != null) { + resFut.completeExceptionally(err); + + return null; + } + } + + R in = initialValue; + + for (CompletableFuture<R> val : res) { + in = reducer.reduce(in, val.getNow(null)); + } + + if (waitCommitFuts.isEmpty()) { + resFut.complete(in); + } else { + R finalIn = in; + CompletableFutures.allOf(waitCommitFuts).whenComplete((r, e) -> { + // Ignore errors. 
+ resFut.complete(finalIn); + }); + } - for (int i = 0; i < res.size(); i++) { - CompletableFuture<List<E>> f = res.get(i); - reduceWithKeepOrder(in, f.getNow(null), batches.get(i).originalIndices); + return null; + }); + } + + private static <E> void mapAndRetry( + MapFunction<E, List<E>> mapFun, + Collection<E> keys, + List<Transaction> txns, + Map<Integer, Batch<E>> mapped, + long[] startTs, + CompletableFuture<List<E>> resFut, + IgniteLogger log + ) { + if (startTs[0] == 0) { + startTs[0] = System.nanoTime(); + } + + List<CompletableFuture<List<E>>> res = new ArrayList<>(mapped.size()); + List<Batch<E>> batches = new ArrayList<>(); + + for (Entry<Integer, Batch<E>> entry : mapped.entrySet()) { + res.add(mapFun.apply(entry.getValue().batch, PartitionAwarenessProvider.of(entry.getKey()), mapped.size() > 1)); + batches.add(entry.getValue()); + } + + CompletableFutures.allOf(res).handle((ignored, err) -> { + // TODO remove copy paste Review Comment: TODO without ticket.
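On the two copy-paste-related comments above (the duplicated `mapAndRetry` bodies and moving the logic out of `ClientTable`), one possible shape for a dedicated class is sketched below. All names are invented for illustration, and the rollback/retry decision is collapsed into a single callback, so this is only a starting point rather than a drop-in replacement.

```java
import java.util.List;
import java.util.concurrent.CompletableFuture;
import java.util.function.Consumer;

// Hypothetical sketch of a shared driver for batch tx mapping: the generic-reducer
// variant and the keep-order variant would differ only in how a per-batch result is
// folded into the aggregate.
final class BatchTxMapper {

    /** Folds the result of the batch at {@code batchIndex} into the accumulator. */
    @FunctionalInterface
    interface ResultFolder<R, E> {
        R fold(R acc, E batchResult, int batchIndex);
    }

    /**
     * Waits for all per-batch futures. On failure it delegates to {@code onError}, which is
     * expected to roll back the per-partition transactions and either retry or complete
     * {@code resFut} exceptionally; on success it folds the results and completes {@code resFut}.
     */
    static <R, E> void reduceOrFail(
            List<CompletableFuture<E>> batchFutures,
            R initialValue,
            ResultFolder<R, E> folder,
            Consumer<Throwable> onError,
            CompletableFuture<R> resFut
    ) {
        CompletableFuture.allOf(batchFutures.toArray(new CompletableFuture[0]))
                .handle((ignored, err) -> {
                    if (err != null) {
                        onError.accept(err);
                        return null;
                    }

                    R acc = initialValue;
                    for (int i = 0; i < batchFutures.size(); i++) {
                        acc = folder.fold(acc, batchFutures.get(i).getNow(null), i);
                    }

                    resFut.complete(acc);
                    return null;
                });
    }
}
```

With something like this, the keep-order caller would pass a folder that invokes `reduceWithKeepOrder` with the batch's original indices, and the generic caller would pass one that simply delegates to its `Reducer`.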
