ptupitsyn commented on code in PR #7449:
URL: https://github.com/apache/ignite-3/pull/7449#discussion_r2711944800
##########
modules/client/src/main/java/org/apache/ignite/internal/client/tx/ClientTransaction.java:
##########
@@ -315,6 +315,9 @@ private void packEnlisted(PayloadOutputChannel w) {
        if (cnt > 0) {
            w.out().packLong(tracker.get().longValue());
        }
+
+        // Send information about directly mapped writes to ensure a proper cleanup algorithm is choosed.
Review Comment:
```suggestion
        // Send information about directly mapped writes to ensure a proper cleanup algorithm is chosen.
```
##########
modules/runner/src/integrationTest/java/org/apache/ignite/internal/client/ItClientDirectMappingTest.java:
##########
@@ -53,66 +78,104 @@ public class ItClientDirectMappingTest extends ClusterPerTestIntegrationTest {
+ " failureHandler.dumpThreadsOnFailure: false\n"
+ "}";
-    @BeforeEach
-    public void setup() throws Exception {
-        String zoneSql = "create zone test_zone with partitions=5, replicas=1, storage_profiles='" + DEFAULT_AIPERSIST_PROFILE_NAME + "'";
-        String sql = "create table " + TABLE_NAME + " (key int primary key, val varchar(20)) zone TEST_ZONE";
+    @BeforeAll
+    public static void setup() throws Exception {
+        String zoneSql = "create zone " + ZONE_NAME + " with partitions=" + PARTITIONS + ", replicas=2, storage_profiles='"
+                + DEFAULT_AIPERSIST_PROFILE_NAME + "'";
+        String sql = "create table " + TABLE_NAME + " (key int primary key, val varchar(20)) zone " + ZONE_NAME;
+        String zoneSql2 = "create zone " + ZONE_NAME2 + " with partitions=" + PARTITIONS + ", replicas=2, storage_profiles='"
+                + DEFAULT_AIPERSIST_PROFILE_NAME + "'";
+        String sql2 = "create table " + TABLE_NAME_2 + " (key int primary key, val varchar(20)) zone " + ZONE_NAME2;
-        cluster.doInSession(0, session -> {
+        CLUSTER.doInSession(0, session -> {
            executeUpdate(zoneSql, session);
            executeUpdate(sql, session);
+            executeUpdate(zoneSql2, session);
+            executeUpdate(sql2, session);
        });
    }
-    @Override
-    protected void customizeInitParameters(InitParametersBuilder builder) {
-        super.customizeInitParameters(builder);
-
-        builder.clusterConfiguration("ignite {"
-                + " transaction: {"
-                + " readOnlyTimeoutMillis: 30000,"
-                + " readWriteTimeoutMillis: 30000"
-                + " },"
-                + " replication: {"
-                + " rpcTimeoutMillis: 30000"
-                + " },"
-                + "}");
-    }
-
-    @Test
-    public void testBasicImplicit() {
-        try (IgniteClient client0 = clientConnectedToNode(0)) {
-            ClientTable table = (ClientTable) client0.tables().table(TABLE_NAME);
-            KeyValueView<Tuple, Tuple> kvView = table.keyValueView();
-
-            Tuple key = Tuple.create().set("key", 0);
-            Tuple val = Tuple.create().set("val", "test0");
-
-            kvView.put(null, key, val);
-            Tuple val0 = kvView.get(null, key);
-            assertTrue(Tuple.equals(val, val0));
+    @ParameterizedTest
+    @ValueSource(booleans = {true, false})
+    public void testReadOnCoordinatorWithDirectWrite(boolean commit) {
+        try (IgniteClient client = clientConnectedToAllNodes()) {
+            Table table1 = client.tables().table(TABLE_NAME);
+            KeyValueView<Tuple, Tuple> view1 = table1.keyValueView();
+            Table table2 = client.tables().table(TABLE_NAME_2);
+            KeyValueView<Tuple, Tuple> view2 = table2.keyValueView();
+
+            Map<Partition, ClusterNode> map1 = table1.partitionDistribution().primaryReplicasAsync().join();
+            Map<Integer, ClusterNode> mapPartById1 = map1.entrySet().stream().collect(Collectors.toMap(
+                    entry -> Math.toIntExact(entry.getKey().id()),
+                    Entry::getValue
+            ));
+
+            Map<Partition, ClusterNode> map2 = table2.partitionDistribution().primaryReplicasAsync().join();
+            Map<Integer, ClusterNode> mapPartById2 = map2.entrySet().stream().collect(Collectors.toMap(
+                    entry -> Math.toIntExact(entry.getKey().id()),
+                    Entry::getValue
+            ));
+
+            // Find a partition which mapped to different primaries.
+            int targetPart = -1;
+
+            for (int i = 0; i < PARTITIONS; i++) {
+                ClusterNode node1 = mapPartById1.get(i);
+                ClusterNode node2 = mapPartById2.get(i);
+
+                if (!node1.equals(node2)) {
+                    targetPart = i;
+                    break;
+                }
+            }
+
+            if (targetPart == -1) {
+                log.warn("Skipping test due to bad assignment");
+                return;
+            }
+
+            log.info("DBG: using partition " + targetPart);
Review Comment:
The level is info, but the message has a `DBG` prefix?
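
A possible follow-up, sketched here and not part of the original review: it assumes the `DBG:` prefix is leftover local debugging output, while the chosen partition number is still worth recording. Either dropping the prefix or matching the level to the intent would make the statement consistent:

```java
// Assumed intent: keep the message at info level without the debug prefix ...
log.info("Using partition " + targetPart);

// ... or, if the message is only useful while debugging, lower the level instead.
log.debug("Using partition " + targetPart);
```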