[ https://issues.apache.org/jira/browse/CASSANDRA-18993?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=17783633#comment-17783633 ]

Alex Petrov commented on CASSANDRA-18993:
-----------------------------------------

Repro:
{code:java}
    // Assumes the standard in-JVM dtest scaffolding: a test class extending
    // org.apache.cassandra.distributed.test.TestBaseImpl, plus
    //   import static org.apache.cassandra.distributed.api.ConsistencyLevel.QUORUM;
    @Test
    public void silentDataLossTest() throws Throwable
    {
        try (Cluster cluster = builder().withNodes(1)
                                        .withConfig((cfg) -> {
                                            cfg.set("memtable_heap_space", "512MiB")
                                               .set("column_index_size", "1KiB");
                                        })
                                        .start())
        {
            cluster.schemaChange("CREATE KEYSPACE IF NOT EXISTS distributed_test_keyspace WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};");
            // Two identical tables: "sut" is flushed below, while "model" keeps serving from the memtable as the reference.
            cluster.schemaChange("CREATE TABLE IF NOT EXISTS distributed_test_keyspace.sut (pk1 bigint, ck1 ascii, v1 ascii, PRIMARY KEY (pk1, ck1)) WITH CLUSTERING ORDER BY (ck1 ASC);");
            cluster.schemaChange("CREATE TABLE IF NOT EXISTS distributed_test_keyspace.model (pk1 bigint, ck1 ascii, v1 ascii, PRIMARY KEY (pk1, ck1)) WITH CLUSTERING ORDER BY (ck1 ASC);");
            // Apply the exact same writes and range deletes to both tables.
            for (String tbl : new String[]{ "model", "sut" })
            {
                cluster.coordinator(1).execute("INSERT INTO distributed_test_keyspace." + tbl + " (pk1,ck1,v1) VALUES (?, ?, ?) USING TIMESTAMP 2796;", QUORUM,
                                               -5095160963388022135L,
                                               "DPJCvLDEDZnaEbUFhUVaRwdFQDikZLmsxSdVBEqooUAlqDQlStFwlwzVfpdJQfkG5033",
                                               "JcMxGhPTaMHvCWHqbZudusDaZNvVSxVtitaCueFYlOEalFPuJsUkZzOVntcbWeVx473218153102427916835771671002269155241181613830359523821851961238681251252551261956820112990482292349610087946720815012725516116722810220384253253126155718111061170");
                cluster.coordinator(1).execute("INSERT INTO distributed_test_keyspace." + tbl + " (pk1,ck1) VALUES (?, ?) USING TIMESTAMP 9673;", QUORUM,
                                               -5095160963388022135L,
                                               "FGTwdBRbABgDknItRjYSvxPXSdhqYCfuhxkTqYESsqPclKMGJAVldTHiQDikZLms101165501834286");
                cluster.coordinator(1).execute("INSERT INTO distributed_test_keyspace." + tbl + " (pk1,ck1) VALUES (?, ?) USING TIMESTAMP 7070;", QUORUM,
                                               -5095160963388022135L,
                                               "EZsDzsPMgKaBtQMODPJCvLDEYDzpfKqYCmfLLyPPdvPbsXYqaIEGJuEpRQdJwEup15652246130252128225641091541411815737248488414116225117098");
                cluster.coordinator(1).execute("INSERT INTO distributed_test_keyspace." + tbl + " (pk1,ck1,v1) VALUES (?, ?, ?) USING TIMESTAMP 4095;", QUORUM,
                                               -5095160963388022135L,
                                               "HXuAukuhSFPNmxwcrjcnEtdvhUUEkSvygjEATtVzYDzpfKqYCgbmJQViaymGDgfW1652172243206180157137178201265235112108156237214",
                                               "hxkTqYESCmfLLyPPTLDFowirgerzwhAVJktxaFwAGIzPsPrWoqaYHuFOygLwVHYl8365214226110261426220217513513712816915180102101365921121412719022516064163209173134217205239112");
                // comment this one to hit RT boundary bug
                cluster.coordinator(1).execute("INSERT INTO distributed_test_keyspace." + tbl + " (pk1,ck1) VALUES (?, ?) USING TIMESTAMP 7938;", QUORUM,
                                               -5095160963388022135L,
                                               "XEFrgBnOLHahCNpPalrmgBCUHHruatGSisXHJxcYntcbWeVxNYJzEiFewKrXnFMQ722181162481551633514112160498813312419378157224137205892202551931861302023013682213024410422023448238221123510211024610918462");
                cluster.coordinator(1).execute("DELETE FROM distributed_test_keyspace." + tbl + " USING TIMESTAMP 3628 WHERE pk1 = ? AND ck1 > ? AND ck1 <= ?;", QUORUM,
                                               -5095160963388022135L,
                                               "EZsDzsPMgKaBtQMODPJCvLDEYDzpfKqYCmfLLyPPdvPbsXYqaIEGJuEpRQdJwEup15652246130252128225641091541411815737248488414116225117098",
                                               "mTNkwIyBQdPlymmMfpdJQfkGsUdSCMlwdvPbsXYqAVokALUTYiqBDfKVctLULPli131882272341129768310816024018356781847012029115110118203905244179237513886822492892054258646145171179206125418068245148147179");
                cluster.coordinator(1).execute("DELETE FROM distributed_test_keyspace." + tbl + " USING TIMESTAMP 6463 WHERE pk1 = ? AND ck1 > ? AND ck1 <= ?;", QUORUM,
                                               -5095160963388022135L,
                                               "HXuAukuhSFPNmxwcrjcnEtdvhUUEkSvygjEATtVzYDzpfKqYCgbmJQViaymGDgfW1652172243206180157137178201265235112108156237214",
                                               "dbqXpTyyKLcKgkLFwKrXnFMQYwRbGDWbGEebeOApZNvVSxVtqwyOnfYGchTAyMjm1971372042587121667129167712108815719922553824520512793206817056105245591888552341941222181");
            }
            // Flush only "sut"; "model" stays memtable-only.
            cluster.get(1).nodetool("flush", "distributed_test_keyspace", "sut");
            // Read both tables back with page size 1 and compare row by row.
            Iterator<Object[]> iterSut = cluster.coordinator(1).executeWithPaging("SELECT * FROM distributed_test_keyspace.sut WHERE pk1 = ?;", QUORUM, 1, -5095160963388022135L);
            Iterator<Object[]> iterModel = cluster.coordinator(1).executeWithPaging("SELECT * FROM distributed_test_keyspace.model WHERE pk1 = ?;", QUORUM, 1, -5095160963388022135L);
            while (iterSut.hasNext() && iterModel.hasNext())
                Assert.assertTrue(Arrays.equals(iterModel.next(), iterSut.next()));
            Assert.assertEquals(iterModel.hasNext(), iterSut.hasNext());
        }
    }
{code}
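
The repro writes identical data to {{model}} and {{sut}}, flushes only {{sut}}, and compares paged reads of the two tables row by row, so any divergence means the sstable read path lost or hid rows. The small {{column_index_size}} (1KiB) together with the long clustering keys presumably forces multiple row-index blocks inside the single partition. The underlying invariant can also be stated without a second table; a minimal sketch (the test name is mine, and it assumes TestBaseImpl's {{withKeyspace}}/{{KEYSPACE}} helpers plus {{AssertUtils.assertRows}}):
{code:java}
    // Sketch, not from the ticket: the same query must return the same rows
    // before and after a flush.
    @Test
    public void flushInvarianceSketch() throws Throwable
    {
        try (Cluster cluster = builder().withNodes(1).start())
        {
            cluster.schemaChange(withKeyspace("CREATE KEYSPACE IF NOT EXISTS %s WITH replication = {'class': 'SimpleStrategy', 'replication_factor': 1};"));
            cluster.schemaChange(withKeyspace("CREATE TABLE %s.t (pk1 bigint, ck1 ascii, v1 ascii, PRIMARY KEY (pk1, ck1));"));
            // ... apply the same writes and range deletes as in the repro above ...
            Object[][] before = cluster.coordinator(1).execute(withKeyspace("SELECT * FROM %s.t WHERE pk1 = ?;"), QUORUM, -5095160963388022135L);
            cluster.get(1).nodetool("flush", KEYSPACE, "t");
            Object[][] after = cluster.coordinator(1).execute(withKeyspace("SELECT * FROM %s.t WHERE pk1 = ?;"), QUORUM, -5095160963388022135L);
            // Any difference here is silent data loss (or resurrection) on the sstable path.
            assertRows(after, before);
        }
    }
{code}
The model/sut variant above avoids depending on pre-flush state, which makes the row-by-row comparison easier to bisect when it fails.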

> Harry-found silent data loss issue
> ----------------------------------
>
>                 Key: CASSANDRA-18993
>                 URL: https://issues.apache.org/jira/browse/CASSANDRA-18993
>             Project: Cassandra
>          Issue Type: Bug
>          Components: Legacy/Core
>            Reporter: Alex Petrov
>            Assignee: Jacek Lewandowski
>            Priority: Normal
>             Fix For: 4.1.x, 5.0-beta, 5.x
>
>
> Harry has discovered a silent data loss bug in trunk; it goes all the way 
> back to approximately 4.1.
> Some rows are not visible after a flush. I compared the Harry repro running 
> against a memtable instead of the regular Harry model, and it still 
> reproduces (in other words, it is almost certainly not a Harry issue).
> The schema is simple and only a single flush is required to reproduce, so 
> this is not an unlikely bug. The maximum partition size is 1k rows, so the 
> setup is not unlikely either. No concurrency is involved, and it reproduces 
> stably. There is a good chance this is related to the fact that the schema 
> has DESC clusterings.
> I am working on posting a Harry branch that stably reproduces it.
>  
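
Note that the description suspects DESC clusterings while the repro above declares {{ck1 ASC}}. To probe that hypothesis, only the table definitions need to change; a hypothetical variant (writes, deletes and the model-vs-sut comparison stay exactly as in the repro):
{code:java}
// Hypothetical DESC variant of the repro's tables.
cluster.schemaChange("CREATE TABLE IF NOT EXISTS distributed_test_keyspace.sut (pk1 bigint, ck1 ascii, v1 ascii, PRIMARY KEY (pk1, ck1)) WITH CLUSTERING ORDER BY (ck1 DESC);");
cluster.schemaChange("CREATE TABLE IF NOT EXISTS distributed_test_keyspace.model (pk1 bigint, ck1 ascii, v1 ascii, PRIMARY KEY (pk1, ck1)) WITH CLUSTERING ORDER BY (ck1 DESC);");
{code}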


