See my test case attached below. In my setup it usually fails around the 800th try...

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Random;

import me.prettyprint.cassandra.service.CassandraClient;
import me.prettyprint.cassandra.service.CassandraClientPool;
import me.prettyprint.cassandra.service.CassandraClientPoolFactory;
import me.prettyprint.cassandra.service.Keyspace;

import org.apache.cassandra.thrift.Column;
import org.apache.cassandra.thrift.ColumnOrSuperColumn;
import org.apache.cassandra.thrift.ColumnParent;
import org.apache.cassandra.thrift.Mutation;
import org.apache.cassandra.thrift.SlicePredicate;
import org.apache.cassandra.thrift.SuperColumn;
import org.junit.Assert;
import org.junit.Test;

/**
 * Attempts to reproduce an apparent read-after-write consistency gap on a
 * single-node Cassandra 0.6 cluster: writes a random one-byte value to a
 * subcolumn via a batch mutation, immediately reads it back from the same
 * thread, and asserts the value round-trips. Repeated 1000 times; in the
 * reporter's setup it typically fails around the 800th iteration.
 */
public final class ConsistencyTest
{
    // Connection parameters for the local single-node cluster.
    private static final String HOST = "localhost";
    private static final int PORT = 9160;

    // Schema coordinates of the cell under test (the default
    // Keyspace1/Super1 definitions shipped with Cassandra 0.6).
    private static final String KEYSPACE = "Keyspace1";
    private static final String FAMILY = "Super1";
    private static final String ROW_KEY = "key";
    // NOTE(review): getBytes() uses the platform default charset; harmless
    // for these ASCII names, but an explicit charset would be safer.
    private static final byte[] SUPER_COLUMN = "super".getBytes();
    private static final byte[] SUB_COLUMN = "sub".getBytes();

    // One RNG for the whole test; constructing (and re-seeding) a new
    // Random on every loop iteration is wasteful.
    private static final Random RANDOM = new Random();

    /**
     * Writes {@code aValue} into SUPER_COLUMN/SUB_COLUMN of ROW_KEY using
     * batch_mutate, borrowing a client from {@code aPool} and releasing it
     * in a finally block.
     *
     * @param aPool  pool to borrow the Thrift client from
     * @param aValue the column value to store
     * @throws Exception on any Hector/Thrift failure
     */
    private void write(final CassandraClientPool aPool, final byte[] aValue)
    throws Exception
    {
        CassandraClient client = aPool.borrowClient(HOST, PORT);
        final Keyspace keyspace = client.getKeyspace(KEYSPACE);

        final List<Column> columnList = new ArrayList<Column>();
        columnList.add(
            new Column(SUB_COLUMN, aValue, keyspace.createTimestamp()));

        final SuperColumn superColumn =
            new SuperColumn(SUPER_COLUMN, columnList);
        final ColumnOrSuperColumn cosc = new ColumnOrSuperColumn();
        cosc.setSuper_column(superColumn);

        final Mutation mutation = new Mutation();
        mutation.setColumn_or_supercolumn(cosc);

        final List<Mutation> mutations = new ArrayList<Mutation>();
        mutations.add(mutation);

        // column family -> mutations
        final Map<String,List<Mutation>> familyBatch =
            new HashMap<String,List<Mutation>>();
        familyBatch.put(FAMILY, mutations);

        // row key -> family batch
        final Map<String,Map<String,List<Mutation>>> batch =
            new HashMap<String,Map<String,List<Mutation>>>();
        batch.put(ROW_KEY, familyBatch);

        try
        {
            keyspace.batchMutate(batch);
            // Hector may have failed over to a different client; release
            // the one the keyspace actually ended up using.
            client = keyspace.getClient();
        }
        finally
        {
            aPool.releaseClient(client);
        }
    }

    /**
     * Reads back the SUB_COLUMN value stored under SUPER_COLUMN of
     * ROW_KEY via a super-slice query.
     *
     * @param aPool pool to borrow the Thrift client from
     * @return the stored value, or {@code null} if the cell is absent
     * @throws Exception on any Hector/Thrift failure
     */
    private byte[] read(final CassandraClientPool aPool)
    throws Exception
    {
        CassandraClient client = aPool.borrowClient(HOST, PORT);
        final Keyspace keyspace = client.getKeyspace(KEYSPACE);

        final List<byte[]> columnNames = new ArrayList<byte[]>();
        columnNames.add(SUPER_COLUMN);

        final SlicePredicate predicate = new SlicePredicate();
        predicate.setColumn_names(columnNames);

        final List<SuperColumn> result;
        try
        {
            result = keyspace.getSuperSlice(
                ROW_KEY, new ColumnParent(FAMILY), predicate);
            // Same failover consideration as in write().
            client = keyspace.getClient();
        }
        finally
        {
            aPool.releaseClient(client);
        }

        // Linear scan; fine for a test (never mind the inefficiency).
        for (SuperColumn superColumn : result)
        {
            for (Column column : superColumn.getColumns())
            {
                if (Arrays.equals(superColumn.getName(), SUPER_COLUMN)
                    && Arrays.equals(column.getName(), SUB_COLUMN))
                {
                    return column.getValue();
                }
            }
        }
        return null;
    }

    /**
     * Performs 1000 write-then-read round trips and asserts that each read
     * observes the value just written. With a single node and the default
     * quorum consistency level this should always hold.
     */
    @Test
    public void testConsistency()
    throws Exception
    {
        final CassandraClientPool pool =
            CassandraClientPoolFactory.INSTANCE.get();

        final byte[] value = new byte[1];
        for (int i = 0; i < 1000; ++i)
        {
            RANDOM.nextBytes(value);

            write(pool, value);
            final byte[] verify = read(pool);

            Assert.assertArrayEquals("failed on attempt " + (i + 1),
                                     value, verify);
        }
    }
}

On 7/19/2010 9:26 PM, Ran Tavory wrote:
If your test case is correct, then it sounds like a bug to me. With one node, unless you're writing with CL=0, you should get full consistency.

On Mon, Jul 19, 2010 at 10:14 PM, Hugo <h...@unitedgames.com <mailto:h...@unitedgames.com>> wrote:

    Hi,

    Being fairly new to Cassandra I have a question on the eventual
    consistency. I'm currently performing experiments with a
    single-node Cassandra system and a single client. In some of my
    tests I perform an update to an existing subcolumn in a row and
    subsequently read it back from the same thread. More often than
    not I get back the value I've written (and expected), but
    sometimes it can occur that I get back the old value of the
    subcolumn. Is this a bug or does it fall into the eventual
    consistency?

    I'm using Hector 0.6.0-14 on Cassandra 0.6.3 on a single-disk,
    dual-core Windows machine with a Sun 1.6 JVM. All reads and
    writes use quorum (the default), but I don't think this matters in
    my setup.

    Groets, Hugo.


Reply via email to