BulkRecordWriter throws NPE for counter columns. Patch by goffinet and driftx, reviewed by Brandon Williams for CASSANDRA-3906.
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/648e62e5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/648e62e5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/648e62e5 Branch: refs/heads/trunk Commit: 648e62e58eaea8dba261d81e9048b0ae64536518 Parents: d2c22a8 Author: Chris Goffinet <c...@chrisgoffinet.com> Authored: Tue Feb 14 09:12:33 2012 -0800 Committer: Chris Goffinet <c...@chrisgoffinet.com> Committed: Tue Feb 14 09:12:33 2012 -0800 ---------------------------------------------------------------------- CHANGES.txt | 2 +- .../apache/cassandra/hadoop/BulkRecordWriter.java | 15 ++++++++------- 2 files changed, 9 insertions(+), 8 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/648e62e5/CHANGES.txt ---------------------------------------------------------------------- diff --git a/CHANGES.txt b/CHANGES.txt index 10ff2be..9481f5e 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -75,7 +75,7 @@ parent CF (CASSANDRA-3877) * Finish cleanup up tombstone purge code (CASSANDRA-3872) * Avoid NPE on aboarted stream-out sessions (CASSANDRA-3904) - + * BulkRecordWriter throws NPE for counter columns (CASSANDRA-3906) 1.0.8 * fix race between cleanup and flush on secondary index CFSes (CASSANDRA-3712) http://git-wip-us.apache.org/repos/asf/cassandra/blob/648e62e5/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java b/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java index 9962f24..aded15e 100644 --- a/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java +++ b/src/java/org/apache/cassandra/hadoop/BulkRecordWriter.java @@ -143,13 +143,14 @@ implements org.apache.hadoop.mapred.RecordWriter<ByteBuffer,List<Mutation>> if (cfType == 
CFType.SUPER) { writer.newSuperColumn(mut.getColumn_or_supercolumn().getSuper_column().name); - for (Column column : mut.getColumn_or_supercolumn().getSuper_column().columns) + if (colType == ColType.COUNTER) + for (CounterColumn column : mut.getColumn_or_supercolumn().getCounter_super_column().columns) + writer.addCounterColumn(column.name, column.value); + else { - if (colType == ColType.COUNTER) - writer.addCounterColumn(column.name, column.value.getLong()); - else + for (Column column : mut.getColumn_or_supercolumn().getSuper_column().columns) { - if(0 == column.ttl) + if(column.ttl == 0) writer.addColumn(column.name, column.value, column.timestamp); else writer.addExpiringColumn(column.name, column.value, column.timestamp, column.ttl, System.currentTimeMillis() + (column.ttl * 1000)); @@ -159,10 +160,10 @@ implements org.apache.hadoop.mapred.RecordWriter<ByteBuffer,List<Mutation>> else { if (colType == ColType.COUNTER) - writer.addCounterColumn(mut.getColumn_or_supercolumn().column.name, mut.getColumn_or_supercolumn().column.value.getLong()); + writer.addCounterColumn(mut.getColumn_or_supercolumn().counter_column.name, mut.getColumn_or_supercolumn().counter_column.value); else { - if(0 == mut.getColumn_or_supercolumn().column.ttl) + if(mut.getColumn_or_supercolumn().column.ttl == 0) writer.addColumn(mut.getColumn_or_supercolumn().column.name, mut.getColumn_or_supercolumn().column.value, mut.getColumn_or_supercolumn().column.timestamp); else writer.addExpiringColumn(mut.getColumn_or_supercolumn().column.name, mut.getColumn_or_supercolumn().column.value, mut.getColumn_or_supercolumn().column.timestamp, mut.getColumn_or_supercolumn().column.ttl, System.currentTimeMillis() + (mut.getColumn_or_supercolumn().column.ttl * 1000));