Hello,
I am using Hadoop 0.20.2 with the new mapreduce API. I have a class
that will be used as a value in my mapper/reducer. I have implemented
Writable as shown below, but I am not sure this is the right way to do
it. Could someone please tell me how to handle a mix of ints, int[]
arrays, and non-Writable objects such as java.util.BitSet?
All the examples I have seen only deal with built-in Writables or primitives.
import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.BitSet;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.ObjectWritable;
import org.apache.hadoop.io.Writable;
public class Data implements Writable
{
    public static Configuration conf;

    public BitSet b;
    public BitSet rb;
    public int[] c;
    public int bNullPosition;
    public int rbNullPosition;

    /**
     * A Writable must have an empty constructor.
     */
    public Data()
    {
        b = new BitSet();
        rb = new BitSet();
    }

    public Data(BitSet b, BitSet rb, int[] c, int bNullPosition,
            int rbNullPosition)
    {
        this.b = b;
        this.rb = rb;
        this.c = c;
        this.bNullPosition = bNullPosition;
        this.rbNullPosition = rbNullPosition;
    }

    @Override
    public void readFields(DataInput in) throws IOException
    {
        // Read each BitSet back through an ObjectWritable and unwrap it with get().
        ObjectWritable bWritable = new ObjectWritable(BitSet.class, b);
        bWritable.readFields(in);
        b = (BitSet) bWritable.get();

        ObjectWritable rbWritable = new ObjectWritable(BitSet.class, rb);
        rbWritable.readFields(in);
        rb = (BitSet) rbWritable.get();

        // Read the int[] and the two ints in the same order they were written.
        c = (int[]) ObjectWritable.readObject(in, conf);
        bNullPosition = in.readInt();
        rbNullPosition = in.readInt();
    }

    @Override
    public void write(DataOutput out) throws IOException
    {
        // Wrap each BitSet in an ObjectWritable and write it out.
        new ObjectWritable(BitSet.class, b).write(out);
        new ObjectWritable(BitSet.class, rb).write(out);

        ObjectWritable.writeObject(out, c, int[].class, conf);
        out.writeInt(bNullPosition);
        out.writeInt(rbNullPosition);
    }
}
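
In case it matters, I also wondered whether I should skip ObjectWritable
entirely and serialize the fields by hand, e.g. writing each BitSet as its
set-bit indices and the int[] with a length prefix. Below is a rough sketch
of what I had in mind (the helper method names writeBitSet/readBitSet are
just my own, not from any Hadoop API):

    // Write a BitSet as the count of set bits followed by their indices.
    private static void writeBitSet(DataOutput out, BitSet bits) throws IOException
    {
        out.writeInt(bits.cardinality());
        for (int i = bits.nextSetBit(0); i >= 0; i = bits.nextSetBit(i + 1))
        {
            out.writeInt(i);
        }
    }

    // Rebuild the BitSet by setting each index that was written.
    private static BitSet readBitSet(DataInput in) throws IOException
    {
        BitSet bits = new BitSet();
        int n = in.readInt();
        for (int i = 0; i < n; i++)
        {
            bits.set(in.readInt());
        }
        return bits;
    }

    // write() would then be roughly:
    //     writeBitSet(out, b);
    //     writeBitSet(out, rb);
    //     out.writeInt(c.length);              // length prefix for the int[]
    //     for (int v : c) out.writeInt(v);
    //     out.writeInt(bNullPosition);
    //     out.writeInt(rbNullPosition);
    // and readFields() would mirror the same order.

Is something like that preferable, or is the ObjectWritable approach above fine?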