Attached is a standalone utility to test compression compatibility
before starting HBase:

# ./hbase org.apache.hadoop.hbase.test.HFileCompressionTest
Usage: HFileCompressionTest hdfs compression
  compression must be one of [ none, gz, lzo ]

# ./hbase org.apache.hadoop.hbase.test.HFileCompressionTest hdfs://localhost:8020/ lzo
09/09/29 13:39:55 INFO lzo.GPLNativeCodeLoader: Loaded native gpl library
09/09/29 13:39:55 INFO lzo.LzoCodec: Successfully loaded & initialized
native-lzo library
09/09/29 13:39:55 INFO compress.CodecPool: Got brand-new compressor
OK

The utility creates a temporary HFile named '.hfile-comp-test' at the
specified HDFS path and outputs either 'OK' or 'FAILED'.
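The built-in codecs can be checked the same way, e.g. for gzip:

# ./hbase org.apache.hadoop.hbase.test.HFileCompressionTest hdfs://localhost:8020/ gz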


/*
 * Compression validation test: writes a small HFile with the
 * requested codec and reads it back.
 * [email protected]
 */
package org.apache.hadoop.hbase.test;

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;


public class HFileCompressionTest
{
	protected static Path path = new Path(".hfile-comp-test");

	public static void usage()
	{
		System.out.println("Usage: HFileCompressionTest hdfs compression");
		System.out.println("  compression must be one of [ none, gz, lzo ]");
		// Exit non-zero so scripts can tell a usage error from an OK run.
		System.exit(1);
	}

	protected static DistributedFileSystem openConnection(String urlString)
		throws java.net.URISyntaxException, java.io.IOException
	{
		// Connect to the DFS named by the URL, e.g. hdfs://localhost:8020/
		URI dfsUri = new URI(urlString);
		Configuration dfsConf = new Configuration();
		DistributedFileSystem dfs = new DistributedFileSystem();
		dfs.initialize(dfsUri, dfsConf);

		return dfs;
	}

	protected static boolean closeConnection(DistributedFileSystem dfs)
	{
		// Returns true if the filesystem was closed cleanly (or was
		// already null), false if close() threw.
		if (dfs == null)
			return true;
		try {
			dfs.close();
			return true;
		} catch (Exception e) {
			e.printStackTrace();
			return false;
		}
	}


	public static void main(String[] args)
	{
		if (args.length != 2)
			usage();
		try {
			DistributedFileSystem dfs = openConnection(args[0]);

			// Clear any leftover test file from a previous run.
			dfs.delete(path, false);

			// Write a one-entry HFile with the requested compression.
			HFile.Writer writer = new HFile.Writer(dfs, path,
					HFile.DEFAULT_BLOCKSIZE, args[1], null);
			writer.append(Bytes.toBytes("testkey"),
					Bytes.toBytes("testval"));
			writer.appendFileInfo(Bytes.toBytes("infokey"),
					Bytes.toBytes("infoval"));
			writer.close();

			// Read it back and verify the key survived the round trip.
			HFile.Reader reader = new HFile.Reader(dfs, path, null, false);
			reader.loadFileInfo();
			byte[] key = reader.getFirstKey();
			boolean rc = Bytes.toString(key).equals("testkey");
			reader.close();

			dfs.delete(path, false);
			closeConnection(dfs);

			if (rc)
			{
				System.out.println("OK");
				System.exit(0);
			}
		} catch (Exception e) {
			e.printStackTrace();
		}

		System.out.println("FAILED");
		System.exit(1);
	}

}
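If you only want to sanity-check that a codec loads at all, without a
running NameNode, the same round trip can be pointed at the local
filesystem. Below is a minimal sketch along those lines, assuming the
same HFile API as above; the class name LocalCompressionCheck and the
/tmp path are my own choices, not part of the attached utility:

package org.apache.hadoop.hbase.test;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.io.hfile.HFile;
import org.apache.hadoop.hbase.util.Bytes;

public class LocalCompressionCheck
{
	public static void main(String[] args) throws Exception
	{
		if (args.length != 1) {
			System.out.println("Usage: LocalCompressionCheck compression");
			System.exit(1);
		}

		// Same write/read round trip as above, but against the local
		// filesystem, so no DFS connection is needed.
		Configuration conf = new Configuration();
		FileSystem fs = FileSystem.getLocal(conf);
		Path path = new Path("/tmp/.hfile-comp-test");

		HFile.Writer writer = new HFile.Writer(fs, path,
				HFile.DEFAULT_BLOCKSIZE, args[0], null);
		writer.append(Bytes.toBytes("testkey"), Bytes.toBytes("testval"));
		writer.close();

		HFile.Reader reader = new HFile.Reader(fs, path, null, false);
		reader.loadFileInfo();
		boolean ok = Bytes.toString(reader.getFirstKey()).equals("testkey");
		reader.close();
		fs.delete(path, false);

		System.out.println(ok ? "OK" : "FAILED");
		System.exit(ok ? 0 : 1);
	}
}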
