Merge branch '1.6' into 1.7

Conflicts:
        core/src/main/java/org/apache/accumulo/core/util/Merge.java


Project: http://git-wip-us.apache.org/repos/asf/accumulo/repo
Commit: http://git-wip-us.apache.org/repos/asf/accumulo/commit/84d3e5c4
Tree: http://git-wip-us.apache.org/repos/asf/accumulo/tree/84d3e5c4
Diff: http://git-wip-us.apache.org/repos/asf/accumulo/diff/84d3e5c4

Branch: refs/heads/master
Commit: 84d3e5c4fa3064b644651d9ab958cb499f02c751
Parents: 6cc5ab5 c462001
Author: Christopher Tubbs <ctubb...@apache.org>
Authored: Tue Feb 2 19:19:26 2016 -0500
Committer: Christopher Tubbs <ctubb...@apache.org>
Committed: Tue Feb 2 19:19:26 2016 -0500

----------------------------------------------------------------------
 .../core/metadata/schema/DataFileValue.java     | 18 ++++++++++++---
 .../org/apache/accumulo/core/util/Merge.java    |  8 ++-----
 .../apache/accumulo/server/init/Initialize.java |  8 ++++---
 .../constraints/MetadataConstraintsTest.java    | 17 +++++++-------
 .../iterators/MetadataBulkLoadFilterTest.java   |  5 ++--
 .../apache/accumulo/server/util/CloneTest.java  | 24 ++++++++++++++------
 .../metadata/MetadataBatchScanTest.java         |  3 ++-
 7 files changed, 53 insertions(+), 30 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
index cebe041,dfa1114..5f1379d
--- a/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
+++ b/core/src/main/java/org/apache/accumulo/core/metadata/schema/DataFileValue.java
@@@ -16,6 -16,10 +16,10 @@@
   */
  package org.apache.accumulo.core.metadata.schema;
  
 -import static com.google.common.base.Charsets.UTF_8;
++import static java.nio.charset.StandardCharsets.UTF_8;
+ 
+ import org.apache.accumulo.core.data.Value;
+ 
  public class DataFileValue {
    private long size;
    private long numEntries;

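The hunks in this merge converge on DataFileValue's encoders in place of hand-built "size,entries" byte strings. A minimal usage sketch of that pattern, based only on the calls visible in the diff (the printed "0,0" form is an assumption about the encoding):

    // Sketch only; DataFileValue(long, long), encodeAsValue() and encodeAsString() appear in the hunks below
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;

    public class DataFileValueSketch {
      public static void main(String[] args) {
        DataFileValue dfv = new DataFileValue(0, 0);   // size = 0 bytes, 0 entries
        Value asValue = dfv.encodeAsValue();           // replaces new Value("0,0".getBytes(UTF_8))
        String asString = dfv.encodeAsString();        // string form used by the test helpers below
        System.out.println(asString);                  // assumed to print "0,0"
      }
    }
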
http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/core/src/main/java/org/apache/accumulo/core/util/Merge.java
----------------------------------------------------------------------
diff --cc core/src/main/java/org/apache/accumulo/core/util/Merge.java
index 3c60d25,c5e3e8e..9f6f6ab
--- a/core/src/main/java/org/apache/accumulo/core/util/Merge.java
+++ b/core/src/main/java/org/apache/accumulo/core/util/Merge.java
@@@ -31,9 -29,10 +29,10 @@@ import org.apache.accumulo.core.conf.Ac
  import org.apache.accumulo.core.conf.ConfigurationCopy;
  import org.apache.accumulo.core.conf.Property;
  import org.apache.accumulo.core.data.Key;
 -import org.apache.accumulo.core.data.KeyExtent;
  import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.data.impl.KeyExtent;
  import org.apache.accumulo.core.metadata.MetadataTable;
+ import org.apache.accumulo.core.metadata.schema.DataFileValue;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
  import org.apache.accumulo.core.security.Authorizations;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
----------------------------------------------------------------------
diff --cc server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
index 9b9dac9,491dc25..4e5864e
--- a/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
+++ b/server/base/src/main/java/org/apache/accumulo/server/init/Initialize.java
@@@ -30,12 -27,9 +30,10 @@@ import java.util.HashMap
  import java.util.HashSet;
  import java.util.Locale;
  import java.util.Map.Entry;
 +import java.util.Set;
 +import java.util.TreeMap;
  import java.util.UUID;
  
--import jline.console.ConsoleReader;
- 
  import org.apache.accumulo.core.Constants;
  import org.apache.accumulo.core.cli.Help;
  import org.apache.accumulo.core.client.AccumuloSecurityException;
@@@ -58,21 -48,9 +56,22 @@@ import org.apache.accumulo.core.master.
  import org.apache.accumulo.core.master.thrift.MasterGoalState;
  import org.apache.accumulo.core.metadata.MetadataTable;
  import org.apache.accumulo.core.metadata.RootTable;
++import org.apache.accumulo.core.metadata.schema.DataFileValue;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.ReplicationSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.CurrentLocationColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.FutureLocationColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.LogColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.ServerColumnFamily;
 +import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.TabletColumnFamily;
 +import org.apache.accumulo.core.replication.ReplicationConstants;
 +import org.apache.accumulo.core.replication.ReplicationSchema.StatusSection;
 +import org.apache.accumulo.core.replication.ReplicationSchema.WorkSection;
 +import org.apache.accumulo.core.replication.ReplicationTable;
  import org.apache.accumulo.core.util.CachedConfiguration;
  import org.apache.accumulo.core.util.ColumnFQ;
 +import org.apache.accumulo.core.util.LocalityGroupUtil;
  import org.apache.accumulo.core.util.Pair;
  import org.apache.accumulo.core.volume.VolumeConfiguration;
  import org.apache.accumulo.core.zookeeper.ZooUtil;
@@@ -104,17 -77,12 +103,19 @@@ import org.apache.hadoop.fs.FileStatus
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.io.Text;
 -import org.apache.log4j.Logger;
 +import org.apache.hadoop.security.UserGroupInformation;
  import org.apache.zookeeper.KeeperException;
  import org.apache.zookeeper.ZooDefs.Ids;
 +import org.slf4j.Logger;
 +import org.slf4j.LoggerFactory;
  
  import com.beust.jcommander.Parameter;
 +import com.google.auto.service.AutoService;
 +import com.google.common.base.Joiner;
 +import com.google.common.base.Optional;
 +
++import jline.console.ConsoleReader;
+ 
  /**
  * This class is used to setup the directory structure and the root tablet to get an instance started
   *
@@@ -154,8 -121,7 +155,9 @@@ public class Initialize implements Keyw
    }
  
    private static HashMap<String,String> initialMetadataConf = new HashMap<String,String>();
 +  private static HashMap<String,String> initialMetadataCombinerConf = new HashMap<String,String>();
 +  private static HashMap<String,String> initialReplicationTableConf = new HashMap<String,String>();
+ 
    static {
      initialMetadataConf.put(Property.TABLE_FILE_COMPRESSED_BLOCK_SIZE.getKey(), "32K");
      initialMetadataConf.put(Property.TABLE_FILE_REPLICATION.getKey(), "5");
@@@ -472,19 -342,16 +474,19 @@@
      tabletWriter.close();
    }
  
 -  private static void createEntriesForTablet(FileSKVWriter writer, String tableId, String tabletDir, Text tabletPrevEndRow, Text tabletEndRow)
 -      throws IOException {
 -    Text extent = new Text(KeyExtent.getMetadataEntry(new Text(tableId), tabletEndRow));
 -    addEntry(writer, extent, DIRECTORY_COLUMN, new Value(tabletDir.getBytes(UTF_8)));
 -    addEntry(writer, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
 -    addEntry(writer, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tabletPrevEndRow));
 +  private static void createEntriesForTablet(TreeMap<Key,Value> map, Tablet tablet) {
-     Value EMPTY_SIZE = new Value("0,0".getBytes(UTF_8));
++    Value EMPTY_SIZE = new DataFileValue(0, 0).encodeAsValue();
 +    Text extent = new Text(KeyExtent.getMetadataEntry(new Text(tablet.tableId), tablet.endRow));
 +    addEntry(map, extent, DIRECTORY_COLUMN, new Value(tablet.dir.getBytes(UTF_8)));
 +    addEntry(map, extent, TIME_COLUMN, new Value((TabletTime.LOGICAL_TIME_ID + "0").getBytes(UTF_8)));
 +    addEntry(map, extent, PREV_ROW_COLUMN, KeyExtent.encodePrevEndRow(tablet.prevEndRow));
 +    for (String file : tablet.files) {
 +      addEntry(map, extent, new ColumnFQ(DataFileColumnFamily.NAME, new Text(file)), EMPTY_SIZE);
 +    }
    }
  
 -  private static void addEntry(FileSKVWriter writer, Text row, ColumnFQ col, Value value) throws IOException {
 -    writer.append(new Key(row, col.getColumnFamily(), col.getColumnQualifier(), 0), value);
 +  private static void addEntry(TreeMap<Key,Value> map, Text row, ColumnFQ col, Value value) {
 +    map.put(new Key(row, col.getColumnFamily(), col.getColumnQualifier(), 0), value);
    }
  
    private static void createDirectories(VolumeManager fs, String... dirs) throws IOException {

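With this change Initialize builds the initial tablet entries in memory and seeds file sizes through DataFileValue. A rough sketch of the TreeMap-based addEntry pattern shown above (the row, family, and qualifier values here are made-up examples, not the ones Initialize writes):

    // Sketch only; mirrors the addEntry helper in the hunk above
    import java.util.TreeMap;
    import org.apache.accumulo.core.data.Key;
    import org.apache.accumulo.core.data.Value;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;
    import org.apache.hadoop.io.Text;

    public class TabletEntrySketch {
      static void addEntry(TreeMap<Key,Value> map, Text row, Text fam, Text qual, Value value) {
        map.put(new Key(row, fam, qual, 0), value);   // fixed timestamp 0, entries kept sorted by Key
      }

      public static void main(String[] args) {
        TreeMap<Key,Value> map = new TreeMap<Key,Value>();
        Value emptySize = new DataFileValue(0, 0).encodeAsValue();   // EMPTY_SIZE in the hunk above
        addEntry(map, new Text("1<"), new Text("file"), new Text("/default_tablet/0000.rf"), emptySize);
        System.out.println(map.size());
      }
    }
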
http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
----------------------------------------------------------------------
diff --cc server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
index 7b6eec2,7b6eec2..61167fd
--- a/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/constraints/MetadataConstraintsTest.java
@@@ -25,6 -25,6 +25,7 @@@ import java.util.List
  import org.apache.accumulo.core.conf.AccumuloConfiguration;
  import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.data.Value;
++import org.apache.accumulo.core.metadata.schema.DataFileValue;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
  import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
@@@ -140,7 -140,7 +141,7 @@@ public class MetadataConstraintsTest 
      // inactive txid
      m = new Mutation(new Text("0;foo"));
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("12345".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
      violations = mc.check(null, m);
      assertNotNull(violations);
      assertEquals(1, violations.size());
@@@ -149,7 -149,7 +150,7 @@@
      // txid that throws exception
      m = new Mutation(new Text("0;foo"));
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("9".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
      violations = mc.check(null, m);
      assertNotNull(violations);
      assertEquals(1, violations.size());
@@@ -158,7 -158,7 +159,7 @@@
      // active txid w/ file
      m = new Mutation(new Text("0;foo"));
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
      violations = mc.check(null, m);
      assertNull(violations);
  
@@@ -173,9 -173,9 +174,9 @@@
      // two active txids w/ files
      m = new Mutation(new Text("0;foo"));
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("7".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new DataFileValue(1, 1).encodeAsValue());
      violations = mc.check(null, m);
      assertNotNull(violations);
      assertEquals(1, violations.size());
@@@ -184,16 -184,16 +185,16 @@@
      // two files w/ one active txid
      m = new Mutation(new Text("0;foo"));
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile2"), new DataFileValue(1, 1).encodeAsValue());
      violations = mc.check(null, m);
      assertNull(violations);
  
      // two loaded w/ one active txid and one file
      m = new Mutation(new Text("0;foo"));
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile"), new Value("5".getBytes()));
--    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new Value("1,1".getBytes()));
++    m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
      m.put(TabletsSection.BulkFileColumnFamily.NAME, new Text("/someFile2"), new Value("5".getBytes()));
      violations = mc.check(null, m);
      assertNotNull(violations);

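The test updates above swap Value("1,1".getBytes()) for DataFileValue(1, 1).encodeAsValue(), leaving the mutation contents semantically the same. A hedged sketch of that mutation pattern outside the test (row and file names are placeholders):

    // Sketch only; the constants and calls are the ones imported and used by MetadataConstraintsTest above
    import org.apache.accumulo.core.data.Mutation;
    import org.apache.accumulo.core.metadata.schema.DataFileValue;
    import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
    import org.apache.hadoop.io.Text;

    public class MetadataMutationSketch {
      public static void main(String[] args) {
        Mutation m = new Mutation(new Text("0;foo"));
        // One data file of size 1 with 1 entry, encoded via the schema class
        m.put(DataFileColumnFamily.NAME, new Text("/someFile"), new DataFileValue(1, 1).encodeAsValue());
        System.out.println(m.getUpdates().size());
      }
    }
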
http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
----------------------------------------------------------------------
diff --cc server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
index 7e9543f,cfdd5e9..ed662a5
--- a/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/iterators/MetadataBulkLoadFilterTest.java
@@@ -30,9 -30,8 +30,10 @@@ import org.apache.accumulo.core.iterato
  import org.apache.accumulo.core.iterators.IteratorUtil.IteratorScope;
  import org.apache.accumulo.core.iterators.SortedKeyValueIterator;
  import org.apache.accumulo.core.iterators.SortedMapIterator;
++import org.apache.accumulo.core.metadata.schema.DataFileValue;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
 +import org.apache.accumulo.core.security.Authorizations;
  import org.apache.accumulo.core.util.ColumnFQ;
  import org.apache.accumulo.fate.zookeeper.TransactionWatcher.Arbitrator;
  import org.apache.hadoop.io.Text;
@@@ -85,12 -84,12 +86,12 @@@ public class MetadataBulkLoadFilterTes
  
      // following should not be deleted by filter
      put(tm1, "2;m", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, 
"/t1");
--    put(tm1, "2;m", DataFileColumnFamily.NAME, "/t1/file1", "1,1");
++    put(tm1, "2;m", DataFileColumnFamily.NAME, "/t1/file1", new 
DataFileValue(1, 1).encodeAsString());
      put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file1", 
"5");
      put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file3", 
"7");
      put(tm1, "2;m", TabletsSection.BulkFileColumnFamily.NAME, "/t1/file4", 
"9");
      put(tm1, "2<", TabletsSection.ServerColumnFamily.DIRECTORY_COLUMN, "/t2");
--    put(tm1, "2<", DataFileColumnFamily.NAME, "/t2/file2", "1,1");
++    put(tm1, "2<", DataFileColumnFamily.NAME, "/t2/file2", new 
DataFileValue(1, 1).encodeAsString());
      put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file6", 
"5");
      put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file7", 
"7");
      put(tm1, "2<", TabletsSection.BulkFileColumnFamily.NAME, "/t2/file8", 
"9");

http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
----------------------------------------------------------------------
diff --cc server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
index 0c696a1,74d5e64..9d33935
--- a/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
+++ b/server/base/src/test/java/org/apache/accumulo/server/util/CloneTest.java
@@@ -28,10 -29,11 +29,11 @@@ import org.apache.accumulo.core.client.
  import org.apache.accumulo.core.client.mock.MockInstance;
  import org.apache.accumulo.core.client.security.tokens.PasswordToken;
  import org.apache.accumulo.core.data.Key;
 -import org.apache.accumulo.core.data.KeyExtent;
  import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.data.impl.KeyExtent;
  import org.apache.accumulo.core.metadata.MetadataTable;
+ import org.apache.accumulo.core.metadata.schema.DataFileValue;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
  import org.apache.accumulo.core.security.Authorizations;

http://git-wip-us.apache.org/repos/asf/accumulo/blob/84d3e5c4/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
----------------------------------------------------------------------
diff --cc test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
index 620ae8b,07bfdc7..1c7ce67
--- a/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
+++ b/test/src/main/java/org/apache/accumulo/test/performance/metadata/MetadataBatchScanTest.java
@@@ -37,8 -36,10 +37,9 @@@ import org.apache.accumulo.core.client.
  import org.apache.accumulo.core.data.Mutation;
  import org.apache.accumulo.core.data.Range;
  import org.apache.accumulo.core.data.Value;
 +import org.apache.accumulo.core.data.impl.KeyExtent;
  import org.apache.accumulo.core.metadata.MetadataTable;
+ import org.apache.accumulo.core.metadata.schema.DataFileValue;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection;
  import org.apache.accumulo.core.metadata.schema.MetadataSchema.TabletsSection.DataFileColumnFamily;
  import org.apache.accumulo.core.security.Authorizations;
