Repository: phoenix
Updated Branches:
  refs/heads/4.x-HBase-1.1 d77bad2b3 -> 9194a00a9


PHOENIX-3609 Detect and fix corrupted local index region during compaction
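
In outline: during a major compaction of a local index store (column families
with the "L#" prefix), IndexHalfStoreFileReaderGenerator now asks RepairUtil
whether every store file's first row key still falls inside the region
boundaries. If an HFile landed in the wrong region, the compaction swaps in a
DataTableLocalIndexRegionScanner that scans the data column families and
regenerates the local index cells through each IndexMaintainer, so the store
is rewritten from the data table instead of from the corrupt files. The data
table name needed for this is stamped on the table descriptor as the new
DATA_TABLE_NAME property.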


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/9194a00a
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/9194a00a
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/9194a00a

Branch: refs/heads/4.x-HBase-1.1
Commit: 9194a00a9743cc36dc17f58a45148e048e9de7a3
Parents: d77bad2
Author: Ankit Singhal <ankitsingha...@gmail.com>
Authored: Mon Feb 6 13:16:13 2017 +0530
Committer: Ankit Singhal <ankitsingha...@gmail.com>
Committed: Mon Feb 6 13:16:13 2017 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/index/LocalIndexIT.java     | 79 ++++++++++++++++++
 .../DataTableLocalIndexRegionScanner.java       | 86 ++++++++++++++++++++
 .../IndexHalfStoreFileReaderGenerator.java      | 74 +++++++++++++++--
 .../coprocessor/MetaDataRegionObserver.java     | 30 ++++---
 .../org/apache/phoenix/hbase/index/Indexer.java |  3 +-
 .../apache/phoenix/schema/MetaDataClient.java   |  1 +
 .../java/org/apache/phoenix/util/IndexUtil.java |  5 ++
 .../org/apache/phoenix/util/MetaDataUtil.java   |  8 ++
 .../org/apache/phoenix/util/RepairUtil.java     | 40 +++++++++
 .../org/apache/phoenix/util/SchemaUtil.java     |  2 +-
 10 files changed, 307 insertions(+), 21 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 278f4cf..f5135d9 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -26,22 +26,32 @@ import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
+import java.io.IOException;
 import java.sql.Connection;
 import java.sql.DriverManager;
 import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.Collection;
+import java.util.Iterator;
+import java.util.List;
 
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hbase.HColumnDescriptor;
+import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
 import org.apache.hadoop.hbase.client.HTable;
+import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.ResultScanner;
 import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.FSUtils;
 import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.compile.QueryPlan;
 import org.apache.phoenix.coprocessor.BaseScannerRegionObserver;
@@ -563,5 +573,74 @@ public class LocalIndexIT extends BaseLocalIndexIT {
             conn1.close();
         }
     }
+    
+    @Test
+    public void testLocalIndexAutomaticRepair() throws Exception {
+        if (isNamespaceMapped) { return; }
+        PhoenixConnection conn = DriverManager.getConnection(getUrl()).unwrap(PhoenixConnection.class);
+        try (HTableInterface metaTable = conn.getQueryServices().getTable(TableName.META_TABLE_NAME.getName());
+                HBaseAdmin admin = conn.getQueryServices().getAdmin();) {
+            Statement statement = conn.createStatement();
+            final String tableName = "T_AUTO_MATIC_REPAIR";
+            String indexName = "IDX_T_AUTO_MATIC_REPAIR";
+            String indexName1 = "IDX_T_AUTO_MATIC_REPAIR_1";
+            statement.execute("create table " + tableName + " (id integer not null,fn varchar,"
+                    + "cf1.ln varchar constraint pk primary key(id)) split on (1,2,3,4,5)");
+            statement.execute("create local index " + indexName + " on " + tableName + " (fn,cf1.ln)");
+            statement.execute("create local index " + indexName1 + " on " + tableName + " (fn)");
+            for (int i = 0; i < 7; i++) {
+                statement.execute("upsert into " + tableName + " values(" + i + ",'fn" + i + "','ln" + i + "')");
+            }
+            conn.commit();
+            ResultSet rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName);
+            assertTrue(rs.next());
+            assertEquals(7, rs.getLong(1));
+            List<HRegionInfo> tableRegions = admin.getTableRegions(TableName.valueOf(tableName));
+            admin.disableTable(tableName);
+            copyLocalIndexHFiles(config, tableRegions.get(0), tableRegions.get(1), false);
+            copyLocalIndexHFiles(config, tableRegions.get(3), tableRegions.get(0), false);
+
+            admin.enableTable(tableName);
+
+            int count = getCount(conn, tableName, "L#0");
+            assertTrue(count > 14);
+            admin.majorCompact(tableName);
+            int tryCount = 5; // need to wait for the corrupted local index region to be rebuilt
+            while (tryCount-- > 0 && count != 14) {
+                Thread.sleep(30000);
+                count = getCount(conn, tableName, "L#0");
+            }
+            assertEquals(14, count);
+            rs = statement.executeQuery("SELECT COUNT(*) FROM " + indexName1);
+            assertTrue(rs.next());
+            assertEquals(7, rs.getLong(1));
+        }
+    }
+
+    private void copyLocalIndexHFiles(Configuration conf, HRegionInfo fromRegion, HRegionInfo toRegion, boolean move)
+            throws IOException {
+        Path root = FSUtils.getRootDir(conf);
+
+        Path sourceRegionPath = new Path(HTableDescriptor.getTableDir(root, fromRegion.getTableName()) + Path.SEPARATOR
+                + fromRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
+        Path hfilePath = FSUtils.getCurrentFileSystem(conf).listFiles(sourceRegionPath, true).next().getPath();
+        Path targetRegionPath = new Path(HTableDescriptor.getTableDir(root, toRegion.getTableName()) + Path.SEPARATOR
+                + toRegion.getEncodedName() + Path.SEPARATOR + "L#0/");
+        FileSystem currentFileSystem = FSUtils.getCurrentFileSystem(conf);
+        assertTrue(FileUtil.copy(currentFileSystem, hfilePath, currentFileSystem, targetRegionPath, move, conf));
+    }
+
+    private int getCount(PhoenixConnection conn, String tableName, String columnFamily)
+            throws IOException, SQLException {
+        Iterator<Result> iterator = conn.getQueryServices().getTable(Bytes.toBytes(tableName))
+                .getScanner(Bytes.toBytes(columnFamily)).iterator();
+        int count = 0;
+        while (iterator.hasNext()) {
+            iterator.next();
+            count++;
+        }
+        return count;
+    }
+
 
 }
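
The test above corrupts the index by copying an "L#0" HFile from one region's
directory into another's while the table is disabled, then relies on a major
compaction to rebuild the family. A minimal sketch of its wait loop as a
reusable helper (majorCompactAndWait and the Callable wiring are hypothetical,
not part of this commit):

    // Trigger a major compaction, then poll until `done` holds or we time out.
    // The repair runs inside the compaction, so completion is asynchronous.
    private static void majorCompactAndWait(HBaseAdmin admin, String table,
            long timeoutMs, java.util.concurrent.Callable<Boolean> done) throws Exception {
        admin.majorCompact(table);
        long deadline = System.currentTimeMillis() + timeoutMs;
        while (!done.call()) {
            if (System.currentTimeMillis() > deadline) {
                throw new AssertionError("local index repair did not complete in time");
            }
            Thread.sleep(1000);
        }
    }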

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
new file mode 100644
index 0000000..e4486bc
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/DataTableLocalIndexRegionScanner.java
@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+
+import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
+import org.apache.phoenix.coprocessor.DelegateRegionScanner;
+import org.apache.phoenix.hbase.index.ValueGetter;
+import org.apache.phoenix.hbase.index.util.GenericKeyValueBuilder;
+import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr;
+import org.apache.phoenix.hbase.index.util.KeyValueBuilder;
+import org.apache.phoenix.index.IndexMaintainer;
+import org.apache.phoenix.schema.tuple.MultiKeyValueTuple;
+
+public class DataTableLocalIndexRegionScanner extends DelegateRegionScanner {
+    MultiKeyValueTuple result = new MultiKeyValueTuple();
+    ImmutableBytesWritable ptr = new ImmutableBytesWritable();
+    KeyValueBuilder kvBuilder = GenericKeyValueBuilder.INSTANCE;
+    private List<IndexMaintainer> indexMaintainers;
+    private byte[] startKey;
+    private byte[] endKey;
+    private byte[] localIndexFamily;
+
+    public DataTableLocalIndexRegionScanner(RegionScanner scanner, Region region,
+            List<IndexMaintainer> indexMaintainers, byte[] localIndexFamily) throws IOException {
+        super(scanner);
+        this.indexMaintainers = indexMaintainers;
+        this.startKey = region.getRegionInfo().getStartKey();
+        this.endKey = region.getRegionInfo().getEndKey();
+        this.localIndexFamily = localIndexFamily;
+    }
+
+    @Override
+    public boolean next(List<Cell> outResult, ScannerContext scannerContext) throws IOException {
+        List<Cell> dataTableResults = new ArrayList<Cell>();
+        boolean next = super.next(dataTableResults, scannerContext);
+        getLocalIndexCellsFromDataTable(dataTableResults, outResult);
+        return next;
+    }
+
+    @Override
+    public boolean next(List<Cell> results) throws IOException {
+        List<Cell> dataTableResults = new ArrayList<Cell>();
+        boolean next = super.next(dataTableResults);
+        getLocalIndexCellsFromDataTable(dataTableResults, results);
+        return next;
+    }
+
+    private void getLocalIndexCellsFromDataTable(List<Cell> dataTableResults, List<Cell> localIndexResults)
+            throws IOException {
+        if (!dataTableResults.isEmpty()) {
+            result.setKeyValues(dataTableResults);
+            for (IndexMaintainer maintainer : indexMaintainers) {
+                result.getKey(ptr);
+                ValueGetter valueGetter = maintainer
+                        .createGetterFromKeyValues(ImmutableBytesPtr.copyBytesIfNecessary(ptr), dataTableResults);
+                List<Cell> list = maintainer.buildUpdateMutation(kvBuilder, valueGetter, ptr,
+                        dataTableResults.get(0).getTimestamp(), startKey, endKey).getFamilyCellMap()
+                        .get(localIndexFamily);
+                if (list != null) {
+                    localIndexResults.addAll(list);
+                }
+            }
+        }
+    }
+
+}
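
DataTableLocalIndexRegionScanner wraps the data-table scanner opened by the
repair path and, for each batch of data rows, replays every local
IndexMaintainer to emit the index cells that should exist for this region. A
sketch of how a caller drains it (the `writer` target is hypothetical; in the
commit the compaction itself consumes the scanner):

    List<Cell> rebuilt = new ArrayList<Cell>();
    boolean more;
    do {
        rebuilt.clear();
        more = scanner.next(rebuilt);   // yields regenerated "L#..." cells
        for (Cell cell : rebuilt) {
            writer.append(cell);        // e.g. a StoreFile.Writer (assumed)
        }
    } while (more);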

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 7cd6405..537febd 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -28,7 +28,6 @@ import java.util.NavigableSet;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hbase.Cell;
 import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
@@ -64,6 +63,11 @@ import org.apache.phoenix.util.IndexUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.PhoenixRuntime;
 import org.apache.phoenix.util.QueryUtil;
+import org.apache.phoenix.util.RepairUtil;
+
+import com.google.common.collect.Lists;
+
+import jline.internal.Log;
 
 public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
     
@@ -184,15 +188,22 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
     public InternalScanner preCompactScannerOpen(ObserverContext<RegionCoprocessorEnvironment> c,
             Store store, List<? extends KeyValueScanner> scanners, ScanType scanType,
             long earliestPutTs, InternalScanner s, CompactionRequest request) throws IOException {
-        if (!store.getFamily().getNameAsString()
-                .startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX)
-                || s != null
-                || !store.hasReferences()) {
-            return s;
-        }
-        List<StoreFileScanner> newScanners = new ArrayList<StoreFileScanner>(scanners.size());
+
+        if (!IndexUtil.isLocalIndexStore(store) || s != null) { return s; }
         Scan scan = new Scan();
         scan.setMaxVersions(store.getFamily().getMaxVersions());
+        if (!store.hasReferences()) {
+            InternalScanner repairScanner = null;
+            if (request.isMajor() && !RepairUtil.isLocalIndexStoreFilesConsistent(c.getEnvironment(), store)) {
+                repairScanner = getRepairScanner(c.getEnvironment(), store);
+            }
+            if (repairScanner != null) {
+                return repairScanner;
+            } else {
+                return s;
+            }
+        }
+        List<StoreFileScanner> newScanners = new ArrayList<StoreFileScanner>(scanners.size());
         boolean scanUsePread = c.getEnvironment().getConfiguration().getBoolean("hbase.storescanner.use.pread", scan.isSmall());
         for(KeyValueScanner scanner: scanners) {
             Reader reader = ((StoreFileScanner) scanner).getReader();
@@ -238,7 +249,54 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
         }
         return viewConstants;
     }
+
+    /**
+     * @param env the region coprocessor environment
+     * @param store the local index store to repair
+     * @return a scanner that regenerates the local index data for the passed store, or null if repair is not possible
+     * @throws IOException
+     */
+    private InternalScanner getRepairScanner(RegionCoprocessorEnvironment env, Store store) throws IOException {
+        Scan scan = new Scan();
+        scan.setMaxVersions(store.getFamily().getMaxVersions());
+        for (Store s : env.getRegion().getStores()) {
+            if (!IndexUtil.isLocalIndexStore(s)) {
+                scan.addFamily(s.getFamily().getName());
+            }
+        }
+        try {
+            PhoenixConnection conn = QueryUtil.getConnection(env.getConfiguration()).unwrap(PhoenixConnection.class);
+            String dataTableName = MetaDataUtil.getPhoenixTableNameFromDesc(env.getRegion().getTableDesc());
+            if (dataTableName == null) {
+                Log.warn("Found a corrupted local index for region " + env.getRegion().getRegionInfo()
+                        + " but the data table attribute is not set in the table descriptor, so automatic repair "
+                        + "will not succeed; the local index may have been created by an old client");
+                return null;
+            }
+            PTable dataPTable = PhoenixRuntime.getTable(conn, dataTableName);
+            final List<IndexMaintainer> maintainers = Lists
+                    .newArrayListWithExpectedSize(dataPTable.getIndexes().size());
+            for (PTable index : dataPTable.getIndexes()) {
+                if (index.getIndexType() == IndexType.LOCAL) {
+                    maintainers.add(index.getIndexMaintainer(dataPTable, conn));
+                }
+            }
+            return new DataTableLocalIndexRegionScanner(env.getRegion().getScanner(scan), env.getRegion(),
+                    maintainers, store.getFamily().getName());
+        } catch (ClassNotFoundException | SQLException e) {
+            throw new IOException(e);
+        }
+    }
+
     @Override
     public KeyValueScanner preStoreScannerOpen(final ObserverContext<RegionCoprocessorEnvironment> c,
         final Store store, final Scan scan, final NavigableSet<byte[]> targetCols,

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
index a60de03..8f36803 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/MetaDataRegionObserver.java
@@ -17,11 +17,6 @@
  */
 package org.apache.phoenix.coprocessor;
 
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.SYSTEM_CATALOG;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_NAME;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TABLE_SCHEM;
-import static org.apache.phoenix.jdbc.PhoenixDatabaseMetaData.TENANT_ID;
 import static org.apache.phoenix.schema.types.PDataType.TRUE_BYTES;
 
 import java.io.IOException;
@@ -225,12 +220,12 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
             // separately, all updating the same data.
             RegionScanner scanner = null;
             PhoenixConnection conn = null;
-            if (inProgress.get() > 0) {
+            if (inProgress.getAndIncrement() > 0) {
+                inProgress.decrementAndGet();
                 LOG.debug("New ScheduledBuildIndexTask skipped as there is 
already one running");
                 return;
             }
             try {
-                inProgress.incrementAndGet();
                 Scan scan = new Scan();
                 SingleColumnValueFilter filter = new SingleColumnValueFilter(PhoenixDatabaseMetaData.TABLE_FAMILY_BYTES,
                     PhoenixDatabaseMetaData.INDEX_DISABLE_TIMESTAMP_BYTES,
@@ -266,7 +261,6 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 
                     if (disabledTimeStamp == null || disabledTimeStamp.length == 0 || (indexState != null
                             && PIndexState.BUILDING == PIndexState.fromSerializedValue(Bytes.toString(indexState)))) {
-
                         // Don't rebuild an index in the BUILDING state, because it is marked for async build
                         continue;
                     }
@@ -290,7 +284,8 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 
                     // validity check
                     if (indexTable == null || indexTable.length == 0) {
-                        LOG.debug("Index rebuild has been skipped for row=" + 
r);
+                        LOG.debug("We find IndexTable empty during rebuild 
scan:" + scan
+                                + "so, Index rebuild has been skipped for 
row=" + r);
                         continue;
                     }
 
@@ -335,6 +330,9 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                         indexesToPartiallyRebuild = Lists.newArrayListWithExpectedSize(dataPTable.getIndexes().size());
                         dataTableToIndexesMap.put(dataPTable, indexesToPartiallyRebuild);
                     }
+                    LOG.debug("Found " + indexPTable.getIndexState() + " index " + indexPTable.getName()
+                            + " on data table " + dataPTable.getName() + ", which was disabled at "
+                            + indexPTable.getIndexDisableTimestamp());
                     indexesToPartiallyRebuild.add(indexPTable);
                 } while (hasMore);
 
@@ -368,7 +366,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                             long timeStamp = Math.max(0, earliestDisableTimestamp - overlapTime);
                             LOG.info("Starting to build " + dataPTable + " indexes " + indexesToPartiallyRebuild
                                     + " from timestamp=" + timeStamp);
-
+
                             TableRef tableRef = new TableRef(null, dataPTable, HConstants.LATEST_TIMESTAMP, false);
                             // TODO Need to set high timeout
                             PostDDLCompiler compiler = new PostDDLCompiler(conn);
@@ -379,6 +377,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
 
                             long scanEndTime = getTimestampForBatch(timeStamp,
                                     batchExecutedPerTableMap.get(dataPTable.getName()));
+
                             dataTableScan.setTimeRange(timeStamp, scanEndTime);
                             dataTableScan.setCacheBlocks(false);
                             dataTableScan.setAttribute(BaseScannerRegionObserver.REBUILD_INDEXES, TRUE_BYTES);
@@ -389,9 +388,17 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                                     conn);
                             byte[] attribValue = ByteUtil.copyKeyBytesIfNecessary(indexMetaDataPtr);
                             dataTableScan.setAttribute(PhoenixIndexCodec.INDEX_MD, attribValue);
+                            LOG.info("Starting to partially rebuild indexes " + indexesToPartiallyRebuild
+                                    + " on data table " + dataPTable.getName() + " from the earliest disable timestamp "
+                                    + earliestDisableTimestamp + " until "
+                                    + (scanEndTime == HConstants.LATEST_TIMESTAMP ? "LATEST_TIMESTAMP" : scanEndTime));
                             MutationState mutationState = plan.execute();
                             long rowCount = mutationState.getUpdateCount();
-                            LOG.info(rowCount + " rows of index which are rebuild");
+                            if (scanEndTime == HConstants.LATEST_TIMESTAMP) {
+                                LOG.info("Rebuild completed for all inactive/disabled indexes in data table "
+                                        + dataPTable.getName());
+                            }
+                            LOG.info("Number of data table rows read during the rebuild: " + rowCount);
                             for (PTable indexPTable : indexesToPartiallyRebuild) {
                                 String indexTableFullName = SchemaUtil.getTableName(
                                         indexPTable.getSchemaName().getString(),
@@ -400,6 +407,7 @@ public class MetaDataRegionObserver extends BaseRegionObserver {
                                     updateIndexState(conn, indexTableFullName, env, PIndexState.INACTIVE,
                                             PIndexState.ACTIVE);
                                     batchExecutedPerTableMap.remove(dataPTable.getName());
+                                    LOG.info("Making index " + indexPTable.getTableName() + " active after rebuilding");
                                 } else {
 
                                     updateDisableTimestamp(conn, indexTableFullName, env, scanEndTime, metaTable);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
index 3b05a7d..0d051be 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/hbase/index/Indexer.java
@@ -529,7 +529,8 @@ public class Indexer extends BaseRegionObserver {
     try {
         writer.writeAndKillYourselfOnFailure(updates, true);
     } catch (IOException e) {
-        LOG.error("Exception thrown instead of killing server during index 
writing", e);
+            LOG.error("During WAL replay of outstanding index updates, "
+                    + "Exception is thrown instead of killing server during 
index writing", e);
     }
   }
 

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
index a3b9b32..cc2b5b9 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/MetaDataClient.java
@@ -1442,6 +1442,7 @@ public class MetaDataClient {
                     statement.getProps().put("", new 
Pair<String,Object>(DEFAULT_COLUMN_FAMILY_NAME,dataTable.getDefaultFamilyName().getString()));
                 }
                 PrimaryKeyConstraint pk = FACTORY.primaryKey(null, 
allPkColumns);
+                tableProps.put(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME, 
dataTable.getName().getString());
                 CreateTableStatement tableStatement = 
FACTORY.createTable(indexTableName, statement.getProps(), columnDefs, pk, 
statement.getSplitNodes(), PTableType.INDEX, statement.ifNotExists(), null, 
null, statement.getBindCount(), null);
                 table = createTableInternal(tableStatement, splits, dataTable, 
null, null, null, null, allocateIndexId, statement.getIndexType(), 
asyncCreatedDate, tableProps, commonFamilyProps);
                 break;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index 2a0f3b9..54f0453 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -50,6 +50,7 @@ import org.apache.hadoop.hbase.ipc.BlockingRpcCallback;
 import org.apache.hadoop.hbase.ipc.ServerRpcController;
 import org.apache.hadoop.hbase.protobuf.generated.ClientProtos.MutationProto;
 import org.apache.hadoop.hbase.regionserver.Region;
+import org.apache.hadoop.hbase.regionserver.Store;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -733,4 +734,8 @@ public class IndexUtil {
         }
         return true;
     }
+
+
+    public static boolean isLocalIndexStore(Store store) {
+        return store.getFamily().getNameAsString().startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
+    }
 }
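
The new helper centralizes the column-family-name prefix test that
preCompactScannerOpen previously inlined. An illustration (family names
assumed; the prefix constant is "L#"):

    // A store for family "L#0" is a local index store; "cf1" is not.
    assert "L#0".startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX);
    assert !"cf1".startsWith(QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX);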

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
index a9bb468..fe14ba4 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/MetaDataUtil.java
@@ -557,6 +557,10 @@ public class MetaDataUtil {
     public static final String IS_LOCAL_INDEX_TABLE_PROP_NAME = "IS_LOCAL_INDEX_TABLE";
     public static final byte[] IS_LOCAL_INDEX_TABLE_PROP_BYTES = Bytes.toBytes(IS_LOCAL_INDEX_TABLE_PROP_NAME);
 
+    public static final String DATA_TABLE_NAME_PROP_NAME = "DATA_TABLE_NAME";
+
+    private static final byte[] DATA_TABLE_NAME_PROP_BYTES = Bytes.toBytes(DATA_TABLE_NAME_PROP_NAME);
+
 
 
     public static Scan newTableRowsScan(byte[] key, long startTimeStamp, long stopTimeStamp){
@@ -642,4 +646,8 @@ public class MetaDataUtil {
     public static boolean isLocalIndexFamily(byte[] cf) {
         return Bytes.startsWith(cf, QueryConstants.LOCAL_INDEX_COLUMN_FAMILY_PREFIX_BYTES);
     }
+
+    public static String getPhoenixTableNameFromDesc(HTableDescriptor tableDesc) {
+        return Bytes.toString(tableDesc.getValue(MetaDataUtil.DATA_TABLE_NAME_PROP_BYTES));
+    }
 }
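
Together with the MetaDataClient change above, this gives the repair path a
round trip for the data table name. A sketch (`desc` stands for the physical
table's HTableDescriptor; the literal name is illustrative):

    // MetaDataClient stamps the property when the local index is created...
    desc.setValue(Bytes.toBytes(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME),
            Bytes.toBytes("MY_DATA_TABLE"));
    // ...and getRepairScanner reads it back during compaction-time repair.
    String dataTableName = MetaDataUtil.getPhoenixTableNameFromDesc(desc);  // "MY_DATA_TABLE"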

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
new file mode 100644
index 0000000..b9b7526
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/RepairUtil.java
@@ -0,0 +1,40 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.phoenix.util;
+
+import org.apache.hadoop.hbase.KeyValue;
+import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.regionserver.Store;
+import org.apache.hadoop.hbase.regionserver.StoreFile;
+import org.apache.hadoop.hbase.util.Bytes;
+
+public class RepairUtil {
+    public static boolean isLocalIndexStoreFilesConsistent(RegionCoprocessorEnvironment environment, Store store) {
+        byte[] startKey = environment.getRegion().getRegionInfo().getStartKey();
+        byte[] endKey = environment.getRegion().getRegionInfo().getEndKey();
+        byte[] indexKeyEmbedded = startKey.length == 0 ? new byte[endKey.length] : startKey;
+        for (StoreFile file : store.getStorefiles()) {
+            byte[] fileFirstRowKey = KeyValue.createKeyValueFromKey(file.getReader().getFirstKey()).getRow();
+            if ((fileFirstRowKey != null && Bytes.compareTo(fileFirstRowKey, 0, indexKeyEmbedded.length,
+                    indexKeyEmbedded, 0, indexKeyEmbedded.length) != 0)
+                    /*|| (endKey.length > 0 && Bytes.compareTo(file.getLastKey(), endKey) < 0)*/) { return false; }
+        }
+        return true;
+    }
+
+}
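
The consistency check relies on local index row keys being prefixed with the
region start key (or, for the first region, a run of zero bytes the length of
the end key). An illustration with assumed keys for a region [2,3):

    byte[] regionStart = Bytes.toBytes("2");
    byte[] okFirstRow = Bytes.toBytes("2x");    // prefix matches: consistent
    byte[] badFirstRow = Bytes.toBytes("4x");   // copied in from region [4,5): corrupt
    // The same prefix comparison RepairUtil applies to each store file:
    Bytes.compareTo(okFirstRow, 0, regionStart.length, regionStart, 0, regionStart.length);   // == 0
    Bytes.compareTo(badFirstRow, 0, regionStart.length, regionStart, 0, regionStart.length);  // != 0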

http://git-wip-us.apache.org/repos/asf/phoenix/blob/9194a00a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index 9f22239..ed90426 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -805,7 +805,7 @@ public class SchemaUtil {
     public static boolean hasHTableDescriptorProps(Map<String, Object> tableProps) {
         int pTablePropCount = 0;
         for (String prop : tableProps.keySet()) {
-            if (TableProperty.isPhoenixTableProperty(prop)) {
+            if (TableProperty.isPhoenixTableProperty(prop) || prop.equals(MetaDataUtil.DATA_TABLE_NAME_PROP_NAME)) {
                 pTablePropCount++;
             }
         }
