phoenix-commits mailing list archives

From rajeshb...@apache.org
Subject phoenix git commit: PHOENIX-1059 Support index regions merge on their corresponding data regions merge (Rajeshbabu)
Date Tue, 20 Jan 2015 22:24:59 GMT
Repository: phoenix
Updated Branches:
  refs/heads/4.0 aa4fcbc77 -> d5ae62d7a


PHOENIX-1059 Support index regions merge on their corresponding data regions merge (Rajeshbabu)


Project: http://git-wip-us.apache.org/repos/asf/phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/phoenix/commit/d5ae62d7
Tree: http://git-wip-us.apache.org/repos/asf/phoenix/tree/d5ae62d7
Diff: http://git-wip-us.apache.org/repos/asf/phoenix/diff/d5ae62d7

Branch: refs/heads/4.0
Commit: d5ae62d7a68fa1cb866221ec7e038bb25b09a264
Parents: aa4fcbc
Author: Rajeshbabu Chintaguntla <rajeshbabu@apache.org>
Authored: Wed Jan 21 03:54:01 2015 +0530
Committer: Rajeshbabu Chintaguntla <rajeshbabu@apache.org>
Committed: Wed Jan 21 03:54:01 2015 +0530

----------------------------------------------------------------------
 .../phoenix/end2end/index/LocalIndexIT.java     |  81 ++++++++++++
 .../regionserver/IndexHalfStoreFileReader.java  |  55 +++++----
 .../IndexHalfStoreFileReaderGenerator.java      |  48 +++++++-
 .../hbase/regionserver/LocalIndexMerger.java    | 122 +++++++++++++++++++
 .../hbase/regionserver/LocalIndexSplitter.java  |  19 ++-
 .../coprocessor/BaseScannerRegionObserver.java  |   6 +-
 .../UngroupedAggregateRegionObserver.java       |  50 ++++----
 .../java/org/apache/phoenix/util/IndexUtil.java |  70 +++++++++--
 .../org/apache/phoenix/util/SchemaUtil.java     |   6 +
 .../java/org/apache/phoenix/query/BaseTest.java |   4 +
 10 files changed, 391 insertions(+), 70 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
index 85643a8..7a4d638 100644
--- a/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
+++ b/phoenix-core/src/it/java/org/apache/phoenix/end2end/index/LocalIndexIT.java
@@ -786,6 +786,87 @@ public class LocalIndexIT extends BaseHBaseManagedTimeIT {
     }
 
     @Test
+    public void testLocalIndexScanAfterRegionsMerge() throws Exception {
+        createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME, null, "('e','j','o')");
+        Connection conn1 = DriverManager.getConnection(getUrl());
+        try{
+            String[] strings = {"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"};
+            for (int i = 0; i < 26; i++) {
+                conn1.createStatement().execute(
+                    "UPSERT INTO " + TestUtil.DEFAULT_DATA_TABLE_NAME + " values('"+strings[i]+"',"
+ i + ","
+                            + (i + 1) + "," + (i + 2) + ",'" + strings[25 - i] + "')");
+            }
+            conn1.commit();
+            conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + " ON " + TestUtil.DEFAULT_DATA_TABLE_NAME + "(v1)");
+            conn1.createStatement().execute("CREATE LOCAL INDEX " + TestUtil.DEFAULT_INDEX_TABLE_NAME + "_2 ON " + TestUtil.DEFAULT_DATA_TABLE_NAME + "(k3)");
+
+            ResultSet rs = conn1.createStatement().executeQuery("SELECT * FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME);
+            assertTrue(rs.next());
+
+            HBaseAdmin admin = driver.getConnectionQueryServices(getUrl(), TestUtil.TEST_PROPERTIES).getAdmin();
+            CatalogTracker ct = new CatalogTracker(admin.getConfiguration());
+            List<HRegionInfo> regionsOfUserTable =
+                    MetaReader.getTableRegions(ct,
+                        TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+            admin.mergeRegions(regionsOfUserTable.get(0).getEncodedNameAsBytes(),
+                regionsOfUserTable.get(1).getEncodedNameAsBytes(), false);
+            regionsOfUserTable =
+                    MetaReader.getTableRegions(ct,
+                        TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+
+            while (regionsOfUserTable.size() != 3) {
+                Thread.sleep(100);
+                regionsOfUserTable =
+                        MetaReader.getTableRegions(ct,
+                            TableName.valueOf(TestUtil.DEFAULT_DATA_TABLE_NAME), false);
+            }
+            assertEquals(3, regionsOfUserTable.size());
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil
+                            .getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME));
+            List<HRegionInfo> regionsOfIndexTable =
+                    MetaReader.getTableRegions(ct, indexTable, false);
+
+            while (regionsOfIndexTable.size() != 3) {
+                Thread.sleep(100);
+                regionsOfIndexTable = MetaReader.getTableRegions(ct, indexTable, false);
+            }
+            assertEquals(3, regionsOfIndexTable.size());
+            String query = "SELECT t_id,k1,v1 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME;
+            rs = conn1.createStatement().executeQuery(query);
+            Thread.sleep(1000);
+            for (int j = 0; j < 26; j++) {
+                assertTrue(rs.next());
+                assertEquals(strings[25 - j], rs.getString("t_id"));
+                assertEquals(25 - j, rs.getInt("k1"));
+                assertEquals(strings[j], rs.getString("V1"));
+            }
+            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
+            assertEquals(
+                "CLIENT PARALLEL " + 3 + "-WAY RANGE SCAN OVER "
+                        + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)
+                        + " [-32768]\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
+
+            query = "SELECT t_id,k1,k3 FROM " + TestUtil.DEFAULT_DATA_TABLE_NAME;
+            rs = conn1.createStatement().executeQuery("EXPLAIN " + query);
+            assertEquals(
+                "CLIENT PARALLEL " + 3 + "-WAY RANGE SCAN OVER "
+                        + MetaDataUtil.getLocalIndexTableName(TestUtil.DEFAULT_DATA_TABLE_NAME)
+                        + " [-32767]\n" + "CLIENT MERGE SORT", QueryUtil.getExplainPlan(rs));
+            rs = conn1.createStatement().executeQuery(query);
+            Thread.sleep(1000);
+            for (int j = 0; j < 26; j++) {
+                assertTrue(rs.next());
+                assertEquals(strings[j], rs.getString("t_id"));
+                assertEquals(j, rs.getInt("k1"));
+                assertEquals(j + 2, rs.getInt("k3"));
+            }
+        } finally {
+            conn1.close();
+        }
+    }
+
+    @Test
     public void testLocalIndexStateWhenSplittingInProgress() throws Exception {
         createBaseTable(TestUtil.DEFAULT_DATA_TABLE_NAME+"2", null, "('e','j','o')");
         Connection conn1 = DriverManager.getConnection(getUrl());
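
The test above waits on hbase:meta until the merge (and the index table's matching merge) is reflected in the region count. As a side note, a small helper capturing that wait loop — hypothetical, not part of this commit, assuming the same CatalogTracker/MetaReader API the test itself uses:

import java.util.List;

import org.apache.hadoop.hbase.HRegionInfo;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.catalog.CatalogTracker;
import org.apache.hadoop.hbase.catalog.MetaReader;

public final class MergeWaitUtil {

    private MergeWaitUtil() {}

    // Polls hbase:meta until the table reports the expected number of regions,
    // mirroring the while/Thread.sleep(100) loops in the test above.
    public static List<HRegionInfo> waitForRegionCount(CatalogTracker ct, TableName table,
            int expectedCount, long timeoutMs) throws Exception {
        long deadline = System.currentTimeMillis() + timeoutMs;
        List<HRegionInfo> regions = MetaReader.getTableRegions(ct, table, false);
        while (regions.size() != expectedCount) {
            if (System.currentTimeMillis() > deadline) {
                throw new IllegalStateException("Timed out waiting for " + expectedCount
                        + " regions of " + table + ", currently " + regions.size());
            }
            Thread.sleep(100);
            regions = MetaReader.getTableRegions(ct, table, false);
        }
        return regions;
    }
}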

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
index d8650cf..fa1def3 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReader.java
@@ -36,7 +36,6 @@ import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.io.hfile.HFileScanner;
-import org.apache.hadoop.hbase.regionserver.StoreFile;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.index.IndexMaintainer;
 
@@ -66,57 +65,69 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
     private final byte[][] viewConstants; 
     private final int offset;
     private final HRegionInfo regionInfo;
-    private final HRegionInfo parent;
+    private final byte[] regionStartKeyInHFile;
 
     /**
+     * @param fs
      * @param p
      * @param cacheConf
      * @param r
+     * @param conf
+     * @param indexMaintainers
+     * @param viewConstants
+     * @param regionInfo
+     * @param regionStartKeyInHFile
+     * @param splitKey
      * @throws IOException
      */
     public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
             final Reference r, final Configuration conf,
             final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
             final byte[][] viewConstants, final HRegionInfo regionInfo,
-            final HRegionInfo parent) throws IOException {
+            final byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
         super(fs, p, cacheConf, conf);
-        this.splitkey = r.getSplitKey();
+        this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
         // Is it top or bottom half?
         this.top = Reference.isTopFileRegion(r.getFileRegion());
         this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
         this.indexMaintainers = indexMaintainers;
         this.viewConstants = viewConstants;
         this.regionInfo = regionInfo;
-        this.parent = parent;
-        this.offset =
-                parent.getStartKey().length != 0 ? parent.getStartKey().length
-                        : parent.getEndKey().length;
+        this.regionStartKeyInHFile = regionStartKeyInHFile;
+        this.offset = regionStartKeyInHFile.length;
     }
 
     /**
+     * @param fs
      * @param p
      * @param cacheConf
+     * @param in
+     * @param size
      * @param r
+     * @param conf
+     * @param indexMaintainers
+     * @param viewConstants
+     * @param regionInfo
+     * @param regionStartKeyInHFile
+     * @param splitKey
      * @throws IOException
      */
     public IndexHalfStoreFileReader(final FileSystem fs, final Path p, final CacheConfig cacheConf,
             final FSDataInputStreamWrapper in, long size, final Reference r,
             final Configuration conf,
             final Map<ImmutableBytesWritable, IndexMaintainer> indexMaintainers,
-            final byte[][] viewConstants, final HRegionInfo regionInfo, final HRegionInfo parent)
-            throws IOException {
+            final byte[][] viewConstants, final HRegionInfo regionInfo,
+            byte[] regionStartKeyInHFile, byte[] splitKey) throws IOException {
         super(fs, p, in, size, cacheConf, conf);
-        this.splitkey = r.getSplitKey();
+        this.splitkey = splitKey == null ? r.getSplitKey() : splitKey;
         // Is it top or bottom half?
         this.top = Reference.isTopFileRegion(r.getFileRegion());
         this.splitRow = CellUtil.cloneRow(KeyValue.createKeyValueFromKey(splitkey));
         this.indexMaintainers = indexMaintainers;
         this.viewConstants = viewConstants;
         this.regionInfo = regionInfo;
-        this.parent = parent;
-        this.offset =
-                parent.getStartKey().length != 0 ? parent.getStartKey().length
-                        : parent.getEndKey().length;
+        this.regionStartKeyInHFile = regionStartKeyInHFile;
+        this.offset = regionStartKeyInHFile.length;
     }
 
     protected boolean isTop() {
@@ -399,9 +410,7 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
         KeyValue keyValue = new KeyValue(key);
         int rowLength = keyValue.getRowLength();
         int rowOffset = keyValue.getRowOffset();
-        byte[] parentStartKey =
-                parent.getStartKey().length == 0 ? new byte[parent.getEndKey().length] : parent
-                        .getStartKey();
+
         int daughterStartKeyLength =
                 regionInfo.getStartKey().length == 0 ? regionInfo.getEndKey().length : regionInfo
                         .getStartKey().length;
@@ -414,20 +423,20 @@ public class IndexHalfStoreFileReader extends StoreFile.Reader {
                     keyValue.getRowLength(), splitRow, 0, splitRow.length) == 0
                 && keyValue.isDeleteFamily()) {
             KeyValue createFirstDeleteFamilyOnRow =
-                    KeyValue.createFirstDeleteFamilyOnRow(parentStartKey, keyValue.getFamily());
+                    KeyValue.createFirstDeleteFamilyOnRow(regionStartKeyInHFile, keyValue.getFamily());
             return createFirstDeleteFamilyOnRow;
         }
 
-        short length = (short) (keyValue.getRowLength() - daughterStartKeyLength + parentStartKey.length);
+        short length = (short) (keyValue.getRowLength() - daughterStartKeyLength + offset);
         byte[] replacedKey =
                 new byte[length + key.length - (rowOffset + rowLength) + ROW_KEY_LENGTH];
         System.arraycopy(Bytes.toBytes(length), 0, replacedKey, 0, ROW_KEY_LENGTH);
-        System.arraycopy(parentStartKey, 0, replacedKey, ROW_KEY_LENGTH, parentStartKey.length);
+        System.arraycopy(regionStartKeyInHFile, 0, replacedKey, ROW_KEY_LENGTH, offset);
         System.arraycopy(keyValue.getRowArray(), keyValue.getRowOffset() + daughterStartKeyLength,
-            replacedKey, parentStartKey.length + ROW_KEY_LENGTH, keyValue.getRowLength()
+            replacedKey, offset + ROW_KEY_LENGTH, keyValue.getRowLength()
                     - daughterStartKeyLength);
         System.arraycopy(key, rowOffset + rowLength, replacedKey,
-            parentStartKey.length + keyValue.getRowLength() - daughterStartKeyLength
+            offset + keyValue.getRowLength() - daughterStartKeyLength
                     + ROW_KEY_LENGTH, key.length - (rowOffset + rowLength));
         return KeyValue.createKeyValueFromKey(replacedKey);
     }
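
The getChangedKey() logic above amounts to a prefix swap on the row key: the start key the region is currently serving under is exchanged for the start key the HFile was written under (regionStartKeyInHFile), with the remainder of the key kept intact. A simplified, hypothetical sketch of just that row-level transformation (the real code above also rewrites the KeyValue length fields and handles delete-family markers):

import org.apache.hadoop.hbase.util.Bytes;

public final class KeyPrefixSwapSketch {

    private KeyPrefixSwapSketch() {}

    // Replaces the first currentStartKeyLength bytes of the row with the
    // start key recorded in the HFile, as IndexHalfStoreFileReader does.
    static byte[] swapRowPrefix(byte[] row, int currentStartKeyLength,
            byte[] regionStartKeyInHFile) {
        byte[] suffix = Bytes.copy(row, currentStartKeyLength,
                row.length - currentStartKeyLength);
        return Bytes.add(regionStartKeyInHFile, suffix);
    }
}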

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
index 2fbea22..f213d2d 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/IndexHalfStoreFileReaderGenerator.java
@@ -26,8 +26,10 @@ import java.util.Map;
 
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hbase.CellUtil;
 import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HRegionInfo;
+import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.HTable;
@@ -45,6 +47,8 @@ import org.apache.hadoop.hbase.io.Reference;
 import org.apache.hadoop.hbase.io.hfile.CacheConfig;
 import org.apache.hadoop.hbase.regionserver.StoreFile.Reader;
 import org.apache.hadoop.hbase.regionserver.compactions.CompactionRequest;
+import org.apache.hadoop.hbase.util.Bytes;
+import org.apache.hadoop.hbase.util.Pair;
 import org.apache.phoenix.index.IndexMaintainer;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.AlterIndexStatement;
@@ -73,6 +77,8 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
             Reference r, Reader reader) throws IOException {
         TableName tableName = ctx.getEnvironment().getRegion().getTableDesc().getTableName();
         HRegion region = ctx.getEnvironment().getRegion();
+        HRegionInfo childRegion = region.getRegionInfo();
+        byte[] splitKey = null;
         if (reader == null && r != null) {
             Scan scan = MetaReader.getScanForTableName(tableName);
             SingleColumnValueFilter scvf = null;
@@ -86,15 +92,45 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                 scvf.setFilterIfMissing(true);
             }
             if(scvf != null) scan.setFilter(scvf);
-            HRegionInfo parentRegion = null;
+            byte[] regionStartKeyInHFile = null;
             HTable metaTable = null;
             PhoenixConnection conn = null;
             try {
                 metaTable = new HTable(ctx.getEnvironment().getConfiguration(), TableName.META_TABLE_NAME);
-                ResultScanner scanner = metaTable.getScanner(scan);
-                Result result = scanner.next();
-                if (result == null || result.isEmpty()) return reader;
-                parentRegion = HRegionInfo.getHRegionInfo(result);
+                ResultScanner scanner = null;
+                Result result = null;
+                try {
+                    scanner = metaTable.getScanner(scan);
+                    result = scanner.next();
+                } finally {
+                    if(scanner != null) scanner.close();
+                }
+                if (result == null || result.isEmpty()) {
+                    Pair<HRegionInfo, HRegionInfo> mergeRegions =
+                            MetaReader.getRegionsFromMergeQualifier(ctx.getEnvironment()
+                                    .getRegionServerServices().getCatalogTracker(),
+                                region.getRegionName());
+                    if (mergeRegions == null || mergeRegions.getFirst() == null) return reader;
+                    byte[] splitRow =
+                            CellUtil.cloneRow(KeyValue.createKeyValueFromKey(r.getSplitKey()));
+                    // We need not change anything in the first region's data because the first
+                    // region start key is equal to the merged region's start key, so we return
+                    // the same reader.
+                    if (Bytes.compareTo(mergeRegions.getFirst().getStartKey(), splitRow) == 0) {
+                        return reader;
+                    } else {
+                        childRegion = mergeRegions.getSecond();
+                        regionStartKeyInHFile = mergeRegions.getSecond().getStartKey();
+                    }
+                    splitKey =
+                            KeyValue.createFirstOnRow(
+                                region.getStartKey().length == 0 ? new byte[region.getEndKey().length] : region
+                                        .getStartKey()).getKey();
+                } else {
+                    HRegionInfo parentRegion = HRegionInfo.getHRegionInfo(result);
+                    regionStartKeyInHFile =
+                            parentRegion.getStartKey().length == 0 ? new byte[parentRegion
+                                    .getEndKey().length] : parentRegion.getStartKey();
+                }
             } finally {
                 if (metaTable != null) metaTable.close();
             }
@@ -118,7 +154,7 @@ public class IndexHalfStoreFileReaderGenerator extends BaseRegionObserver {
                 byte[][] viewConstants = getViewConstants(dataTable);
                 return new IndexHalfStoreFileReader(fs, p, cacheConf, in, size, r, ctx
                         .getEnvironment().getConfiguration(), indexMaintainers, viewConstants,
-                        region.getRegionInfo(), parentRegion);
+                        childRegion, regionStartKeyInHFile, splitKey);
             } catch (ClassNotFoundException e) {
                 throw new IOException(e);
             } catch (SQLException e) {

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
new file mode 100644
index 0000000..6f8dd79
--- /dev/null
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexMerger.java
@@ -0,0 +1,122 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hbase.regionserver;
+
+import java.io.IOException;
+import java.util.List;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
+import org.apache.hadoop.hbase.catalog.MetaReader;
+import org.apache.hadoop.hbase.client.Mutation;
+import org.apache.hadoop.hbase.coprocessor.BaseRegionServerObserver;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
+import org.apache.phoenix.schema.types.PBoolean;
+import org.apache.phoenix.util.IndexUtil;
+import org.apache.phoenix.util.MetaDataUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+public class LocalIndexMerger extends BaseRegionServerObserver {
+
+    private static final Log LOG = LogFactory.getLog(LocalIndexMerger.class);
+
+    private RegionMergeTransaction rmt = null;
+    private HRegion mergedRegion = null;
+
+    @Override
+    public void preMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
+            HRegion regionA, HRegion regionB, List<Mutation> metaEntries) throws IOException {
+        HTableDescriptor tableDesc = regionA.getTableDesc();
+        if (SchemaUtil.isSystemTable(tableDesc.getName())) {
+            return;
+        }
+        RegionServerServices rss = ctx.getEnvironment().getRegionServerServices();
+        HRegionServer rs = (HRegionServer) rss;
+        if (tableDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) == null
+                || !Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(tableDesc
+                        .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
+            if (!MetaReader.tableExists(rs.getCatalogTracker(), indexTable)) return;
+            HRegion indexRegionA = IndexUtil.getIndexRegion(regionA, ctx.getEnvironment());
+            if (indexRegionA == null) {
+                LOG.warn("Index region corresponindg to data region " + regionA
+                        + " not in the same server. So skipping the merge.");
+                ctx.bypass();
+                return;
+            }
+            HRegion indexRegionB = IndexUtil.getIndexRegion(regionB, ctx.getEnvironment());
+            if (indexRegionB == null) {
+                LOG.warn("Index region corresponindg to region " + regionB
+                        + " not in the same server. So skipping the merge.");
+                ctx.bypass();
+                return;
+            }
+            try {
+                rmt = new RegionMergeTransaction(indexRegionA, indexRegionB, false);
+                if (!rmt.prepare(rss)) {
+                    LOG.error("Prepare for the index regions merge [" + indexRegionA + ","
+                            + indexRegionB + "] failed. So returning null. ");
+                    ctx.bypass();
+                    return;
+                }
+                this.mergedRegion = rmt.stepsBeforePONR(rss, rss, false);
+                rmt.prepareMutationsForMerge(mergedRegion.getRegionInfo(),
+                    indexRegionA.getRegionInfo(), indexRegionB.getRegionInfo(),
+                    rss.getServerName(), metaEntries);
+            } catch (Exception e) {
+                ctx.bypass();
+                LOG.warn("index regions merge failed with the exception ", e);
+                if (rmt != null) {
+                    rmt.rollback(rss, rss);
+                    rmt = null;
+                    mergedRegion = null;
+                }
+            }
+        }
+    }
+
+    @Override
+    public void postMergeCommit(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
+            HRegion regionA, HRegion regionB, HRegion mergedRegion) throws IOException {
+        if (rmt != null && this.mergedRegion != null) {
+            RegionServerCoprocessorEnvironment environment = ctx.getEnvironment();
+            HRegionServer rs = (HRegionServer) environment.getRegionServerServices();
+            rmt.stepsAfterPONR(rs, rs, this.mergedRegion);
+        }
+    }
+
+    @Override
+    public void preRollBackMerge(ObserverContext<RegionServerCoprocessorEnvironment> ctx,
+            HRegion regionA, HRegion regionB) throws IOException {
+        HRegionServer rs = (HRegionServer) ctx.getEnvironment().getRegionServerServices();
+        try {
+            if (rmt != null) {
+                rmt.rollback(rs, rs);
+                rmt = null;
+                mergedRegion = null;
+            }
+        } catch (Exception e) {
+            LOG.error("Error while rolling back the merge failure for index regions", e);
+            rs.abort("Abort; we got an error during rollback of index");
+        }
+    }
+}
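
LocalIndexMerger only takes effect once registered as a region server coprocessor. A minimal sketch of the programmatic registration — the same setClass call the BaseTest change at the end of this commit makes; a real deployment would normally set hbase.coprocessor.regionserver.classes in hbase-site.xml instead:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
import org.apache.hadoop.hbase.regionserver.LocalIndexMerger;

public final class RegisterLocalIndexMerger {

    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        // Registers LocalIndexMerger so its preMergeCommit/postMergeCommit
        // hooks fire whenever data table regions are merged.
        conf.setClass("hbase.coprocessor.regionserver.classes",
            LocalIndexMerger.class, RegionServerObserver.class);
    }
}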

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
index 15e5ec8..2ac61cb 100644
--- a/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
+++ b/phoenix-core/src/main/java/org/apache/hadoop/hbase/regionserver/LocalIndexSplitter.java
@@ -25,7 +25,9 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.HTableDescriptor;
+import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.catalog.MetaEditor;
+import org.apache.hadoop.hbase.catalog.MetaReader;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.coprocessor.BaseRegionObserver;
@@ -36,7 +38,6 @@ import org.apache.phoenix.hbase.index.util.VersionUtil;
 import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.parse.AlterIndexStatement;
 import org.apache.phoenix.parse.ParseNodeFactory;
-import org.apache.phoenix.parse.TableName;
 import org.apache.phoenix.schema.MetaDataClient;
 import org.apache.phoenix.schema.types.PBoolean;
 import org.apache.phoenix.schema.PIndexState;
@@ -63,16 +64,24 @@ public class LocalIndexSplitter extends BaseRegionObserver {
             byte[] splitKey, List<Mutation> metaEntries) throws IOException {
         RegionCoprocessorEnvironment environment = ctx.getEnvironment();
         HTableDescriptor tableDesc = ctx.getEnvironment().getRegion().getTableDesc();
-        if (SchemaUtil.isMetaTable(tableDesc.getName())
-                || SchemaUtil.isSequenceTable(tableDesc.getName())) {
+        if (SchemaUtil.isSystemTable(tableDesc.getName())) {
             return;
         }
         RegionServerServices rss = ctx.getEnvironment().getRegionServerServices();
         if (tableDesc.getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES) == null
                 || !Boolean.TRUE.equals(PBoolean.INSTANCE.toObject(tableDesc
                         .getValue(MetaDataUtil.IS_LOCAL_INDEX_TABLE_PROP_BYTES)))) {
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(tableDesc.getName()));
+            if (!MetaReader.tableExists(rss.getCatalogTracker(), indexTable)) return;
+
             HRegion indexRegion = IndexUtil.getIndexRegion(environment);
-            if (indexRegion == null) return;
+            if (indexRegion == null) {
+                LOG.warn("Index region corresponindg to data region " + environment.getRegion()
+                        + " not in the same server. So skipping the split.");
+                ctx.bypass();
+                return;
+            }
             try {
                 int encodedVersion = VersionUtil.encodeVersion(environment.getHBaseVersion());
                 if(encodedVersion >= SPLIT_TXN_MINIMUM_SUPPORTED_VERSION) {
@@ -136,7 +145,7 @@ public class LocalIndexSplitter extends BaseRegionObserver {
             for (PTable index : indexes) {
                 if (index.getIndexType() == IndexType.LOCAL) {
                     AlterIndexStatement indexStatement = FACTORY.alterIndex(FACTORY.namedTable(null,
-                        TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
+                        org.apache.phoenix.parse.TableName.create(index.getSchemaName().getString(), index.getTableName().getString())),
                         dataTable.getTableName().getString(), false, PIndexState.INACTIVE);
                     client.alterIndex(indexStatement);
                 }

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
index 2e179a5..4033c54 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/BaseScannerRegionObserver.java
@@ -284,7 +284,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                         replaceArrayIndexElement(arrayKVRefs, arrayFuncRefs, result);
                     }
                     if (ScanUtil.isLocalIndex(scan) && !ScanUtil.isAnalyzeTable(scan)) {
-                        IndexUtil.wrapResultUsingOffset(result, offset, dataColumns, tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
+                        IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
+                            tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
                     }
                     // There is a scan attribute set to retrieve the specific array element
                     return next;
@@ -305,7 +306,8 @@ abstract public class BaseScannerRegionObserver extends BaseRegionObserver {
                         replaceArrayIndexElement(arrayKVRefs, arrayFuncRefs, result);
                     }
                     if ((offset > 0 || ScanUtil.isLocalIndex(scan)) && !ScanUtil.isAnalyzeTable(scan)) {
-                        IndexUtil.wrapResultUsingOffset(result, offset, dataColumns, tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
+                        IndexUtil.wrapResultUsingOffset(c, result, offset, dataColumns,
+                            tupleProjector, dataRegion, indexMaintainer, viewConstants, ptr);
                     }
                     // There is a scan attribute set to retrieve the specific array element
                     return next;

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
index ff496ab..7e46370 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/coprocessor/UngroupedAggregateRegionObserver.java
@@ -41,6 +41,7 @@ import org.apache.hadoop.hbase.HRegionInfo;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Delete;
+import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Scan;
@@ -88,6 +89,7 @@ import org.apache.phoenix.util.LogUtil;
 import org.apache.phoenix.util.MetaDataUtil;
 import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
+import org.apache.phoenix.util.ServerUtil;
 import org.apache.phoenix.util.TimeKeeper;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -132,7 +134,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
       // TODO: should we use the one that is all or none?
       region.batchMutate(mutations.toArray(mutationArray));
     }
-    
+
     public static void serializeIntoScan(Scan scan) {
         scan.setAttribute(BaseScannerRegionObserver.UNGROUPED_AGG, QueryConstants.TRUE);
     }
@@ -357,10 +359,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
                         }
                         // Commit in batches based on UPSERT_BATCH_SIZE_ATTRIB in config
                         if (!indexMutations.isEmpty() && batchSize > 0 && indexMutations.size() % batchSize == 0) {
-                            HRegion indexRegion = getIndexRegion(c.getEnvironment());
-                            // Get indexRegion corresponding to data region
-                            commitBatch(indexRegion, indexMutations, null);
-                            indexMutations.clear();
+                            commitIndexMutations(c, region, indexMutations);
                         }
 
                     } catch (ConstraintViolationException e) {
@@ -399,10 +398,7 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
 
         if (!indexMutations.isEmpty()) {
-            HRegion indexRegion = getIndexRegion(c.getEnvironment());
-            // Get indexRegion corresponding to data region
-            commitBatch(indexRegion, indexMutations, null);
-            indexMutations.clear();
+            commitIndexMutations(c, region, indexMutations);
         }
 
         final boolean hadAny = hasAny;
@@ -446,6 +442,30 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         };
         return scanner;
     }
+
+    private void commitIndexMutations(final ObserverContext<RegionCoprocessorEnvironment> c,
+            HRegion region, List<Mutation> indexMutations) throws IOException {
+        // Get indexRegion corresponding to data region
+        HRegion indexRegion = IndexUtil.getIndexRegion(c.getEnvironment());
+        if (indexRegion != null) {
+            commitBatch(indexRegion, indexMutations, null);
+        } else {
+            TableName indexTable =
+                    TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(region.getTableDesc()
+                            .getName()));
+            HTableInterface table = null;
+            try {
+                table = c.getEnvironment().getTable(indexTable);
+                table.batch(indexMutations);
+            } catch (InterruptedException ie) {
+                ServerUtil.throwIOException(c.getEnvironment().getRegion().getRegionNameAsString(),
+                    ie);
+            } finally {
+                if (table != null) table.close();
+            }
+        }
+        indexMutations.clear();
+    }
     
     @Override
     public InternalScanner preCompact(ObserverContext<RegionCoprocessorEnvironment> c,
@@ -503,18 +523,6 @@ public class UngroupedAggregateRegionObserver extends BaseScannerRegionObserver{
         }
     }
 
-    private HRegion getIndexRegion(RegionCoprocessorEnvironment environment) throws IOException {
-        HRegion userRegion = environment.getRegion();
-        TableName indexTableName = TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(userRegion.getTableDesc().getName()));
-        List<HRegion> onlineRegions = environment.getRegionServerServices().getOnlineRegions(indexTableName);
-        for(HRegion indexRegion : onlineRegions) {
-            if (Bytes.compareTo(userRegion.getStartKey(), indexRegion.getStartKey()) == 0) {
-                return indexRegion;
-            }
-        }
-        return null;
-    }
-
     private static PTable deserializeTable(byte[] b) {
         try {
             PTableProtos.PTable ptableProto = PTableProtos.PTable.parseFrom(b);

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
index dbc0f8c..d0ebe99 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/IndexUtil.java
@@ -26,16 +26,21 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.hadoop.hbase.Cell;
+import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.KeyValue;
 import org.apache.hadoop.hbase.TableName;
 import org.apache.hadoop.hbase.client.Get;
+import org.apache.hadoop.hbase.client.HTableInterface;
 import org.apache.hadoop.hbase.client.Mutation;
 import org.apache.hadoop.hbase.client.Put;
 import org.apache.hadoop.hbase.client.Result;
 import org.apache.hadoop.hbase.client.Scan;
+import org.apache.hadoop.hbase.coprocessor.ObserverContext;
 import org.apache.hadoop.hbase.coprocessor.RegionCoprocessorEnvironment;
+import org.apache.hadoop.hbase.coprocessor.RegionServerCoprocessorEnvironment;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.regionserver.HRegion;
+import org.apache.hadoop.hbase.regionserver.RegionServerServices;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.phoenix.compile.ColumnResolver;
@@ -288,23 +293,46 @@ public class IndexUtil {
             
         });
     }
-    
-    public static HRegion getIndexRegion(RegionCoprocessorEnvironment environment) throws IOException {
-        HRegion userRegion = environment.getRegion();
-        TableName indexTableName = TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(userRegion.getTableDesc().getName()));
-        List<HRegion> onlineRegions = environment.getRegionServerServices().getOnlineRegions(indexTableName);
+
+    public static HRegion getIndexRegion(RegionCoprocessorEnvironment environment)
+            throws IOException {
+        HRegion dataRegion = environment.getRegion();
+        return getIndexRegion(dataRegion, environment.getRegionServerServices());
+    }
+
+    public static HRegion
+            getIndexRegion(HRegion dataRegion, RegionServerCoprocessorEnvironment env)
+                    throws IOException {
+        return getIndexRegion(dataRegion, env.getRegionServerServices());
+    }
+
+    public static HRegion getDataRegion(RegionCoprocessorEnvironment env) throws IOException {
+        HRegion indexRegion = env.getRegion();
+        return getDataRegion(indexRegion, env.getRegionServerServices());
+    }
+
+    public static HRegion
+            getDataRegion(HRegion indexRegion, RegionServerCoprocessorEnvironment env)
+                    throws IOException {
+        return getDataRegion(indexRegion, env.getRegionServerServices());
+    }
+
+    public static HRegion getIndexRegion(HRegion dataRegion, RegionServerServices rss) throws IOException {
+        TableName indexTableName =
+                TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(dataRegion.getTableDesc()
+                        .getName()));
+        List<HRegion> onlineRegions = rss.getOnlineRegions(indexTableName);
         for(HRegion indexRegion : onlineRegions) {
-            if (Bytes.compareTo(userRegion.getStartKey(), indexRegion.getStartKey()) == 0) {
+            if (Bytes.compareTo(dataRegion.getStartKey(), indexRegion.getStartKey()) == 0) {
                 return indexRegion;
             }
         }
         return null;
     }
 
-    public static HRegion getDataRegion(RegionCoprocessorEnvironment env) throws IOException {
-        HRegion indexRegion = env.getRegion();
+    public static HRegion getDataRegion(HRegion indexRegion, RegionServerServices rss) throws IOException {
         TableName dataTableName = TableName.valueOf(MetaDataUtil.getUserTableName(indexRegion.getTableDesc().getNameAsString()));
-        List<HRegion> onlineRegions = env.getRegionServerServices().getOnlineRegions(dataTableName);
+        List<HRegion> onlineRegions = rss.getOnlineRegions(dataTableName);
         for(HRegion region : onlineRegions) {
             if (Bytes.compareTo(indexRegion.getStartKey(), region.getStartKey()) == 0) {
                 return region;
@@ -422,9 +450,10 @@ public class IndexUtil {
         return QueryUtil.getViewStatement(index.getSchemaName().getString(), index.getTableName().getString(), whereClause);
     }
     
-    public static void wrapResultUsingOffset(List<Cell> result, final int offset,
-            ColumnReference[] dataColumns, TupleProjector tupleProjector, HRegion dataRegion,
-            IndexMaintainer indexMaintainer, byte[][] viewConstants, ImmutableBytesWritable ptr) throws IOException {
+    public static void wrapResultUsingOffset(final ObserverContext<RegionCoprocessorEnvironment> c,
+            List<Cell> result, final int offset, ColumnReference[] dataColumns,
+            TupleProjector tupleProjector, HRegion dataRegion, IndexMaintainer indexMaintainer,
+            byte[][] viewConstants, ImmutableBytesWritable ptr) throws IOException {
         if (tupleProjector != null) {
             // Join back to data table here by issuing a local get projecting
             // all of the cq:cf from the KeyValueColumnExpression into the Get.
@@ -436,7 +465,22 @@ public class IndexUtil {
             for (int i = 0; i < dataColumns.length; i++) {
                 get.addColumn(dataColumns[i].getFamily(), dataColumns[i].getQualifier());
             }
-            Result joinResult = dataRegion.get(get);
+            Result joinResult = null;
+            if (dataRegion != null) {
+                joinResult = dataRegion.get(get);
+            } else {
+                TableName indexTable =
+                        TableName.valueOf(MetaDataUtil.getLocalIndexPhysicalName(c.getEnvironment()
+                                .getRegion().getTableDesc().getName()));
+                HTableInterface table = null;
+                try {
+                    table = c.getEnvironment().getTable(indexTable);
+                    joinResult = table.get(get);
+                } finally {
+                    if (table != null) table.close();
+                }
+            }
+            
             // TODO: handle null case (but shouldn't happen)
             Tuple joinTuple = new ResultTuple(joinResult);
             // This will create a byte[] that captures all of the values from the data table
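
The overloads added above all funnel into the two RegionServerServices-based lookups, which pair a data region with its colocated index region by comparing start keys and return null when the index region is hosted on another server. A small hypothetical usage sketch:

import java.io.IOException;

import org.apache.hadoop.hbase.regionserver.HRegion;
import org.apache.hadoop.hbase.regionserver.RegionServerServices;
import org.apache.phoenix.util.IndexUtil;

public final class ColocationCheck {

    private ColocationCheck() {}

    // True when the local index region for this data region is on this server;
    // callers such as LocalIndexSplitter/LocalIndexMerger bypass otherwise.
    static boolean isIndexColocated(HRegion dataRegion, RegionServerServices rss)
            throws IOException {
        return IndexUtil.getIndexRegion(dataRegion, rss) != null;
    }
}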

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index e8aff8a..72c67bf 100644
--- a/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/phoenix-core/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -388,6 +388,12 @@ public class SchemaUtil {
         return PhoenixDatabaseMetaData.SYSTEM_CATALOG_SCHEMA.equals(schemaName) && PhoenixDatabaseMetaData.SYSTEM_CATALOG_TABLE.equals(tableName);
     }
 
+    public static boolean isSystemTable(byte[] fullTableName) {
+        String schemaName = SchemaUtil.getSchemaNameFromFullName(fullTableName);
+        if (QueryConstants.SYSTEM_SCHEMA_NAME.equals(schemaName)) return true;
+        return false;
+    }
+
     // Given the splits and the rowKeySchema, find out the keys that 
     public static byte[][] processSplits(byte[][] splits, LinkedHashSet<PColumn> pkColumns, Integer saltBucketNum, boolean defaultRowKeyOrder) throws SQLException {
         // FIXME: shouldn't this return if splits.length == 0?
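
For reference, a hedged sketch of how the new isSystemTable check is used by the splitter/merger hooks above to leave Phoenix SYSTEM.* tables alone; the class and method names here are illustrative only:

import org.apache.hadoop.hbase.HTableDescriptor;
import org.apache.phoenix.util.SchemaUtil;

public final class SystemTableGuard {

    private SystemTableGuard() {}

    // isSystemTable tests whether the schema portion of the full table name
    // is the SYSTEM schema, covering SYSTEM.CATALOG, SYSTEM.SEQUENCE, etc.
    static boolean shouldSkipIndexMaintenance(HTableDescriptor tableDesc) {
        return SchemaUtil.isSystemTable(tableDesc.getName());
    }
}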

http://git-wip-us.apache.org/repos/asf/phoenix/blob/d5ae62d7/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
----------------------------------------------------------------------
diff --git a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
index 488b162..6a2ce26 100644
--- a/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
+++ b/phoenix-core/src/test/java/org/apache/phoenix/query/BaseTest.java
@@ -116,7 +116,9 @@ import org.apache.hadoop.hbase.HConstants;
 import org.apache.hadoop.hbase.HTableDescriptor;
 import org.apache.hadoop.hbase.IntegrationTestingUtility;
 import org.apache.hadoop.hbase.client.HBaseAdmin;
+import org.apache.hadoop.hbase.coprocessor.RegionServerObserver;
 import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
+import org.apache.hadoop.hbase.regionserver.LocalIndexMerger;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.phoenix.end2end.BaseClientManagedTimeIT;
 import org.apache.phoenix.end2end.BaseHBaseManagedTimeIT;
@@ -625,6 +627,8 @@ public abstract class BaseTest {
         conf.setInt(HConstants.REGION_SERVER_HANDLER_COUNT, 5);
         conf.setInt(HConstants.REGION_SERVER_META_HANDLER_COUNT, 2);
         conf.setInt(HConstants.MASTER_HANDLER_COUNT, 2);
+        conf.setClass("hbase.coprocessor.regionserver.classes", LocalIndexMerger.class,
+            RegionServerObserver.class);
         conf.setInt("dfs.namenode.handler.count", 2);
         conf.setInt("dfs.namenode.service.handler.count", 2);
         conf.setInt("dfs.datanode.handler.count", 2);

