phoenix-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jamestay...@apache.org
Subject git commit: Backport fix for PHOENIX-20: Single Key(s) not recognized by optimizer when table is salted
Date Mon, 03 Feb 2014 02:04:11 GMT
Updated Branches:
  refs/heads/2.2.3 64da9e222 -> f7e023c7e


Backport fix for PHOENIX-20: Single Key(s) not recognized by optimizer when table is salted


Project: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/repo
Commit: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/commit/f7e023c7
Tree: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/tree/f7e023c7
Diff: http://git-wip-us.apache.org/repos/asf/incubator-phoenix/diff/f7e023c7

Branch: refs/heads/2.2.3
Commit: f7e023c7ecbcfc92788becda8d13493825d1246d
Parents: 64da9e2
Author: James Taylor <jamestaylor@apache.org>
Authored: Sun Feb 2 18:03:59 2014 -0800
Committer: James Taylor <jamestaylor@apache.org>
Committed: Sun Feb 2 18:03:59 2014 -0800

----------------------------------------------------------------------
 .../apache/phoenix/compile/WhereOptimizer.java  |  35 +++++--
 .../apache/phoenix/filter/SkipScanFilter.java   |  56 ++--------
 .../org/apache/phoenix/schema/SaltingUtil.java  |  13 ++-
 .../java/org/apache/phoenix/util/ScanUtil.java  |  36 +------
 .../org/apache/phoenix/util/SchemaUtil.java     |  41 +++++++-
 .../phoenix/compile/WhereClauseFilterTest.java  | 105 +++++++++++++++++--
 6 files changed, 185 insertions(+), 101 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/f7e023c7/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java b/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
index 9e5e100..d3482b0 100644
--- a/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
+++ b/src/main/java/org/apache/phoenix/compile/WhereOptimizer.java
@@ -30,10 +30,6 @@ import java.util.Set;
 
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
-
-import com.google.common.base.Preconditions;
-import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import org.apache.phoenix.expression.AndExpression;
 import org.apache.phoenix.expression.BaseTerminalExpression;
 import org.apache.phoenix.expression.CoerceExpression;
@@ -62,9 +58,12 @@ import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.schema.tuple.Tuple;
 import org.apache.phoenix.util.ByteUtil;
-import org.apache.phoenix.util.ScanUtil;
 import org.apache.phoenix.util.SchemaUtil;
 
+import com.google.common.base.Preconditions;
+import com.google.common.collect.Iterators;
+import com.google.common.collect.Lists;
+
 
 /**
  *
@@ -186,7 +185,7 @@ public class WhereOptimizer {
                 // If we have all single keys, we can optimize by adding the salt byte up front
                 if (schema == SchemaUtil.VAR_BINARY_SCHEMA) {
                     ranges = SaltingUtil.setSaltByte(ranges, table.getBucketNum());
-                } else if (ScanUtil.isAllSingleRowScan(cnf, table.getRowKeySchema())) {
+                } else if (isAllSingleRowScan(cnf, table)) {
                     cnf.addFirst(SALT_PLACEHOLDER);
                     ranges = SaltingUtil.flattenRanges(cnf, table.getRowKeySchema(), table.getBucketNum());
                     schema = SchemaUtil.VAR_BINARY_SCHEMA;
@@ -201,6 +200,30 @@ public class WhereOptimizer {
         return whereClause.accept(new RemoveExtractedNodesVisitor(extractNodes));
     }
 
+    /**
+     * Calculate whether or not the list of ranges represents the full primary key
+     * of one or more rows
+     * @param ranges the list of ranges WITHOUT the salt KeyRange inserted yet
+     * @param table
+     * @return true if the list of ranges represents the full primary key of one or
+     * more ranges and false otherwise.
+     */
+    private static boolean isAllSingleRowScan(List<List<KeyRange>> ranges, PTable table) {
+        RowKeySchema schema = table.getRowKeySchema();
+        if (ranges.size() + ( table.getBucketNum() == null ? 0 : 1) < schema.getMaxFields()) {
+            return false;
+        }
+        for (int i = 0; i < ranges.size(); i++) {
+            List<KeyRange> orRanges = ranges.get(i);
+            for (KeyRange range: orRanges) {
+                if (!range.isSingleKey()) {
+                    return false;
+                }
+            }
+        }
+        return true;
+    }
+
     private static class RemoveExtractedNodesVisitor extends TraverseNoExpressionVisitor<Expression> {
         private final Set<Expression> nodesToRemove;
 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/f7e023c7/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java b/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
index ac8f174..a8e0b00 100644
--- a/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
+++ b/src/main/java/org/apache/phoenix/filter/SkipScanFilter.java
@@ -31,19 +31,19 @@ import org.apache.hadoop.hbase.KeyValue.Type;
 import org.apache.hadoop.hbase.filter.FilterBase;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.base.Objects;
-import com.google.common.collect.Lists;
-import com.google.common.hash.HashFunction;
-import com.google.common.hash.Hasher;
-import com.google.common.hash.Hashing;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.KeyRange.Bound;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.RowKeySchema;
-import org.apache.phoenix.schema.ValueSchema.Field;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.ScanUtil;
+import org.apache.phoenix.util.SchemaUtil;
+
+import com.google.common.base.Objects;
+import com.google.common.collect.Lists;
+import com.google.common.hash.HashFunction;
+import com.google.common.hash.Hasher;
+import com.google.common.hash.Hashing;
 
 
 /**
@@ -81,21 +81,10 @@ public class SkipScanFilter extends FilterBase {
     }
 
     public SkipScanFilter(List<List<KeyRange>> slots, RowKeySchema schema) {
-        int maxKeyLength = getTerminatorCount(schema);
-        for (List<KeyRange> slot : slots) {
-            int maxSlotLength = 0;
-            for (KeyRange range : slot) {
-                int maxRangeLength = Math.max(range.getLowerRange().length, range.getUpperRange().length);
-                if (maxSlotLength < maxRangeLength) {
-                    maxSlotLength = maxRangeLength;
-                }
-            }
-            maxKeyLength += maxSlotLength;
-        }
-        init(slots, schema, maxKeyLength);
+        init(slots, schema);
     }
 
-    private void init(List<List<KeyRange>> slots, RowKeySchema schema, int maxKeyLength) {
+    private void init(List<List<KeyRange>> slots, RowKeySchema schema) {
         for (List<KeyRange> ranges : slots) {
             if (ranges.isEmpty()) {
                 throw new IllegalStateException();
@@ -103,7 +92,7 @@ public class SkipScanFilter extends FilterBase {
         }
         this.slots = slots;
         this.schema = schema;
-        this.maxKeyLength = maxKeyLength;
+        this.maxKeyLength = SchemaUtil.getMaxKeyLength(schema, slots);
         this.position = new int[slots.size()];
         startKey = new byte[maxKeyLength];
         endKey = new byte[maxKeyLength];
@@ -440,46 +429,23 @@ public class SkipScanFilter extends FilterBase {
         return targetKey;
     }
 
-    private int getTerminatorCount(RowKeySchema schema) {
-        int nTerminators = 0;
-        for (int i = 0; i < schema.getFieldCount(); i++) {
-            Field field = schema.getField(i);
-            // We won't have a terminator on the last PK column
-            // unless it is variable length and exclusive, but
-            // having the extra byte irregardless won't hurt anything
-            if (!field.getDataType().isFixedWidth()) {
-                nTerminators++;
-            }
-        }
-        return nTerminators;
-    }
-
     @Override
     public void readFields(DataInput in) throws IOException {
         RowKeySchema schema = new RowKeySchema();
         schema.readFields(in);
-        int maxLength = getTerminatorCount(schema);
         int andLen = in.readInt();
         List<List<KeyRange>> slots = Lists.newArrayListWithExpectedSize(andLen);
         for (int i=0; i<andLen; i++) {
             int orlen = in.readInt();
             List<KeyRange> orclause = Lists.newArrayListWithExpectedSize(orlen);
             slots.add(orclause);
-            int maxSlotLength = 0;
             for (int j=0; j<orlen; j++) {
                 KeyRange range = new KeyRange();
                 range.readFields(in);
-                if (range.getLowerRange().length > maxSlotLength) {
-                    maxSlotLength = range.getLowerRange().length;
-                }
-                if (range.getUpperRange().length > maxSlotLength) {
-                    maxSlotLength = range.getUpperRange().length;
-                }
                 orclause.add(range);
             }
-            maxLength += maxSlotLength;
         }
-        this.init(slots, schema, maxLength);
+        this.init(slots, schema);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/f7e023c7/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/schema/SaltingUtil.java b/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
index 50ec4b6..094f29e 100644
--- a/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
+++ b/src/main/java/org/apache/phoenix/schema/SaltingUtil.java
@@ -30,6 +30,7 @@ import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.KeyRange.Bound;
 import org.apache.phoenix.util.ScanUtil;
+import org.apache.phoenix.util.SchemaUtil;
 
 
 /**
@@ -124,16 +125,18 @@ public class SaltingUtil {
         }
         KeyRange[] expandedRanges = new KeyRange[count];
         int[] position = new int[ranges.size()];
-        int estimatedKeyLength = ScanUtil.estimateMaximumKeyLength(schema, 1, ranges);
+        int maxKeyLength = SchemaUtil.getMaxKeyLength(schema, ranges);
         int idx = 0, length;
         byte saltByte;
-        byte[] key = new byte[estimatedKeyLength];
+        byte[] key = new byte[maxKeyLength];
         do {
-            length = ScanUtil.setKey(schema, ranges, position, Bound.LOWER, key, 1, 0, ranges.size(), 1);
+            length = ScanUtil.setKey(schema, ranges, position, Bound.LOWER, key, NUM_SALTING_BYTES, 1, ranges.size(), 1);
             saltByte = SaltingUtil.getSaltingByte(key, 1, length, bucketNum);
             key[0] = saltByte;
-            byte[] saltedKey = Arrays.copyOf(key, length + 1);
-            KeyRange range = PDataType.VARBINARY.getKeyRange(saltedKey, true, saltedKey, true);
+            byte[] saltedStartKey = Arrays.copyOf(key, length + 1);
+            length = ScanUtil.setKey(schema, ranges, position, Bound.UPPER, key, NUM_SALTING_BYTES, 1, ranges.size(), 1);
+            byte[] saltedEndKey = Arrays.copyOf(key, length + 1);
+            KeyRange range = PDataType.VARBINARY.getKeyRange(saltedStartKey, true, saltedEndKey, false);
             expandedRanges[idx++] = range;
         } while (incrementKey(ranges, position));
         // The comparator is imperfect, but sufficient for all single keys.

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/f7e023c7/src/main/java/org/apache/phoenix/util/ScanUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/util/ScanUtil.java b/src/main/java/org/apache/phoenix/util/ScanUtil.java
index 05337b7..aa56209 100644
--- a/src/main/java/org/apache/phoenix/util/ScanUtil.java
+++ b/src/main/java/org/apache/phoenix/util/ScanUtil.java
@@ -35,8 +35,6 @@ import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.filter.FilterList;
 import org.apache.hadoop.hbase.io.ImmutableBytesWritable;
 import org.apache.hadoop.hbase.util.Bytes;
-
-import com.google.common.collect.Lists;
 import org.apache.phoenix.compile.ScanRanges;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.filter.SkipScanFilter;
@@ -47,6 +45,8 @@ import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.PTable;
 import org.apache.phoenix.schema.RowKeySchema;
 
+import com.google.common.collect.Lists;
+
 
 /**
  * 
@@ -231,23 +231,6 @@ public class ScanUtil {
         return keyCopy;
     }
 
-    public static int estimateMaximumKeyLength(RowKeySchema schema, int schemaStartIndex, List<List<KeyRange>> slots) {
-        int maxLowerKeyLength = 0, maxUpperKeyLength = 0;
-        for (int i = 0; i < slots.size(); i++) {
-            int maxLowerRangeLength = 0, maxUpperRangeLength = 0;
-            for (KeyRange range: slots.get(i)) {
-            maxLowerRangeLength = Math.max(maxLowerRangeLength, range.getLowerRange().length);
-                maxUpperRangeLength = Math.max(maxUpperRangeLength, range.getUpperRange().length);
-            }
-            int trailingByte = (schema.getField(schemaStartIndex).getDataType().isFixedWidth() ||
-                    schemaStartIndex == schema.getFieldCount() - 1 ? 0 : 1);
-            maxLowerKeyLength += maxLowerRangeLength + trailingByte;
-            maxUpperKeyLength += maxUpperKeyLength + trailingByte;
-            schemaStartIndex++;
-        }
-        return Math.max(maxLowerKeyLength, maxUpperKeyLength);
-    }
-
     /*
     * Set the key by appending the keyRanges inside slots at positions as specified by the position array.
      * 
@@ -352,21 +335,6 @@ public class ScanUtil {
         return offset - byteOffset;
     }
 
-    public static boolean isAllSingleRowScan(List<List<KeyRange>> ranges, RowKeySchema schema) {
-        if (ranges.size() < schema.getMaxFields()) {
-            return false;
-        }
-        for (int i = 0; i < ranges.size(); i++) {
-            List<KeyRange> orRanges = ranges.get(i);
-            for (KeyRange range: orRanges) {
-                if (!range.isSingleKey()) {
-                    return false;
-                }
-            }
-        }
-        return true;
-    }
-
     /**
     * Perform a binary lookup on the list of KeyRange for the tightest slot such that the slotBound
      * of the current slot is higher or equal than the slotBound of our range. 

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/f7e023c7/src/main/java/org/apache/phoenix/util/SchemaUtil.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/phoenix/util/SchemaUtil.java b/src/main/java/org/apache/phoenix/util/SchemaUtil.java
index b960073..0e3127c 100644
--- a/src/main/java/org/apache/phoenix/util/SchemaUtil.java
+++ b/src/main/java/org/apache/phoenix/util/SchemaUtil.java
@@ -68,11 +68,6 @@ import org.apache.hadoop.hbase.regionserver.MultiVersionConsistencyControl;
 import org.apache.hadoop.hbase.regionserver.RegionScanner;
 import org.apache.hadoop.hbase.util.Bytes;
 import org.apache.hadoop.hbase.util.Pair;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.collect.Lists;
-import com.google.common.collect.Maps;
 import org.apache.phoenix.coprocessor.MetaDataProtocol;
 import org.apache.phoenix.exception.SQLExceptionCode;
 import org.apache.phoenix.exception.SQLExceptionInfo;
@@ -80,6 +75,7 @@ import org.apache.phoenix.jdbc.PhoenixConnection;
 import org.apache.phoenix.jdbc.PhoenixDatabaseMetaData;
 import org.apache.phoenix.parse.HintNode.Hint;
 import org.apache.phoenix.query.ConnectionQueryServices;
+import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.AmbiguousColumnException;
 import org.apache.phoenix.schema.ColumnFamilyNotFoundException;
@@ -96,6 +92,12 @@ import org.apache.phoenix.schema.PTableType;
 import org.apache.phoenix.schema.RowKeySchema;
 import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder;
 import org.apache.phoenix.schema.SaltingUtil;
+import org.apache.phoenix.schema.ValueSchema.Field;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
 
 
 
@@ -820,4 +822,33 @@ public class SchemaUtil {
         String tableName = fullTableName.substring(index+1);
         return getTableKey(schemaName, tableName); 
     }
+    
+    private static int getTerminatorCount(RowKeySchema schema) {
+        int nTerminators = 0;
+        for (int i = 0; i < schema.getFieldCount(); i++) {
+            Field field = schema.getField(i);
+            // We won't have a terminator on the last PK column
+            // unless it is variable length and exclusive, but
+            // having the extra byte irregardless won't hurt anything
+            if (!field.getDataType().isFixedWidth()) {
+                nTerminators++;
+            }
+        }
+        return nTerminators;
+    }
+
+    public static int getMaxKeyLength(RowKeySchema schema, List<List<KeyRange>> slots) {
+        int maxKeyLength = getTerminatorCount(schema);
+        for (List<KeyRange> slot : slots) {
+            int maxSlotLength = 0;
+            for (KeyRange range : slot) {
+                int maxRangeLength = Math.max(range.getLowerRange().length, range.getUpperRange().length);
+                if (maxSlotLength < maxRangeLength) {
+                    maxSlotLength = maxRangeLength;
+                }
+            }
+            maxKeyLength += maxSlotLength;
+        }
+        return maxKeyLength;
+    }
 }

http://git-wip-us.apache.org/repos/asf/incubator-phoenix/blob/f7e023c7/src/test/java/org/apache/phoenix/compile/WhereClauseFilterTest.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/phoenix/compile/WhereClauseFilterTest.java b/src/test/java/org/apache/phoenix/compile/WhereClauseFilterTest.java
index 6bbc030..a4a77cb 100644
--- a/src/test/java/org/apache/phoenix/compile/WhereClauseFilterTest.java
+++ b/src/test/java/org/apache/phoenix/compile/WhereClauseFilterTest.java
@@ -19,6 +19,7 @@
  */
 package org.apache.phoenix.compile;
 
+import static java.util.Collections.emptyList;
 import static org.apache.phoenix.util.TestUtil.ATABLE_NAME;
 import static org.apache.phoenix.util.TestUtil.TEST_PROPERTIES;
 import static org.apache.phoenix.util.TestUtil.and;
@@ -31,7 +32,6 @@ import static org.apache.phoenix.util.TestUtil.multiKVFilter;
 import static org.apache.phoenix.util.TestUtil.not;
 import static org.apache.phoenix.util.TestUtil.or;
 import static org.apache.phoenix.util.TestUtil.singleKVFilter;
-import static java.util.Collections.emptyList;
 import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -52,11 +52,6 @@ import org.apache.hadoop.hbase.client.Scan;
 import org.apache.hadoop.hbase.filter.CompareFilter.CompareOp;
 import org.apache.hadoop.hbase.filter.Filter;
 import org.apache.hadoop.hbase.util.Bytes;
-import org.junit.Ignore;
-import org.junit.Test;
-
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.Sets;
 import org.apache.phoenix.expression.Expression;
 import org.apache.phoenix.expression.LiteralExpression;
 import org.apache.phoenix.expression.RowKeyColumnExpression;
@@ -64,6 +59,7 @@ import org.apache.phoenix.expression.function.SubstrFunction;
 import org.apache.phoenix.filter.RowKeyComparisonFilter;
 import org.apache.phoenix.filter.SkipScanFilter;
 import org.apache.phoenix.jdbc.PhoenixConnection;
+import org.apache.phoenix.jdbc.PhoenixPreparedStatement;
 import org.apache.phoenix.parse.SQLParser;
 import org.apache.phoenix.parse.SelectStatement;
 import org.apache.phoenix.query.BaseConnectionlessQueryTest;
@@ -71,10 +67,16 @@ import org.apache.phoenix.query.KeyRange;
 import org.apache.phoenix.query.QueryConstants;
 import org.apache.phoenix.schema.PDataType;
 import org.apache.phoenix.schema.RowKeyValueAccessor;
+import org.apache.phoenix.schema.SaltingUtil;
 import org.apache.phoenix.util.ByteUtil;
 import org.apache.phoenix.util.DateUtil;
 import org.apache.phoenix.util.NumberUtil;
 import org.apache.phoenix.util.StringUtil;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import com.google.common.collect.ImmutableList;
+import com.google.common.collect.Sets;
 
 
 public class WhereClauseFilterTest extends BaseConnectionlessQueryTest {
@@ -114,6 +116,97 @@ public class WhereClauseFilterTest extends BaseConnectionlessQueryTest {
     }
 
     @Test
+    public void testSingleFixedFullPkSalted() throws SQLException {
+        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES).unwrap(PhoenixConnection.class);
+        pconn.createStatement().execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20");
+        String query = "select * from t where k=" + 1;
+        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        QueryPlan plan = pstmt.optimizeQuery();
+        Scan scan = plan.getContext().getScan();
+        Filter filter = scan.getFilter();
+        assertNull(filter);
+        byte[] key = new byte[PDataType.LONG.getByteSize() + 1];
+        PDataType.LONG.toBytes(1L, key, 1);
+        key[0] = SaltingUtil.getSaltingByte(key, 1, PDataType.LONG.getByteSize(), 20);
+        byte[] expectedStartKey = key;
+        byte[] expectedEndKey = ByteUtil.nextKey(key);
+        byte[] startKey = scan.getStartRow();
+        byte[] stopKey = scan.getStopRow();
+        assertTrue(Bytes.compareTo(expectedStartKey, startKey) == 0);
+        assertTrue(Bytes.compareTo(expectedEndKey, stopKey) == 0);
+    }
+
+    @Test
+    public void testSingleVariableFullPkSalted() throws SQLException {
+        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES).unwrap(PhoenixConnection.class);
+        pconn.createStatement().execute("CREATE TABLE t (k varchar primary key, v varchar) SALT_BUCKETS=20");
+        String query = "select * from t where k='a'";
+        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        QueryPlan plan = pstmt.optimizeQuery();
+        Scan scan = plan.getContext().getScan();
+        Filter filter = scan.getFilter();
+        assertNull(filter);
+        byte[] key = new byte[2];
+        PDataType.VARCHAR.toBytes("a", key, 1);
+        key[0] = SaltingUtil.getSaltingByte(key, 1, 1, 20);
+        byte[] expectedStartKey = key;
+        byte[] expectedEndKey = ByteUtil.concat(key, ByteUtil.nextKey(QueryConstants.SEPARATOR_BYTE_ARRAY));
+        byte[] startKey = scan.getStartRow();
+        byte[] stopKey = scan.getStopRow();
+        assertTrue(Bytes.compareTo(expectedStartKey, startKey) == 0);
+        assertTrue(Bytes.compareTo(expectedEndKey, stopKey) == 0);
+    }
+
+    @Test
+    public void testMultiFixedFullPkSalted() throws SQLException {
+        PhoenixConnection pconn = DriverManager.getConnection(getUrl(), TEST_PROPERTIES).unwrap(PhoenixConnection.class);
+        pconn.createStatement().execute("CREATE TABLE t (k bigint not null primary key, v varchar) SALT_BUCKETS=20");
+        String query = "select * from t where k in (1,3)";
+        PhoenixPreparedStatement pstmt = new PhoenixPreparedStatement(pconn, query);
+        QueryPlan plan = pstmt.optimizeQuery();
+        Scan scan = plan.getContext().getScan();
+        Filter filter = scan.getFilter();
+        byte[] key = new byte[PDataType.LONG.getByteSize() + 1];
+        PDataType.LONG.toBytes(1L, key, 1);
+        key[0] = SaltingUtil.getSaltingByte(key, 1, PDataType.LONG.getByteSize(), 20);
+        byte[] startKey1 = key;
+        byte[] endKey1 = ByteUtil.nextKey(key);
+        
+        key = new byte[PDataType.LONG.getByteSize() + 1];
+        PDataType.LONG.toBytes(3L, key, 1);
+        key[0] = SaltingUtil.getSaltingByte(key, 1, PDataType.LONG.getByteSize(), 20);
+        byte[] startKey2 = key;
+        byte[] endKey2 = ByteUtil.nextKey(key);
+        
+        byte[] startKey = scan.getStartRow();
+        byte[] stopKey = scan.getStopRow();
+        
+        // Due to salting byte, the 1 key may be after the 3 key
+        byte[] expectedStartKey;
+        byte[] expectedEndKey;
+        List<List<KeyRange>> expectedRanges = Collections.singletonList(
+                Arrays.asList(KeyRange.getKeyRange(startKey1, true, endKey1, false),
+                              KeyRange.getKeyRange(startKey2, true, endKey2, false)));
+        if (Bytes.compareTo(startKey1, startKey2) > 0) {
+            expectedStartKey = startKey2;
+            expectedEndKey = endKey1;
+            Collections.reverse(expectedRanges.get(0));
+        } else {
+            expectedStartKey = startKey1;
+            expectedEndKey = endKey2;
+        }
+        assertTrue(Bytes.compareTo(expectedStartKey, startKey) == 0);
+        assertTrue(Bytes.compareTo(expectedEndKey, stopKey) == 0);
+
+        assertNotNull(filter);
+        assertTrue(filter instanceof SkipScanFilter);
+        StatementContext context = plan.getContext();
+        ScanRanges scanRanges = context.getScanRanges();
+        List<List<KeyRange>> ranges = scanRanges.getRanges();
+        assertEquals(expectedRanges, ranges);
+    }
+
+    @Test
     public void testMultiColumnEqualFilter() throws SQLException {
         String tenantId = "000000000000001";
        String query = "select * from atable where organization_id='" + tenantId + "' and a_string=b_string";


Mime
View raw message