cassandra-commits mailing list archives

From jbel...@apache.org
Subject [1/4] cassandra git commit: Revert "Stop accessing the partitioner directly via StorageService"
Date Fri, 31 Jul 2015 17:25:08 GMT
Repository: cassandra
Updated Branches:
  refs/heads/trunk 69f77cbdd -> a22ce89e8


http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/db/ScrubTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/ScrubTest.java b/test/unit/org/apache/cassandra/db/ScrubTest.java
index 25b9cde..ee51a4d 100644
--- a/test/unit/org/apache/cassandra/db/ScrubTest.java
+++ b/test/unit/org/apache/cassandra/db/ScrubTest.java
@@ -22,6 +22,8 @@ import java.io.File;
 import java.io.IOError;
 import java.io.IOException;
 import java.io.RandomAccessFile;
+import java.lang.reflect.Field;
+import java.lang.reflect.Modifier;
 import java.nio.ByteBuffer;
 import java.util.*;
 import java.util.concurrent.ExecutionException;
@@ -42,9 +44,9 @@ import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.compaction.CompactionManager;
 import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.compaction.Scrubber;
+import org.apache.cassandra.db.index.SecondaryIndex;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.marshal.*;
-import org.apache.cassandra.db.marshal.UUIDType;
 import org.apache.cassandra.db.partitions.Partition;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.dht.ByteOrderedPartitioner;
@@ -53,10 +55,14 @@ import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.exceptions.RequestExecutionException;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.io.compress.CompressionMetadata;
-import org.apache.cassandra.io.sstable.*;
+import org.apache.cassandra.io.sstable.Component;
+import org.apache.cassandra.io.sstable.Descriptor;
+import org.apache.cassandra.io.sstable.SSTableRewriter;
+import org.apache.cassandra.io.sstable.SSTableTxnWriter;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 
 import static org.junit.Assert.*;
@@ -73,8 +79,6 @@ public class ScrubTest
     public static final String CF_UUID = "UUIDKeys";
     public static final String CF_INDEX1 = "Indexed1";
     public static final String CF_INDEX2 = "Indexed2";
-    public static final String CF_INDEX1_BYTEORDERED = "Indexed1_ordered";
-    public static final String CF_INDEX2_BYTEORDERED = "Indexed2_ordered";
 
     public static final String COL_INDEX = "birthdate";
     public static final String COL_NON_INDEX = "notbirthdate";
@@ -94,9 +98,7 @@ public class ScrubTest
                                                 .compressionParameters(SchemaLoader.getCompressionParameters(COMPRESSION_CHUNK_LENGTH)),
                                     SchemaLoader.standardCFMD(KEYSPACE, CF_UUID, 0, UUIDType.instance),
                                     SchemaLoader.keysIndexCFMD(KEYSPACE, CF_INDEX1, true),
-                                    SchemaLoader.compositeIndexCFMD(KEYSPACE, CF_INDEX2, true),
-                                    SchemaLoader.keysIndexCFMD(KEYSPACE, CF_INDEX1_BYTEORDERED, true).copy(ByteOrderedPartitioner.instance),
-                                    SchemaLoader.compositeIndexCFMD(KEYSPACE, CF_INDEX2_BYTEORDERED, true).copy(ByteOrderedPartitioner.instance));
+                                    SchemaLoader.compositeIndexCFMD(KEYSPACE, CF_INDEX2, true));
     }
 
     @Test
@@ -304,7 +306,7 @@ public class ScrubTest
     {
         // This test assumes ByteOrderPartitioner to create out-of-order SSTable
         IPartitioner oldPartitioner = DatabaseDescriptor.getPartitioner();
-        DatabaseDescriptor.setPartitionerUnsafe(new ByteOrderedPartitioner());
+        DatabaseDescriptor.setPartitioner(new ByteOrderedPartitioner());
 
         // Create out-of-order SSTable
         File tempDir = File.createTempFile("ScrubTest.testScrubOutOfOrder", "").getParentFile();
@@ -378,7 +380,7 @@ public class ScrubTest
         {
             FileUtils.deleteRecursive(tempDataDir);
             // reset partitioner
-            DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
+            DatabaseDescriptor.setPartitioner(oldPartitioner);
         }
     }
 
@@ -392,9 +394,9 @@ public class ScrubTest
             CompressionMetadata compData = CompressionMetadata.create(sstable.getFilename());
 
             CompressionMetadata.Chunk chunk1 = compData.chunkFor(
-                    sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.getPartitioner()), SSTableReader.Operator.EQ).position);
+                    sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.partitioner), SSTableReader.Operator.EQ).position);
             CompressionMetadata.Chunk chunk2 = compData.chunkFor(
-                    sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.getPartitioner()), SSTableReader.Operator.EQ).position);
+                    sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.partitioner), SSTableReader.Operator.EQ).position);
 
             startPosition = Math.min(chunk1.offset, chunk2.offset);
             endPosition = Math.max(chunk1.offset + chunk1.length, chunk2.offset + chunk2.length);
@@ -403,8 +405,8 @@ public class ScrubTest
         }
         else
         { // overwrite with garbage from key1 to key2
-            long row0Start = sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.getPartitioner()), SSTableReader.Operator.EQ).position;
-            long row1Start = sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.getPartitioner()), SSTableReader.Operator.EQ).position;
+            long row0Start = sstable.getPosition(PartitionPosition.ForKey.get(key1, sstable.partitioner), SSTableReader.Operator.EQ).position;
+            long row1Start = sstable.getPosition(PartitionPosition.ForKey.get(key2, sstable.partitioner), SSTableReader.Operator.EQ).position;
             startPosition = Math.min(row0Start, row1Start);
             endPosition = Math.max(row0Start, row1Start);
         }
@@ -545,24 +547,28 @@ public class ScrubTest
     {
         //If the partitioner preserves the order then SecondaryIndex uses BytesType comparator,
         // otherwise it uses LocalByPartitionerType
-        testScrubIndex(CF_INDEX1_BYTEORDERED, COL_INDEX, false, true);
+        setKeyComparator(BytesType.instance);
+        testScrubIndex(CF_INDEX1, COL_INDEX, false, true);
     }
 
     @Test /* CASSANDRA-5174 */
     public void testScrubCompositeIndex_preserveOrder() throws IOException, ExecutionException, InterruptedException
     {
-        testScrubIndex(CF_INDEX2_BYTEORDERED, COL_INDEX, true, true);
+        setKeyComparator(BytesType.instance);
+        testScrubIndex(CF_INDEX2, COL_INDEX, true, true);
     }
 
     @Test /* CASSANDRA-5174 */
     public void testScrubKeysIndex() throws IOException, ExecutionException, InterruptedException
     {
+        setKeyComparator(new LocalByPartionerType(StorageService.getPartitioner()));
         testScrubIndex(CF_INDEX1, COL_INDEX, false, true);
     }
 
     @Test /* CASSANDRA-5174 */
     public void testScrubCompositeIndex() throws IOException, ExecutionException, InterruptedException
     {
+        setKeyComparator(new LocalByPartionerType(StorageService.getPartitioner()));
         testScrubIndex(CF_INDEX2, COL_INDEX, true, true);
     }
 
@@ -584,6 +590,33 @@ public class ScrubTest
         testScrubIndex(CF_INDEX1, COL_INDEX, false, true, true);
     }
 
+    /** The SecondaryIndex class is used for custom indexes so to avoid
+     * making a public final field into a private field with getters
+     * and setters, we resort to this hack in order to test it properly
+     * since it can have two values which influence the scrubbing behavior.
+     * @param comparator - the key comparator we want to test
+     */
+    private void setKeyComparator(AbstractType<?> comparator)
+    {
+        try
+        {
+            Field keyComparator = SecondaryIndex.class.getDeclaredField("keyComparator");
+            keyComparator.setAccessible(true);
+            int modifiers = keyComparator.getModifiers();
+            Field modifierField = keyComparator.getClass().getDeclaredField("modifiers");
+            modifiers = modifiers & ~Modifier.FINAL;
+            modifierField.setAccessible(true);
+            modifierField.setInt(keyComparator, modifiers);
+
+            keyComparator.set(null, comparator);
+        }
+        catch (Exception ex)
+        {
+            fail("Failed to change key comparator in secondary index : " + ex.getMessage());
+            ex.printStackTrace();
+        }
+    }
+
     private void testScrubIndex(String cfName, String colName, boolean composite, boolean ... scrubs)
             throws IOException, ExecutionException, InterruptedException
     {

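The setKeyComparator helper added to ScrubTest above uses the classic pre-Java-12 reflection trick for overwriting a public static final field: make the Field accessible, clear the FINAL bit in the Field object's own "modifiers" field, then call set(). Below is a minimal self-contained sketch of the same technique; the Holder class and its VALUE field are hypothetical stand-ins for SecondaryIndex.keyComparator, and the sketch assumes the Java 8 JDK this branch targets (newer JDKs filter out Field.modifiers).

    import java.lang.reflect.Field;
    import java.lang.reflect.Modifier;

    public class StaticFinalOverrideSketch
    {
        // Hypothetical stand-in for SecondaryIndex and its public static final keyComparator.
        static class Holder
        {
            public static final Object VALUE = "original";
        }

        static void setStaticFinal(Class<?> owner, String fieldName, Object newValue) throws Exception
        {
            Field target = owner.getDeclaredField(fieldName);
            target.setAccessible(true);

            // Strip FINAL from the Field's reported modifiers so Field.set() no longer refuses.
            Field modifiersField = Field.class.getDeclaredField("modifiers");
            modifiersField.setAccessible(true);
            modifiersField.setInt(target, target.getModifiers() & ~Modifier.FINAL);

            // null receiver because the field is static.
            target.set(null, newValue);
        }

        public static void main(String[] args) throws Exception
        {
            setStaticFinal(Holder.class, "VALUE", "patched");
            // Reflective reads now observe the new value.
            System.out.println(Holder.class.getDeclaredField("VALUE").get(null)); // prints "patched"
        }
    }
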
http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/db/VerifyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java b/test/unit/org/apache/cassandra/db/VerifyTest.java
index f460cb5..2e659af 100644
--- a/test/unit/org/apache/cassandra/db/VerifyTest.java
+++ b/test/unit/org/apache/cassandra/db/VerifyTest.java
@@ -303,8 +303,8 @@ public class VerifyTest
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
 
         // overwrite one row with garbage
-        long row0Start = sstable.getPosition(PartitionPosition.ForKey.get(ByteBufferUtil.bytes("0"), cfs.getPartitioner()), SSTableReader.Operator.EQ).position;
-        long row1Start = sstable.getPosition(PartitionPosition.ForKey.get(ByteBufferUtil.bytes("1"), cfs.getPartitioner()), SSTableReader.Operator.EQ).position;
+        long row0Start = sstable.getPosition(PartitionPosition.ForKey.get(ByteBufferUtil.bytes("0"), sstable.partitioner), SSTableReader.Operator.EQ).position;
+        long row1Start = sstable.getPosition(PartitionPosition.ForKey.get(ByteBufferUtil.bytes("1"), sstable.partitioner), SSTableReader.Operator.EQ).position;
         long startPosition = row0Start < row1Start ? row0Start : row1Start;
         long endPosition = row0Start < row1Start ? row1Start : row0Start;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
index 360c663..1fc0c01 100644
--- a/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/TTLExpiryTest.java
@@ -215,7 +215,7 @@ public class TTLExpiryTest
         cfs.enableAutoCompaction(true);
         assertEquals(1, cfs.getLiveSSTables().size());
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
-        ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata), DataRange.allData(cfs.getPartitioner()), false);
+        ISSTableScanner scanner = sstable.getScanner(ColumnFilter.all(sstable.metadata), DataRange.allData(sstable.partitioner), false);
         assertTrue(scanner.hasNext());
         while(scanner.hasNext())
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
index 44f4d30..2153567 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/RealTransactionsTest.java
@@ -145,6 +145,7 @@ public class RealTransactionsTest extends SchemaLoader
         String query = "INSERT INTO %s.%s (key, name, val) VALUES (?, ?, ?)";
 
         try (CQLSSTableWriter writer = CQLSSTableWriter.builder()
+                                                       .withPartitioner(StorageService.getPartitioner())
                                                        .inDirectory(cfs.directories.getDirectoryForNewSSTables())
                                                        .forTable(String.format(schema, cfs.keyspace.getName(), cfs.name))
                                                        .using(String.format(query, cfs.keyspace.getName(), cfs.name))
@@ -177,6 +178,7 @@ public class RealTransactionsTest extends SchemaLoader
                                                            0,
                                                            0,
                                                            0,
+                                                           DatabaseDescriptor.getPartitioner(),
                                                            SerializationHeader.make(cfs.metadata, txn.originals()),
                                                            txn));
                 while (ci.hasNext())

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/db/lifecycle/TransactionLogsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/lifecycle/TransactionLogsTest.java b/test/unit/org/apache/cassandra/db/lifecycle/TransactionLogsTest.java
index 4339877..4105800 100644
--- a/test/unit/org/apache/cassandra/db/lifecycle/TransactionLogsTest.java
+++ b/test/unit/org/apache/cassandra/db/lifecycle/TransactionLogsTest.java
@@ -40,6 +40,7 @@ import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Directories;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.compaction.*;
+import org.apache.cassandra.dht.Murmur3Partitioner;
 import org.apache.cassandra.io.sstable.*;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
@@ -522,11 +523,12 @@ public class TransactionLogsTest extends AbstractTransactionalTest
 
         SerializationHeader header = SerializationHeader.make(cfs.metadata, Collections.EMPTY_LIST);
         StatsMetadata metadata = (StatsMetadata) new MetadataCollector(cfs.metadata.comparator)
-                                                 .finalizeMetadata(cfs.metadata.partitioner.getClass().getCanonicalName(), 0.01f, -1, header)
+                                                 .finalizeMetadata(Murmur3Partitioner.instance.getClass().getCanonicalName(), 0.01f, -1, header)
                                                  .get(MetadataType.STATS);
         SSTableReader reader = SSTableReader.internalOpen(descriptor,
                                                           components,
                                                           cfs.metadata,
+                                                          Murmur3Partitioner.instance,
                                                           dFile,
                                                           iFile,
                                                           MockSchema.indexSummary.sharedCopy(),

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/dht/BootStrapperTest.java b/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
index 9b1fa01..a1ea0e4 100644
--- a/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
+++ b/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
@@ -58,7 +58,8 @@ public class BootStrapperTest
     @BeforeClass
     public static void setup() throws ConfigurationException
     {
-        oldPartitioner = StorageService.instance.setPartitionerUnsafe(Murmur3Partitioner.instance);
+        oldPartitioner = DatabaseDescriptor.getPartitioner();
+        DatabaseDescriptor.setPartitioner(Murmur3Partitioner.instance);
         SchemaLoader.startGossiper();
         SchemaLoader.prepareServer();
         SchemaLoader.schemaDefinition("BootStrapperTest");
@@ -67,7 +68,7 @@ public class BootStrapperTest
     @AfterClass
     public static void tearDown()
     {
-        DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
+        DatabaseDescriptor.setPartitioner(oldPartitioner);
     }
 
     @Test
@@ -86,12 +87,12 @@ public class BootStrapperTest
     private RangeStreamer testSourceTargetComputation(String keyspaceName, int numOldNodes, int replicationFactor) throws UnknownHostException
     {
         StorageService ss = StorageService.instance;
-        TokenMetadata tmd = ss.getTokenMetadata();
 
         generateFakeEndpoints(numOldNodes);
-        Token myToken = tmd.partitioner.getRandomToken();
+        Token myToken = StorageService.getPartitioner().getRandomToken();
         InetAddress myEndpoint = InetAddress.getByName("127.0.0.1");
 
+        TokenMetadata tmd = ss.getTokenMetadata();
         assertEquals(numOldNodes, tmd.sortedTokens().size());
         RangeStreamer s = new RangeStreamer(tmd, null, myEndpoint, "Bootstrap", true, DatabaseDescriptor.getEndpointSnitch(), new StreamStateStore());
         IFailureDetector mockFailureDetector = new IFailureDetector()
@@ -135,7 +136,7 @@ public class BootStrapperTest
     private void generateFakeEndpoints(TokenMetadata tmd, int numOldNodes, int numVNodes) throws UnknownHostException
     {
         tmd.clearUnsafe();
-        IPartitioner p = tmd.partitioner;
+        IPartitioner p = StorageService.getPartitioner();
 
         for (int i = 1; i <= numOldNodes; i++)
         {

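For context, the revert restores the static DatabaseDescriptor.getPartitioner()/setPartitioner() pair, so fixtures that need a specific partitioner go back to the stash-swap-restore pattern visible in BootStrapperTest above and in KeyCollisionTest below. A minimal JUnit 4 sketch of that pattern follows; the class name and the assertion are illustrative only.

    import static org.junit.Assert.assertSame;

    import org.junit.AfterClass;
    import org.junit.BeforeClass;
    import org.junit.Test;

    import org.apache.cassandra.config.DatabaseDescriptor;
    import org.apache.cassandra.dht.IPartitioner;
    import org.apache.cassandra.dht.Murmur3Partitioner;

    // Hypothetical fixture: swap the global partitioner for the whole class, then put
    // the original back so later test classes still see the configured partitioner.
    public class PartitionerSwapSketchTest
    {
        private static IPartitioner oldPartitioner;

        @BeforeClass
        public static void setUp()
        {
            oldPartitioner = DatabaseDescriptor.getPartitioner();
            DatabaseDescriptor.setPartitioner(Murmur3Partitioner.instance);
        }

        @AfterClass
        public static void tearDown()
        {
            DatabaseDescriptor.setPartitioner(oldPartitioner);
        }

        @Test
        public void swappedPartitionerIsVisibleGlobally()
        {
            assertSame(Murmur3Partitioner.instance, DatabaseDescriptor.getPartitioner());
        }
    }
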
http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java b/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
index ade6ec1..3cda1d3 100644
--- a/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
+++ b/test/unit/org/apache/cassandra/dht/KeyCollisionTest.java
@@ -18,7 +18,12 @@
 package org.apache.cassandra.dht;
 
 import java.math.BigInteger;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.HashMap;
 import java.util.List;
+import java.util.Map;
+import java.util.Random;
 
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -26,17 +31,23 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
+import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.db.BufferDecoratedKey;
 import org.apache.cassandra.db.ColumnFamilyStore;
+import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.RowUpdateBuilder;
+import org.apache.cassandra.db.marshal.AbstractType;
+import org.apache.cassandra.db.marshal.IntegerType;
 import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.Pair;
 
 /**
  * Test cases where multiple keys collides, ie have the same token.
@@ -54,7 +65,8 @@ public class KeyCollisionTest
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
-        oldPartitioner = StorageService.instance.setPartitionerUnsafe(LengthPartitioner.instance);
+        oldPartitioner = DatabaseDescriptor.getPartitioner();
+        DatabaseDescriptor.setPartitioner(LengthPartitioner.instance);
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE1,
                                     KeyspaceParams.simple(1),
@@ -64,7 +76,7 @@ public class KeyCollisionTest
     @AfterClass
     public static void tearDown()
     {
-        DatabaseDescriptor.setPartitionerUnsafe(oldPartitioner);
+        DatabaseDescriptor.setPartitioner(oldPartitioner);
     }
 
     @Test
@@ -124,4 +136,122 @@ public class KeyCollisionTest
             return 0;
         }
     }
+
+    public static class LengthPartitioner implements IPartitioner
+    {
+        public static final BigInteger ZERO = new BigInteger("0");
+        public static final BigIntegerToken MINIMUM = new BigIntegerToken("-1");
+
+        public static LengthPartitioner instance = new LengthPartitioner();
+
+        public DecoratedKey decorateKey(ByteBuffer key)
+        {
+            return new BufferDecoratedKey(getToken(key), key);
+        }
+
+        public BigIntegerToken midpoint(Token ltoken, Token rtoken)
+        {
+            // the symbolic MINIMUM token should act as ZERO: the empty bit array
+            BigInteger left = ltoken.equals(MINIMUM) ? ZERO : ((BigIntegerToken)ltoken).token;
+            BigInteger right = rtoken.equals(MINIMUM) ? ZERO : ((BigIntegerToken)rtoken).token;
+            Pair<BigInteger,Boolean> midpair = FBUtilities.midpoint(left, right, 127);
+            // discard the remainder
+            return new BigIntegerToken(midpair.left);
+        }
+
+        public BigIntegerToken getMinimumToken()
+        {
+            return MINIMUM;
+        }
+
+        public BigIntegerToken getRandomToken()
+        {
+            return new BigIntegerToken(BigInteger.valueOf(new Random().nextInt(15)));
+        }
+
+        private final Token.TokenFactory tokenFactory = new Token.TokenFactory() {
+            public ByteBuffer toByteArray(Token token)
+            {
+                BigIntegerToken bigIntegerToken = (BigIntegerToken) token;
+                return ByteBuffer.wrap(bigIntegerToken.token.toByteArray());
+            }
+
+            public Token fromByteArray(ByteBuffer bytes)
+            {
+                return new BigIntegerToken(new BigInteger(ByteBufferUtil.getArray(bytes)));
+            }
+
+            public String toString(Token token)
+            {
+                BigIntegerToken bigIntegerToken = (BigIntegerToken) token;
+                return bigIntegerToken.token.toString();
+            }
+
+            public Token fromString(String string)
+            {
+                return new BigIntegerToken(new BigInteger(string));
+            }
+
+            public void validate(String token) {}
+        };
+
+        public Token.TokenFactory getTokenFactory()
+        {
+            return tokenFactory;
+        }
+
+        public boolean preservesOrder()
+        {
+            return false;
+        }
+
+        public BigIntegerToken getToken(ByteBuffer key)
+        {
+            if (key.remaining() == 0)
+                return MINIMUM;
+            return new BigIntegerToken(BigInteger.valueOf(key.remaining()));
+        }
+
+        public Map<Token, Float> describeOwnership(List<Token> sortedTokens)
+        {
+            // allTokens will contain the count and be returned, sorted_ranges is shorthand for token<->token math.
+            Map<Token, Float> allTokens = new HashMap<Token, Float>();
+            List<Range<Token>> sortedRanges = new ArrayList<Range<Token>>();
+
+            // this initializes the counts to 0 and calcs the ranges in order.
+            Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
+            for (Token node : sortedTokens)
+            {
+                allTokens.put(node, new Float(0.0));
+                sortedRanges.add(new Range<Token>(lastToken, node));
+                lastToken = node;
+            }
+
+            for (String ks : Schema.instance.getKeyspaces())
+            {
+                for (CFMetaData cfmd : Schema.instance.getTables(ks))
+                {
+                    for (Range<Token> r : sortedRanges)
+                    {
+                        // Looping over every KS:CF:Range, get the splits size and add it to the count
+                        allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.cfName, r, 1).size());
+                    }
+                }
+            }
+
+            // Sum every count up and divide count/total for the fractional ownership.
+            Float total = new Float(0.0);
+            for (Float f : allTokens.values())
+                total += f;
+            for (Map.Entry<Token, Float> row : allTokens.entrySet())
+                allTokens.put(row.getKey(), row.getValue() / total);
+
+            return allTokens;
+        }
+
+        public AbstractType<?> getTokenValidator()
+        {
+            return IntegerType.instance;
+        }
+    }
 }

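LengthPartitioner, now inlined into KeyCollisionTest, makes token collisions trivial to produce: getToken() returns the key's byte length, so any two distinct keys of the same length share a token. A small illustrative snippet (the demo class itself is hypothetical):

    import org.apache.cassandra.db.DecoratedKey;
    import org.apache.cassandra.dht.KeyCollisionTest;
    import org.apache.cassandra.utils.ByteBufferUtil;

    // Hypothetical demo: two different 3-byte keys decorate to the same token (3),
    // which is exactly the collision scenario KeyCollisionTest exercises.
    public class LengthPartitionerDemo
    {
        public static void main(String[] args)
        {
            KeyCollisionTest.LengthPartitioner p = KeyCollisionTest.LengthPartitioner.instance;
            DecoratedKey abc = p.decorateKey(ByteBufferUtil.bytes("abc"));
            DecoratedKey xyz = p.decorateKey(ByteBufferUtil.bytes("xyz"));
            System.out.println(abc.getToken().equals(xyz.getToken())); // true
        }
    }
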
http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java b/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
deleted file mode 100644
index 40a6774..0000000
--- a/test/unit/org/apache/cassandra/dht/LengthPartitioner.java
+++ /dev/null
@@ -1,158 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.dht;
-
-import java.math.BigInteger;
-import java.nio.ByteBuffer;
-import java.util.*;
-
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.db.BufferDecoratedKey;
-import org.apache.cassandra.db.DecoratedKey;
-import org.apache.cassandra.db.marshal.AbstractType;
-import org.apache.cassandra.db.marshal.IntegerType;
-import org.apache.cassandra.db.marshal.PartitionerDefinedOrder;
-import org.apache.cassandra.dht.KeyCollisionTest.BigIntegerToken;
-import org.apache.cassandra.service.StorageService;
-import org.apache.cassandra.utils.ByteBufferUtil;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.Pair;
-
-public class LengthPartitioner implements IPartitioner
-{
-    public static final BigInteger ZERO = new BigInteger("0");
-    public static final BigIntegerToken MINIMUM = new BigIntegerToken("-1");
-
-    public static LengthPartitioner instance = new LengthPartitioner();
-
-    public DecoratedKey decorateKey(ByteBuffer key)
-    {
-        return new BufferDecoratedKey(getToken(key), key);
-    }
-
-    public BigIntegerToken midpoint(Token ltoken, Token rtoken)
-    {
-        // the symbolic MINIMUM token should act as ZERO: the empty bit array
-        BigInteger left = ltoken.equals(MINIMUM) ? ZERO : ((BigIntegerToken)ltoken).token;
-        BigInteger right = rtoken.equals(MINIMUM) ? ZERO : ((BigIntegerToken)rtoken).token;
-        Pair<BigInteger,Boolean> midpair = FBUtilities.midpoint(left, right, 127);
-        // discard the remainder
-        return new BigIntegerToken(midpair.left);
-    }
-
-    public BigIntegerToken getMinimumToken()
-    {
-        return MINIMUM;
-    }
-
-    public BigIntegerToken getRandomToken()
-    {
-        return new BigIntegerToken(BigInteger.valueOf(new Random().nextInt(15)));
-    }
-
-    private final Token.TokenFactory tokenFactory = new Token.TokenFactory() {
-        public ByteBuffer toByteArray(Token token)
-        {
-            BigIntegerToken bigIntegerToken = (BigIntegerToken) token;
-            return ByteBuffer.wrap(bigIntegerToken.token.toByteArray());
-        }
-
-        public Token fromByteArray(ByteBuffer bytes)
-        {
-            return new BigIntegerToken(new BigInteger(ByteBufferUtil.getArray(bytes)));
-        }
-
-        public String toString(Token token)
-        {
-            BigIntegerToken bigIntegerToken = (BigIntegerToken) token;
-            return bigIntegerToken.token.toString();
-        }
-
-        public Token fromString(String string)
-        {
-            return new BigIntegerToken(new BigInteger(string));
-        }
-
-        public void validate(String token) {}
-    };
-
-    public Token.TokenFactory getTokenFactory()
-    {
-        return tokenFactory;
-    }
-
-    public boolean preservesOrder()
-    {
-        return false;
-    }
-
-    public BigIntegerToken getToken(ByteBuffer key)
-    {
-        if (key.remaining() == 0)
-            return MINIMUM;
-        return new BigIntegerToken(BigInteger.valueOf(key.remaining()));
-    }
-
-    public Map<Token, Float> describeOwnership(List<Token> sortedTokens)
-    {
-        // allTokens will contain the count and be returned, sorted_ranges is shorthand for token<->token math.
-        Map<Token, Float> allTokens = new HashMap<Token, Float>();
-        List<Range<Token>> sortedRanges = new ArrayList<Range<Token>>();
-
-        // this initializes the counts to 0 and calcs the ranges in order.
-        Token lastToken = sortedTokens.get(sortedTokens.size() - 1);
-        for (Token node : sortedTokens)
-        {
-            allTokens.put(node, new Float(0.0));
-            sortedRanges.add(new Range<Token>(lastToken, node));
-            lastToken = node;
-        }
-
-        for (String ks : Schema.instance.getKeyspaces())
-        {
-            for (CFMetaData cfmd : Schema.instance.getTables(ks))
-            {
-                for (Range<Token> r : sortedRanges)
-                {
-                    // Looping over every KS:CF:Range, get the splits size and add it to the count
-                    allTokens.put(r.right, allTokens.get(r.right) + StorageService.instance.getSplits(ks, cfmd.cfName, r, 1).size());
-                }
-            }
-        }
-
-        // Sum every count up and divide count/total for the fractional ownership.
-        Float total = new Float(0.0);
-        for (Float f : allTokens.values())
-            total += f;
-        for (Map.Entry<Token, Float> row : allTokens.entrySet())
-            allTokens.put(row.getKey(), row.getValue() / total);
-
-        return allTokens;
-    }
-
-    public AbstractType<?> getTokenValidator()
-    {
-        return IntegerType.instance;
-    }
-
-    public AbstractType<?> partitionOrdering()
-    {
-        return new PartitionerDefinedOrder(this);
-    }
-}
\ No newline at end of file

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/dht/PartitionerTestCase.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/dht/PartitionerTestCase.java b/test/unit/org/apache/cassandra/dht/PartitionerTestCase.java
index cb892a7..887c481 100644
--- a/test/unit/org/apache/cassandra/dht/PartitionerTestCase.java
+++ b/test/unit/org/apache/cassandra/dht/PartitionerTestCase.java
@@ -129,7 +129,7 @@ public abstract class PartitionerTestCase
     {
         // This call initializes StorageService, needed to populate the keyspaces.
         // TODO: This points to potential problems in the initialization sequence. Should be solved by CASSANDRA-7837.
-        StorageService.instance.getKeyspaces();
+        StorageService.getPartitioner();
 
         try
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/gms/SerializationsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/gms/SerializationsTest.java b/test/unit/org/apache/cassandra/gms/SerializationsTest.java
index 8b7ad1f..bab1ace 100644
--- a/test/unit/org/apache/cassandra/gms/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/gms/SerializationsTest.java
@@ -19,14 +19,11 @@
 package org.apache.cassandra.gms;
 
 import org.apache.cassandra.AbstractSerializationsTester;
-import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
-
 import org.junit.Test;
 
 import java.io.IOException;
@@ -76,9 +73,7 @@ public class SerializationsTest extends AbstractSerializationsTester
         states.put(InetAddress.getByName("127.0.0.2"), Statics.EndpointSt);
         GossipDigestAck ack = new GossipDigestAck(Statics.Digests, states);
         GossipDigestAck2 ack2 = new GossipDigestAck2(states);
-        GossipDigestSyn syn = new GossipDigestSyn("Not a real cluster name",
-                                                  StorageService.instance.getTokenMetadata().partitioner.getClass().getCanonicalName(),
-                                                  Statics.Digests);
+        GossipDigestSyn syn = new GossipDigestSyn("Not a real cluster name", StorageService.getPartitioner().getClass().getCanonicalName(), Statics.Digests);
 
         DataOutputStreamPlus out = getOutput("gms.Gossip.bin");
         for (GossipDigest gd : Statics.Digests)
@@ -116,10 +111,9 @@ public class SerializationsTest extends AbstractSerializationsTester
     {
         private static HeartBeatState HeartbeatSt = new HeartBeatState(101, 201);
         private static EndpointState EndpointSt = new EndpointState(HeartbeatSt);
-        private static IPartitioner partitioner = StorageService.instance.getTokenMetadata().partitioner;
-        private static VersionedValue.VersionedValueFactory vvFact = new VersionedValue.VersionedValueFactory(partitioner);
+        private static VersionedValue.VersionedValueFactory vvFact = new VersionedValue.VersionedValueFactory(StorageService.getPartitioner());
         private static VersionedValue vv0 = vvFact.load(23d);
-        private static VersionedValue vv1 = vvFact.bootstrapping(Collections.<Token>singleton(partitioner.getRandomToken()));
+        private static VersionedValue vv1 = vvFact.bootstrapping(Collections.<Token>singleton(StorageService.getPartitioner().getRandomToken()));
         private static List<GossipDigest> Digests = new ArrayList<GossipDigest>();
 
         {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
index 357298e..08de62f 100644
--- a/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/BigTableWriterTest.java
@@ -31,6 +31,7 @@ import org.apache.cassandra.db.Keyspace;
 import org.apache.cassandra.db.SerializationHeader;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.db.rows.EncodingStats;
+import org.apache.cassandra.io.sstable.format.SSTableWriter;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.utils.concurrent.AbstractTransactionalTest;
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
index 2e9768e..7bc21ee 100644
--- a/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/CQLSSTableWriterTest.java
@@ -26,20 +26,19 @@ import java.util.UUID;
 
 import com.google.common.collect.ImmutableMap;
 import com.google.common.io.Files;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.Util;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.Keyspace;
-import org.apache.cassandra.dht.*;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.OutputHandler;
@@ -66,79 +65,78 @@ public class CQLSSTableWriterTest
     @Test
     public void testUnsortedWriter() throws Exception
     {
-        try (AutoCloseable switcher = Util.switchPartitioner(ByteOrderedPartitioner.instance))
-        {
-            String KS = "cql_keyspace";
-            String TABLE = "table1";
-
-            File tempdir = Files.createTempDir();
-            File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
-            assert dataDir.mkdirs();
-
-            String schema = "CREATE TABLE cql_keyspace.table1 ("
-                          + "  k int PRIMARY KEY,"
-                          + "  v1 text,"
-                          + "  v2 int"
-                          + ")";
-            String insert = "INSERT INTO cql_keyspace.table1 (k, v1, v2) VALUES (?, ?, ?)";
-            CQLSSTableWriter writer = CQLSSTableWriter.builder()
-                                                      .inDirectory(dataDir)
-                                                      .forTable(schema)
-                                                      .using(insert).build();
+        String KS = "cql_keyspace";
+        String TABLE = "table1";
 
-            writer.addRow(0, "test1", 24);
-            writer.addRow(1, "test2", 44);
-            writer.addRow(2, "test3", 42);
-            writer.addRow(ImmutableMap.<String, Object>of("k", 3, "v2", 12));
+        File tempdir = Files.createTempDir();
+        File dataDir = new File(tempdir.getAbsolutePath() + File.separator + KS + File.separator + TABLE);
+        assert dataDir.mkdirs();
 
-            writer.close();
+        String schema = "CREATE TABLE cql_keyspace.table1 ("
+                      + "  k int PRIMARY KEY,"
+                      + "  v1 text,"
+                      + "  v2 int"
+                      + ")";
+        String insert = "INSERT INTO cql_keyspace.table1 (k, v1, v2) VALUES (?, ?, ?)";
+        CQLSSTableWriter writer = CQLSSTableWriter.builder()
+                                                  .inDirectory(dataDir)
+                                                  .forTable(schema)
+                                                  .withPartitioner(StorageService.getPartitioner())
+                                                  .using(insert).build();
 
-            SSTableLoader loader = new SSTableLoader(dataDir, new SSTableLoader.Client()
-            {
-                private String keyspace;
+        writer.addRow(0, "test1", 24);
+        writer.addRow(1, "test2", 44);
+        writer.addRow(2, "test3", 42);
+        writer.addRow(ImmutableMap.<String, Object>of("k", 3, "v2", 12));
 
-                public void init(String keyspace)
-                {
-                    this.keyspace = keyspace;
-                    for (Range<Token> range : StorageService.instance.getLocalRanges("cql_keyspace"))
-                        addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
-                }
+        writer.close();
 
-                public CFMetaData getTableMetadata(String cfName)
-                {
-                    return Schema.instance.getCFMetaData(keyspace, cfName);
-                }
-            }, new OutputHandler.SystemOutput(false, false));
+        SSTableLoader loader = new SSTableLoader(dataDir, new SSTableLoader.Client()
+        {
+            private String keyspace;
 
-            loader.stream().get();
+            public void init(String keyspace)
+            {
+                this.keyspace = keyspace;
+                for (Range<Token> range : StorageService.instance.getLocalRanges("cql_keyspace"))
+                    addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
+                setPartitioner(StorageService.getPartitioner());
+            }
 
-            UntypedResultSet rs = QueryProcessor.executeInternal("SELECT * FROM cql_keyspace.table1;");
-            assertEquals(4, rs.size());
+            public CFMetaData getTableMetadata(String cfName)
+            {
+                return Schema.instance.getCFMetaData(keyspace, cfName);
+            }
+        }, new OutputHandler.SystemOutput(false, false));
 
-            Iterator<UntypedResultSet.Row> iter = rs.iterator();
-            UntypedResultSet.Row row;
+        loader.stream().get();
 
-            row = iter.next();
-            assertEquals(0, row.getInt("k"));
-            assertEquals("test1", row.getString("v1"));
-            assertEquals(24, row.getInt("v2"));
+        UntypedResultSet rs = QueryProcessor.executeInternal("SELECT * FROM cql_keyspace.table1;");
+        assertEquals(4, rs.size());
 
-            row = iter.next();
-            assertEquals(1, row.getInt("k"));
-            assertEquals("test2", row.getString("v1"));
-            //assertFalse(row.has("v2"));
-            assertEquals(44, row.getInt("v2"));
+        Iterator<UntypedResultSet.Row> iter = rs.iterator();
+        UntypedResultSet.Row row;
 
-            row = iter.next();
-            assertEquals(2, row.getInt("k"));
-            assertEquals("test3", row.getString("v1"));
-            assertEquals(42, row.getInt("v2"));
+        row = iter.next();
+        assertEquals(0, row.getInt("k"));
+        assertEquals("test1", row.getString("v1"));
+        assertEquals(24, row.getInt("v2"));
 
-            row = iter.next();
-            assertEquals(3, row.getInt("k"));
-            assertEquals(null, row.getBytes("v1")); // Using getBytes because we know it won't NPE
-            assertEquals(12, row.getInt("v2"));
-        }
+        row = iter.next();
+        assertEquals(1, row.getInt("k"));
+        assertEquals("test2", row.getString("v1"));
+        //assertFalse(row.has("v2"));
+        assertEquals(44, row.getInt("v2"));
+
+        row = iter.next();
+        assertEquals(2, row.getInt("k"));
+        assertEquals("test3", row.getString("v1"));
+        assertEquals(42, row.getInt("v2"));
+
+        row = iter.next();
+        assertEquals(3, row.getInt("k"));
+        assertEquals(null, row.getBytes("v1")); // Using getBytes because we know it won't NPE
+        assertEquals(12, row.getInt("v2"));
     }
 
     @Test
@@ -161,6 +159,7 @@ public class CQLSSTableWriterTest
         CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                   .inDirectory(dataDir)
                                                   .forTable(schema)
+                                                  .withPartitioner(StorageService.getPartitioner())
                                                   .using(insert)
                                                   .withBufferSizeInMB(1)
                                                   .build();
@@ -196,6 +195,7 @@ public class CQLSSTableWriterTest
         CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                   .inDirectory(tempdir)
                                                   .forTable(schema)
+                                                  .withPartitioner(StorageService.instance.getPartitioner())
                                                   .using(insert)
                                                   .withBufferSizeInMB(1)
                                                   .build();
@@ -234,6 +234,7 @@ public class CQLSSTableWriterTest
             CQLSSTableWriter writer = CQLSSTableWriter.builder()
                     .inDirectory(dataDir)
                     .forTable(schema)
+                    .withPartitioner(StorageService.instance.getPartitioner())
                     .using(insert).build();
 
             try
@@ -288,6 +289,7 @@ public class CQLSSTableWriterTest
                 this.keyspace = keyspace;
                 for (Range<Token> range : StorageService.instance.getLocalRanges(KS))
                     addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
+                setPartitioner(StorageService.getPartitioner());
             }
 
             public CFMetaData getTableMetadata(String cfName)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
index baa6fad..7442a22 100644
--- a/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/IndexSummaryTest.java
@@ -26,7 +26,7 @@ import java.util.*;
 import com.google.common.collect.Lists;
 import org.junit.Test;
 
-import org.apache.cassandra.Util;
+import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.RandomPartitioner;
@@ -39,12 +39,11 @@ import org.apache.cassandra.utils.Pair;
 import static org.apache.cassandra.io.sstable.IndexSummaryBuilder.downsample;
 import static org.apache.cassandra.io.sstable.IndexSummaryBuilder.entriesAtSamplingLevel;
 import static org.apache.cassandra.io.sstable.Downsampling.BASE_SAMPLING_LEVEL;
+
 import static org.junit.Assert.*;
 
 public class IndexSummaryTest
 {
-    IPartitioner partitioner = Util.testPartitioner();
-
     @Test
     public void testGetKey()
     {
@@ -83,7 +82,7 @@ public class IndexSummaryTest
         dos.writeUTF("JUNK");
         FileUtils.closeQuietly(dos);
         DataInputStream dis = new DataInputStream(new ByteArrayInputStream(dos.toByteArray()));
-        IndexSummary is = IndexSummary.serializer.deserialize(dis, partitioner, false, 1, 1);
+        IndexSummary is = IndexSummary.serializer.deserialize(dis, DatabaseDescriptor.getPartitioner(), false, 1, 1);
         for (int i = 0; i < 100; i++)
             assertEquals(i, is.binarySearch(random.left.get(i)));
         // read the junk
@@ -127,13 +126,13 @@ public class IndexSummaryTest
             for (int i = 0; i < size; i++)
             {
                 UUID uuid = UUID.randomUUID();
-                DecoratedKey key = partitioner.decorateKey(ByteBufferUtil.bytes(uuid));
+                DecoratedKey key = DatabaseDescriptor.getPartitioner().decorateKey(ByteBufferUtil.bytes(uuid));
                 list.add(key);
             }
             Collections.sort(list);
             for (int i = 0; i < size; i++)
                 builder.maybeAddEntry(list.get(i), i);
-            IndexSummary summary = builder.build(partitioner);
+            IndexSummary summary = builder.build(DatabaseDescriptor.getPartitioner());
             return Pair.create(list, summary);
         }
         catch (IOException e)
@@ -186,7 +185,7 @@ public class IndexSummaryTest
         int downsamplingRound = 1;
         for (int samplingLevel = BASE_SAMPLING_LEVEL - 1; samplingLevel >= 1; samplingLevel--)
         {
-            try (IndexSummary downsampled = downsample(original, samplingLevel, 128, partitioner);)
+            try (IndexSummary downsampled = downsample(original, samplingLevel, 128, DatabaseDescriptor.getPartitioner());)
             {
                 assertEquals(entriesAtSamplingLevel(samplingLevel, original.getMaxNumberOfEntries()), downsampled.size());
 
@@ -211,7 +210,7 @@ public class IndexSummaryTest
         downsamplingRound = 1;
         for (int downsampleLevel = BASE_SAMPLING_LEVEL - 1; downsampleLevel >= 1; downsampleLevel--)
         {
-            IndexSummary downsampled = downsample(previous, downsampleLevel, 128, partitioner);
+            IndexSummary downsampled = downsample(previous, downsampleLevel, 128, DatabaseDescriptor.getPartitioner());
             if (previous != original)
                 previous.close();
             assertEquals(entriesAtSamplingLevel(downsampleLevel, original.getMaxNumberOfEntries()), downsampled.size());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
index d2922cc..7d97ec0 100644
--- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java
@@ -136,7 +136,7 @@ public class LegacySSTableTest
     private void testStreaming(String version) throws Exception
     {
         SSTableReader sstable = SSTableReader.open(getDescriptor(version));
-        IPartitioner p = sstable.getPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         List<Range<Token>> ranges = new ArrayList<>();
         ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("100"))));
         ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("100")), p.getMinimumToken()));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
index dfd7821..782f7fd 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableLoaderTest.java
@@ -86,6 +86,7 @@ public class SSTableLoaderTest
             this.keyspace = keyspace;
             for (Range<Token> range : StorageService.instance.getLocalRanges(KEYSPACE1))
                 addRangeForEndpoint(range, FBUtilities.getBroadcastAddress());
+            setPartitioner(StorageService.getPartitioner());
         }
 
         public CFMetaData getTableMetadata(String tableName)
@@ -106,6 +107,7 @@ public class SSTableLoaderTest
 
         try (CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                        .inDirectory(dataDir)
+                                                       .withPartitioner(StorageService.getPartitioner())
                                                        .forTable(String.format(schema, KEYSPACE1, CF_STANDARD1))
                                                        .using(String.format(query, KEYSPACE1, CF_STANDARD1))
                                                        .build())
@@ -139,6 +141,7 @@ public class SSTableLoaderTest
 
         CQLSSTableWriter writer = CQLSSTableWriter.builder()
                                                   .inDirectory(dataDir)
+                                                  .withPartitioner(StorageService.getPartitioner())
                                                   .forTable(String.format(schema, KEYSPACE1, CF_STANDARD2))
                                                   .using(String.format(query, KEYSPACE1, CF_STANDARD2))
                                                   .withBufferSizeInMB(1)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
index 2fe5ef2..651ed8d 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableReaderTest.java
@@ -52,14 +52,17 @@ import org.apache.cassandra.db.compaction.OperationType;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.index.SecondaryIndexSearcher;
 import org.apache.cassandra.db.rows.Row;
-import org.apache.cassandra.dht.*;
+import org.apache.cassandra.dht.LocalPartitioner;
 import org.apache.cassandra.dht.LocalPartitioner.LocalToken;
+import org.apache.cassandra.dht.Range;
+import org.apache.cassandra.dht.Token;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.io.util.FileDataInput;
 import org.apache.cassandra.io.util.MmappedSegmentedFile;
 import org.apache.cassandra.io.util.SegmentedFile;
 import org.apache.cassandra.schema.KeyspaceParams;
 import org.apache.cassandra.service.CacheService;
+import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.Pair;
 
@@ -76,11 +79,9 @@ public class SSTableReaderTest
     public static final String CF_INDEXED = "Indexed1";
     public static final String CF_STANDARDLOWINDEXINTERVAL = "StandardLowIndexInterval";
 
-    private IPartitioner partitioner;
-
-    Token t(int i)
+    static Token t(int i)
     {
-        return partitioner.getToken(ByteBufferUtil.bytes(String.valueOf(i)));
+        return StorageService.getPartitioner().getToken(ByteBufferUtil.bytes(String.valueOf(i)));
     }
 
     @BeforeClass
@@ -103,7 +104,6 @@ public class SSTableReaderTest
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
-        partitioner = store.getPartitioner();
 
         // insert data and compact to a single sstable
         CompactionManager.instance.disableAutoCompaction();
@@ -124,7 +124,7 @@ public class SSTableReaderTest
         // 2 keys
         ranges.add(new Range<>(t(2), t(4)));
         // wrapping range from key to end
-        ranges.add(new Range<>(t(6), partitioner.getMinimumToken()));
+        ranges.add(new Range<>(t(6), StorageService.getPartitioner().getMinimumToken()));
         // empty range (should be ignored)
         ranges.add(new Range<>(t(9), t(91)));
 
@@ -146,7 +146,6 @@ public class SSTableReaderTest
 
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
-        partitioner = store.getPartitioner();
 
         // insert a bunch of data and compact to a single sstable
         CompactionManager.instance.disableAutoCompaction();
@@ -167,7 +166,7 @@ public class SSTableReaderTest
         {
             DecoratedKey dk = Util.dk(String.valueOf(j));
             FileDataInput file = sstable.getFileDataInput(sstable.getPosition(dk, SSTableReader.Operator.EQ).position);
-            DecoratedKey keyInDisk = sstable.decorateKey(ByteBufferUtil.readWithShortLength(file));
+            DecoratedKey keyInDisk = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(file));
             assert keyInDisk.equals(dk) : String.format("%s != %s in %s", keyInDisk, dk, file.getPath());
         }
 
@@ -185,7 +184,6 @@ public class SSTableReaderTest
 
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
-        partitioner = store.getPartitioner();
 
         for (int j = 0; j < 100; j += 2)
         {
@@ -213,7 +211,6 @@ public class SSTableReaderTest
         // try to make sure CASSANDRA-8239 never happens again
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
-        partitioner = store.getPartitioner();
 
         for (int j = 0; j < 10; j++)
         {
@@ -229,7 +226,7 @@ public class SSTableReaderTest
         SSTableReader sstable = store.getLiveSSTables().iterator().next();
         assertEquals(0, sstable.getReadMeter().count());
 
-        DecoratedKey key = sstable.decorateKey(ByteBufferUtil.bytes("4"));
+        DecoratedKey key = sstable.partitioner.decorateKey(ByteBufferUtil.bytes("4"));
         Util.getAll(Util.cmd(store, key).build());
         assertEquals(1, sstable.getReadMeter().count());
 
@@ -242,7 +239,6 @@ public class SSTableReaderTest
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
-        partitioner = store.getPartitioner();
         CacheService.instance.keyCache.setCapacity(100);
 
         // insert data and compact to a single sstable
@@ -281,7 +277,6 @@ public class SSTableReaderTest
         // Create secondary index and flush to disk
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore(CF_INDEXED);
-        partitioner = store.getPartitioner();
 
         new RowUpdateBuilder(store.metadata, System.currentTimeMillis(), "k1")
             .clustering("0")
@@ -298,7 +293,6 @@ public class SSTableReaderTest
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
-        partitioner = store.getPartitioner();
         CacheService.instance.keyCache.setCapacity(1000);
 
         // insert data and compact to a single sstable
@@ -389,7 +383,7 @@ public class SSTableReaderTest
         store.forceBlockingFlush();
 
         ColumnFamilyStore indexCfs = store.indexManager.getIndexForColumn(store.metadata.getColumnDefinition(bytes("birthdate"))).getIndexCfs();
-        assert indexCfs.isIndex();
+        assert indexCfs.partitioner instanceof LocalPartitioner;
         SSTableReader sstable = indexCfs.getLiveSSTables().iterator().next();
         assert sstable.first.getToken() instanceof LocalToken;
 
@@ -409,7 +403,6 @@ public class SSTableReaderTest
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard1");
-        partitioner = store.getPartitioner();
 
         new RowUpdateBuilder(store.metadata, 0, "k1")
             .clustering("xyz")
@@ -435,7 +428,6 @@ public class SSTableReaderTest
     {
         Keyspace keyspace = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore store = keyspace.getColumnFamilyStore("Standard2");
-        partitioner = store.getPartitioner();
 
         // insert data and compact to a single sstable. The
         // number of keys inserted is greater than index_interval
@@ -467,7 +459,7 @@ public class SSTableReaderTest
         Set<Component> components = Sets.newHashSet(Component.DATA, Component.PRIMARY_INDEX);
         if (sstable.components.contains(Component.COMPRESSION_INFO))
             components.add(Component.COMPRESSION_INFO);
-        SSTableReader bulkLoaded = SSTableReader.openForBatch(sstable.descriptor, components, store.metadata);
+        SSTableReader bulkLoaded = SSTableReader.openForBatch(sstable.descriptor, components, store.metadata, sstable.partitioner);
         sections = bulkLoaded.getPositionsForRanges(ranges);
         assert sections.size() == 1 : "Expected to find range in sstable opened for bulk loading";
         bulkLoaded.selfRef().release();
@@ -518,7 +510,7 @@ public class SSTableReaderTest
                 public void run()
                 {
                     Iterable<DecoratedKey> results = store.keySamples(
-                            new Range<>(sstable.getPartitioner().getMinimumToken(), sstable.getPartitioner().getToken(key)));
+                            new Range<>(sstable.partitioner.getMinimumToken(), sstable.partitioner.getToken(key)));
                     assertTrue(results.iterator().hasNext());
                 }
             }));
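
The SSTableReaderTest hunks above all follow the same revert pattern: the tests stop caching a per-ColumnFamilyStore partitioner and instead read it statically via StorageService.getPartitioner(). A minimal, self-contained sketch of that restored idiom, using only calls that appear in the diff (the class name and key values below are illustrative, not part of the commit):

    import org.apache.cassandra.dht.Range;
    import org.apache.cassandra.dht.Token;
    import org.apache.cassandra.service.StorageService;
    import org.apache.cassandra.utils.ByteBufferUtil;

    // Illustrative sketch only; assumes the test environment has already
    // initialized DatabaseDescriptor so a global partitioner is configured.
    public class PartitionerAccessSketch
    {
        // Token for a stringified integer key, obtained through the globally
        // configured partitioner, mirroring the static t(int) helper restored above.
        static Token t(int i)
        {
            return StorageService.getPartitioner().getToken(ByteBufferUtil.bytes(String.valueOf(i)));
        }

        // A wrapping range from a key's token back to the minimum token,
        // as built in the getPositionsForRanges test above.
        static Range<Token> wrappingRangeFrom(int i)
        {
            return new Range<>(t(i), StorageService.getPartitioner().getMinimumToken());
        }
    }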

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
index fd22941..cb07d37 100644
--- a/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
+++ b/test/unit/org/apache/cassandra/io/sstable/SSTableRewriterTest.java
@@ -216,7 +216,7 @@ public class SSTableRewriterTest extends SchemaLoader
                         if (sstable.openReason == SSTableReader.OpenReason.EARLY)
                         {
                             SSTableReader c = txn.current(sstables.iterator().next());
-                            Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.getPartitioner().getMinimumToken(), cfs.getPartitioner().getMinimumToken()));
+                            Collection<Range<Token>> r = Arrays.asList(new Range<>(cfs.partitioner.getMinimumToken(), cfs.partitioner.getMinimumToken()));
                             List<Pair<Long, Long>> tmplinkPositions = sstable.getPositionsForRanges(r);
                             List<Pair<Long, Long>> compactingPositions = c.getPositionsForRanges(r);
                             assertEquals(1, tmplinkPositions.size());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/repair/ValidatorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/repair/ValidatorTest.java b/test/unit/org/apache/cassandra/repair/ValidatorTest.java
index d77daf0..e18dc1d 100644
--- a/test/unit/org/apache/cassandra/repair/ValidatorTest.java
+++ b/test/unit/org/apache/cassandra/repair/ValidatorTest.java
@@ -26,7 +26,6 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.BufferDecoratedKey;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Keyspace;
@@ -41,6 +40,7 @@ import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.repair.messages.RepairMessage;
 import org.apache.cassandra.repair.messages.ValidationComplete;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.MerkleTree;
 import org.apache.cassandra.utils.concurrent.SimpleCondition;
@@ -55,7 +55,7 @@ public class ValidatorTest
 {
     private static final String keyspace = "ValidatorTest";
     private static final String columnFamily = "Standard1";
-    private static IPartitioner partitioner;
+    private final IPartitioner partitioner = StorageService.getPartitioner();
 
     @BeforeClass
     public static void defineSchema() throws Exception
@@ -64,7 +64,6 @@ public class ValidatorTest
         SchemaLoader.createKeyspace(keyspace,
                                     KeyspaceParams.simple(1),
                                     SchemaLoader.standardCFMD(keyspace, columnFamily));
-        partitioner = Schema.instance.getCFMetaData(keyspace, columnFamily).partitioner;
     }
 
     @After
@@ -82,6 +81,7 @@ public class ValidatorTest
         final SimpleCondition lock = new SimpleCondition();
         MessagingService.instance().addMessageSink(new IMessageSink()
         {
+            @SuppressWarnings("unchecked")
             public boolean allowOutgoingMessage(MessageOut message, int id, InetAddress to)
             {
                 try
@@ -113,7 +113,7 @@ public class ValidatorTest
         ColumnFamilyStore cfs = Keyspace.open(keyspace).getColumnFamilyStore(columnFamily);
 
         Validator validator = new Validator(desc, remote, 0);
-        MerkleTree tree = new MerkleTree(partitioner, validator.desc.range, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, 15));
+        MerkleTree tree = new MerkleTree(cfs.partitioner, validator.desc.range, MerkleTree.RECOMMENDED_DEPTH, (int) Math.pow(2, 15));
         validator.prepare(cfs, tree);
 
         // and confirm that the tree was split
@@ -142,6 +142,7 @@ public class ValidatorTest
         final SimpleCondition lock = new SimpleCondition();
         MessagingService.instance().addMessageSink(new IMessageSink()
         {
+            @SuppressWarnings("unchecked")
             public boolean allowOutgoingMessage(MessageOut message, int id, InetAddress to)
             {
                 try

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java b/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
index a61a33e..75b99d4 100644
--- a/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
+++ b/test/unit/org/apache/cassandra/service/ActiveRepairServiceTest.java
@@ -77,8 +77,8 @@ public class ActiveRepairServiceTest
 
         TokenMetadata tmd = StorageService.instance.getTokenMetadata();
         tmd.clearUnsafe();
-        StorageService.instance.setTokens(Collections.singleton(tmd.partitioner.getRandomToken()));
-        tmd.updateNormalToken(tmd.partitioner.getMinimumToken(), REMOTE);
+        StorageService.instance.setTokens(Collections.singleton(StorageService.getPartitioner().getRandomToken()));
+        tmd.updateNormalToken(StorageService.getPartitioner().getMinimumToken(), REMOTE);
         assert tmd.isMember(REMOTE);
     }
 
@@ -208,7 +208,7 @@ public class ActiveRepairServiceTest
         for (int i = 1; i <= max; i++)
         {
             InetAddress endpoint = InetAddress.getByName("127.0.0." + i);
-            tmd.updateNormalToken(tmd.partitioner.getRandomToken(), endpoint);
+            tmd.updateNormalToken(StorageService.getPartitioner().getRandomToken(), endpoint);
             endpoints.add(endpoint);
         }
         return endpoints;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java b/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
index b2bb081..e5b2599 100644
--- a/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
+++ b/test/unit/org/apache/cassandra/service/LeaveAndBootstrapTest.java
@@ -25,14 +25,12 @@ import java.util.*;
 
 import com.google.common.collect.HashMultimap;
 import com.google.common.collect.Multimap;
-
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.Util.PartitionerSwitcher;
 import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.dht.IPartitioner;
@@ -52,7 +50,7 @@ import static org.junit.Assert.*;
 public class LeaveAndBootstrapTest
 {
     private static final IPartitioner partitioner = RandomPartitioner.instance;
-    private static PartitionerSwitcher partitionerSwitcher;
+    private static IPartitioner oldPartitioner;
     private static final String KEYSPACE1 = "LeaveAndBootstrapTestKeyspace1";
     private static final String KEYSPACE2 = "LeaveAndBootstrapTestKeyspace2";
     private static final String KEYSPACE3 = "LeaveAndBootstrapTestKeyspace3";
@@ -61,7 +59,7 @@ public class LeaveAndBootstrapTest
     @BeforeClass
     public static void defineSchema() throws Exception
     {
-        partitionerSwitcher = Util.switchPartitioner(partitioner);
+        oldPartitioner = StorageService.instance.setPartitionerUnsafe(partitioner);
         SchemaLoader.loadSchema();
         SchemaLoader.schemaDefinition("LeaveAndBootstrapTest");
     }
@@ -69,7 +67,7 @@ public class LeaveAndBootstrapTest
     @AfterClass
     public static void tearDown()
     {
-        partitionerSwitcher.close();
+        StorageService.instance.setPartitionerUnsafe(oldPartitioner);
     }
 
     /**
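
The LeaveAndBootstrapTest hunk above restores the save-and-restore idiom for the global partitioner that Util.switchPartitioner previously wrapped. A compact sketch of that idiom under JUnit (the class name is illustrative; the calls are the ones shown in the diff):

    import org.apache.cassandra.dht.IPartitioner;
    import org.apache.cassandra.dht.RandomPartitioner;
    import org.apache.cassandra.service.StorageService;
    import org.junit.AfterClass;
    import org.junit.BeforeClass;

    // Illustrative sketch of the partitioner swap idiom restored above.
    public class PartitionerSwapSketch
    {
        private static IPartitioner oldPartitioner;

        @BeforeClass
        public static void swapInPartitioner()
        {
            // setPartitionerUnsafe returns the previously configured partitioner,
            // which is kept so it can be put back once the test class finishes.
            oldPartitioner = StorageService.instance.setPartitionerUnsafe(RandomPartitioner.instance);
        }

        @AfterClass
        public static void restorePartitioner()
        {
            StorageService.instance.setPartitionerUnsafe(oldPartitioner);
        }
    }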

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/service/SerializationsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/SerializationsTest.java b/test/unit/org/apache/cassandra/service/SerializationsTest.java
index 80bb452..b7af1be 100644
--- a/test/unit/org/apache/cassandra/service/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/service/SerializationsTest.java
@@ -18,22 +18,19 @@
  */
 package org.apache.cassandra.service;
 
+import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.InetAddress;
 import java.util.Collections;
 import java.util.UUID;
 
-import org.junit.AfterClass;
-import org.junit.BeforeClass;
 import org.junit.Test;
-
 import org.apache.cassandra.AbstractSerializationsTester;
-import org.apache.cassandra.Util;
-import org.apache.cassandra.Util.PartitionerSwitcher;
 import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.dht.RandomPartitioner;
 import org.apache.cassandra.dht.Range;
 import org.apache.cassandra.dht.Token;
+import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
 import org.apache.cassandra.net.MessageIn;
@@ -47,26 +44,14 @@ import org.apache.cassandra.utils.MerkleTree;
 
 public class SerializationsTest extends AbstractSerializationsTester
 {
-    private static PartitionerSwitcher partitionerSwitcher;
-    private static UUID RANDOM_UUID;
-    private static Range<Token> FULL_RANGE;
-    private static RepairJobDesc DESC;
-
-    @BeforeClass
-    public static void defineSchema() throws Exception
-    {
-        partitionerSwitcher = Util.switchPartitioner(RandomPartitioner.instance);
-        RANDOM_UUID = UUID.fromString("b5c3d033-75aa-4c2f-a819-947aac7a0c54");
-        FULL_RANGE = new Range<>(Util.testPartitioner().getMinimumToken(), Util.testPartitioner().getMinimumToken());
-        DESC = new RepairJobDesc(getVersion() < MessagingService.VERSION_21 ? null : RANDOM_UUID, RANDOM_UUID, "Keyspace1", "Standard1", FULL_RANGE);
-    }
-
-    @AfterClass
-    public static void tearDown()
+    static
     {
-        partitionerSwitcher.close();
+        System.setProperty("cassandra.partitioner", "RandomPartitioner");
     }
 
+    private static final UUID RANDOM_UUID = UUID.fromString("b5c3d033-75aa-4c2f-a819-947aac7a0c54");
+    private static final Range<Token> FULL_RANGE = new Range<>(StorageService.getPartitioner().getMinimumToken(), StorageService.getPartitioner().getMinimumToken());
+    private static final RepairJobDesc DESC = new RepairJobDesc(getVersion() < MessagingService.VERSION_21 ? null : RANDOM_UUID, RANDOM_UUID, "Keyspace1", "Standard1", FULL_RANGE);
 
     private void testRepairMessageWrite(String fileName, RepairMessage... messages) throws IOException
     {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/service/StorageProxyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/service/StorageProxyTest.java b/test/unit/org/apache/cassandra/service/StorageProxyTest.java
index 801fc53..c996d5c 100644
--- a/test/unit/org/apache/cassandra/service/StorageProxyTest.java
+++ b/test/unit/org/apache/cassandra/service/StorageProxyTest.java
@@ -57,12 +57,12 @@ public class StorageProxyTest
 
     private static PartitionPosition startOf(String key)
     {
-        return token(key).minKeyBound();
+        return StorageService.getPartitioner().getToken(ByteBufferUtil.bytes(key)).minKeyBound();
     }
 
     private static PartitionPosition endOf(String key)
     {
-        return token(key).maxKeyBound();
+        return StorageService.getPartitioner().getToken(ByteBufferUtil.bytes(key)).maxKeyBound();
     }
 
     private static Range<Token> tokenRange(String left, String right)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
index 85090dc..cb084a0 100644
--- a/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
+++ b/test/unit/org/apache/cassandra/streaming/StreamingTransferTest.java
@@ -128,7 +128,7 @@ public class StreamingTransferTest
     public void testRequestEmpty() throws Exception
     {
         // requesting empty data should succeed
-        IPartitioner p = Util.testPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         List<Range<Token>> ranges = new ArrayList<>();
         ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("key1"))));
         ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key2")), p.getMinimumToken()));
@@ -215,7 +215,7 @@ public class StreamingTransferTest
 
     private void transferSSTables(SSTableReader sstable) throws Exception
     {
-        IPartitioner p = sstable.getPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         List<Range<Token>> ranges = new ArrayList<>();
         ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("key1"))));
         ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key2")), p.getMinimumToken()));
@@ -224,7 +224,7 @@ public class StreamingTransferTest
 
     private void transferRanges(ColumnFamilyStore cfs) throws Exception
     {
-        IPartitioner p = cfs.getPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         List<Range<Token>> ranges = new ArrayList<>();
         // wrapped range
         ranges.add(new Range<Token>(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key0"))));
@@ -418,7 +418,7 @@ public class StreamingTransferTest
         SSTableReader sstable2 = SSTableUtils.prepare().write(content);
 
         // transfer the first and last key
-        IPartitioner p = Util.testPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         List<Range<Token>> ranges = new ArrayList<>();
         ranges.add(new Range<>(p.getMinimumToken(), p.getToken(ByteBufferUtil.bytes("test"))));
         ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("transfer2")), p.getMinimumToken()));
@@ -447,7 +447,7 @@ public class StreamingTransferTest
     public void testTransferOfMultipleColumnFamilies() throws Exception
     {
         String keyspace = KEYSPACE_CACHEKEY;
-        IPartitioner p = Util.testPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         String[] columnFamilies = new String[] { "Standard1", "Standard2", "Standard3" };
         List<SSTableReader> ssTableReaders = new ArrayList<>();
 
@@ -517,7 +517,7 @@ public class StreamingTransferTest
         SSTableReader sstable = cfs.getLiveSSTables().iterator().next();
         cfs.clearUnsafe();
 
-        IPartitioner p = Util.testPartitioner();
+        IPartitioner p = StorageService.getPartitioner();
         List<Range<Token>> ranges = new ArrayList<>();
         ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key1")), p.getToken(ByteBufferUtil.bytes("key1000"))));
         ranges.add(new Range<>(p.getToken(ByteBufferUtil.bytes("key5")), p.getToken(ByteBufferUtil.bytes("key500"))));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java b/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
index deb401b..edb1fb1 100644
--- a/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
+++ b/test/unit/org/apache/cassandra/utils/MerkleTreeTest.java
@@ -68,7 +68,7 @@ public class MerkleTreeTest
         TOKEN_SCALE = new BigInteger("8");
         partitioner = RandomPartitioner.instance;
         // TODO need to trickle TokenSerializer
-        DatabaseDescriptor.setPartitionerUnsafe(partitioner);
+        DatabaseDescriptor.setPartitioner(partitioner);
         mt = new MerkleTree(partitioner, fullRange(), RECOMMENDED_DEPTH, Integer.MAX_VALUE);
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a22ce89e/test/unit/org/apache/cassandra/utils/SerializationsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/utils/SerializationsTest.java b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
index cf50769..c50f400 100644
--- a/test/unit/org/apache/cassandra/utils/SerializationsTest.java
+++ b/test/unit/org/apache/cassandra/utils/SerializationsTest.java
@@ -23,14 +23,13 @@ import java.io.IOException;
 
 import org.junit.Assert;
 import org.junit.Test;
-
 import org.apache.cassandra.AbstractSerializationsTester;
-import org.apache.cassandra.Util;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.marshal.Int32Type;
+import org.apache.cassandra.dht.IPartitioner;
 import org.apache.cassandra.io.util.DataInputPlus.DataInputStreamPlus;
 import org.apache.cassandra.io.util.DataOutputStreamPlus;
-import org.apache.cassandra.dht.IPartitioner;
+import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.dht.Murmur3Partitioner;
 
 import java.io.File;
@@ -38,9 +37,10 @@ import java.io.FileInputStream;
 
 public class SerializationsTest extends AbstractSerializationsTester
 {
+
     private static void testBloomFilterWrite(boolean offheap, boolean oldBfHashOrder) throws IOException
     {
-        IPartitioner partitioner = Util.testPartitioner();
+        IPartitioner partitioner = StorageService.getPartitioner();
         try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap, oldBfHashOrder))
         {
             for (int i = 0; i < 100; i++)
@@ -54,10 +54,11 @@ public class SerializationsTest extends AbstractSerializationsTester
 
     private static void testBloomFilterWrite1000(boolean offheap, boolean oldBfHashOrder) throws IOException
     {
+        IPartitioner partitioner = StorageService.getPartitioner();
         try (IFilter bf = FilterFactory.getFilter(1000000, 0.0001, offheap, oldBfHashOrder))
         {
             for (int i = 0; i < 1000; i++)
-                bf.add(Util.dk(Int32Type.instance.decompose(i)));
+                bf.add(partitioner.decorateKey(Int32Type.instance.decompose(i)));
             try (DataOutputStreamPlus out = getOutput(oldBfHashOrder ? "2.1" : "3.0", "utils.BloomFilter1000.bin"))
             {
                 FilterFactory.serialize(bf, out);
@@ -74,18 +75,19 @@ public class SerializationsTest extends AbstractSerializationsTester
             testBloomFilterWrite1000(true, true);
         }
 
+        IPartitioner partitioner = StorageService.getPartitioner();
         try (DataInputStream in = getInput("3.0", "utils.BloomFilter1000.bin");
              IFilter filter = FilterFactory.deserialize(in, true, false))
         {
             boolean present;
             for (int i = 0 ; i < 1000 ; i++)
             {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
+                present = filter.isPresent(partitioner.decorateKey(Int32Type.instance.decompose(i)));
                 Assert.assertTrue(present);
             }
             for (int i = 1000 ; i < 2000 ; i++)
             {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
+                present = filter.isPresent(partitioner.decorateKey(Int32Type.instance.decompose(i)));
                 Assert.assertFalse(present);
             }
         }
@@ -96,12 +98,12 @@ public class SerializationsTest extends AbstractSerializationsTester
             boolean present;
             for (int i = 0 ; i < 1000 ; i++)
             {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
+                present = filter.isPresent(partitioner.decorateKey(Int32Type.instance.decompose(i)));
                 Assert.assertTrue(present);
             }
             for (int i = 1000 ; i < 2000 ; i++)
             {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
+                present = filter.isPresent(partitioner.decorateKey(Int32Type.instance.decompose(i)));
                 Assert.assertFalse(present);
             }
         }
@@ -115,13 +117,13 @@ public class SerializationsTest extends AbstractSerializationsTester
             boolean present;
             for (int i = 0 ; i < 1000 ; i++)
             {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
+                present = filter.isPresent(partitioner.decorateKey(Int32Type.instance.decompose(i)));
                 if (!present)
                     falseNegative ++;
             }
             for (int i = 1000 ; i < 2000 ; i++)
             {
-                present = filter.isPresent(Util.dk(Int32Type.instance.decompose(i)));
+                present = filter.isPresent(partitioner.decorateKey(Int32Type.instance.decompose(i)));
                 if (present)
                     falsePositive ++;
             }

