cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From alek...@apache.org
Subject [4/4] cassandra git commit: Move schema tables to the new system_schema keyspace
Date Fri, 10 Jul 2015 12:29:08 GMT
Move schema tables to the new system_schema keyspace

patch by Aleksey Yeschenko; reviewed by Tyler Hobbs for CASSANDRA-6717


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7d6c876e
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7d6c876e
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7d6c876e

Branch: refs/heads/trunk
Commit: 7d6c876ec9f8dd143046ff49b5d61066ad5206c1
Parents: 81ba561
Author: Aleksey Yeschenko <aleksey@apache.org>
Authored: Thu Jul 9 21:42:52 2015 +0300
Committer: Aleksey Yeschenko <aleksey@apache.org>
Committed: Fri Jul 10 15:28:37 2015 +0300

----------------------------------------------------------------------
 NEWS.txt                                        |    4 +
 ...-core-2.2.0-rc2-SNAPSHOT-20150617-shaded.jar |  Bin 2154972 -> 0 bytes
 ...ra-driver-core-2.2.0-rc2-SNAPSHOT-shaded.jar |  Bin 0 -> 2162223 bytes
 ...sandra-driver-internal-only-2.6.0c2.post.zip |  Bin 198346 -> 0 bytes
 lib/cassandra-driver-internal-only-2.6.0c2.zip  |  Bin 0 -> 203206 bytes
 pylib/cqlshlib/cql3handling.py                  |    4 +-
 .../org/apache/cassandra/config/CFMetaData.java |    6 +-
 .../org/apache/cassandra/config/Schema.java     |   29 +-
 .../cql3/statements/AlterKeyspaceStatement.java |    3 +-
 .../db/DefinitionsUpdateVerbHandler.java        |    4 +-
 src/java/org/apache/cassandra/db/Keyspace.java  |    6 +-
 .../db/MigrationRequestVerbHandler.java         |    4 +-
 .../org/apache/cassandra/db/ReadCommand.java    |    3 +-
 .../org/apache/cassandra/db/SystemKeyspace.java |  172 +-
 .../io/sstable/format/SSTableReader.java        |    2 +-
 .../org/apache/cassandra/schema/Functions.java  |    7 +
 .../cassandra/schema/LegacySchemaMigrator.java  |  804 ++++++++++
 .../cassandra/schema/LegacySchemaTables.java    | 1502 ------------------
 .../apache/cassandra/schema/SchemaKeyspace.java | 1501 +++++++++++++++++
 src/java/org/apache/cassandra/schema/Types.java |   12 +
 .../cassandra/service/CassandraDaemon.java      |    9 +
 .../apache/cassandra/service/ClientState.java   |    9 +-
 .../cassandra/service/MigrationManager.java     |   71 +-
 .../apache/cassandra/service/MigrationTask.java |    4 +-
 .../apache/cassandra/service/StorageProxy.java  |    2 +-
 .../cassandra/service/StorageService.java       |    6 +-
 .../cassandra/thrift/ThriftConversion.java      |    4 +-
 .../cassandra/thrift/ThriftValidation.java      |    4 +-
 .../cassandra/tools/nodetool/Cleanup.java       |    6 +-
 .../utils/NativeSSTableLoaderClient.java        |   19 +-
 .../unit/org/apache/cassandra/SchemaLoader.java |    4 +-
 .../apache/cassandra/config/CFMetaDataTest.java |   10 +-
 .../config/LegacySchemaTablesTest.java          |  153 --
 .../cql3/validation/entities/UFTest.java        |   16 +-
 .../cql3/validation/operations/AlterTest.java   |   24 +-
 .../operations/InsertUpdateIfConditionTest.java |    6 +-
 .../org/apache/cassandra/schema/DefsTest.java   |    2 +-
 .../schema/LegacySchemaMigratorTest.java        |  549 +++++++
 .../cassandra/schema/SchemaKeyspaceTest.java    |  153 ++
 .../service/StorageServiceServerTest.java       |    5 +-
 40 files changed, 3328 insertions(+), 1791 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 54ed7c6..ce05b92 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -21,6 +21,10 @@ Upgrading
    _ New SSTable version 'la' with improved bloom-filter false-positive handling
      compared to previous version 'ka' used in 2.2 and 2.1. Running sstableupgrade
      is not necessary but recommended.
+   - Before upgrading to 3.0, make sure that your cluster is in complete agreement
+     (schema versions outputted by `nodetool describecluster` are all the same).
+   - Schema metadata is now stored in the new `system_schema` keyspace, and
+     legacy `system.schema_*` tables are now gone; see CASSANDRA-6717 for details.
    - Pig's CassandraStorage has been removed. Use CqlNativeStorage instead.
    - Hadoop BulkOutputFormat and BulkRecordWriter have been removed; use
      CqlBulkOutputFormat and CqlBulkRecordWriter instead.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-20150617-shaded.jar
----------------------------------------------------------------------
diff --git a/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-20150617-shaded.jar b/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-20150617-shaded.jar
deleted file mode 100644
index 7d971df..0000000
Binary files a/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-20150617-shaded.jar and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-shaded.jar
----------------------------------------------------------------------
diff --git a/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-shaded.jar b/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-shaded.jar
new file mode 100644
index 0000000..9051202
Binary files /dev/null and b/lib/cassandra-driver-core-2.2.0-rc2-SNAPSHOT-shaded.jar differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/lib/cassandra-driver-internal-only-2.6.0c2.post.zip
----------------------------------------------------------------------
diff --git a/lib/cassandra-driver-internal-only-2.6.0c2.post.zip b/lib/cassandra-driver-internal-only-2.6.0c2.post.zip
deleted file mode 100644
index 5fa57c7..0000000
Binary files a/lib/cassandra-driver-internal-only-2.6.0c2.post.zip and /dev/null differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/lib/cassandra-driver-internal-only-2.6.0c2.zip
----------------------------------------------------------------------
diff --git a/lib/cassandra-driver-internal-only-2.6.0c2.zip b/lib/cassandra-driver-internal-only-2.6.0c2.zip
new file mode 100644
index 0000000..be9f162
Binary files /dev/null and b/lib/cassandra-driver-internal-only-2.6.0c2.zip differ

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/pylib/cqlshlib/cql3handling.py
----------------------------------------------------------------------
diff --git a/pylib/cqlshlib/cql3handling.py b/pylib/cqlshlib/cql3handling.py
index 637234a..2767fb1 100644
--- a/pylib/cqlshlib/cql3handling.py
+++ b/pylib/cqlshlib/cql3handling.py
@@ -33,8 +33,8 @@ class UnexpectedTableStructure(UserWarning):
     def __str__(self):
         return 'Unexpected table structure; may not translate correctly to CQL. ' + self.msg
 
-SYSTEM_KEYSPACES = ('system', 'system_traces', 'system_auth', 'system_distributed')
-NONALTERBALE_KEYSPACES = ('system')
+SYSTEM_KEYSPACES = ('system', 'system_schema', 'system_traces', 'system_auth', 'system_distributed')
+NONALTERBALE_KEYSPACES = ('system', 'system_schema')
 
 class Cql3ParsingRuleSet(CqlParsingRuleSet):
     keywords = set((

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/config/CFMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java
index 6deee6d..f3c8bc1 100644
--- a/src/java/org/apache/cassandra/config/CFMetaData.java
+++ b/src/java/org/apache/cassandra/config/CFMetaData.java
@@ -34,7 +34,6 @@ import org.apache.commons.lang3.builder.HashCodeBuilder;
 import org.apache.commons.lang3.builder.ToStringBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.github.jamm.Unmetered;
 
 import org.apache.cassandra.cache.CachingOptions;
 import org.apache.cassandra.cql3.ColumnIdentifier;
@@ -49,11 +48,12 @@ import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.io.compress.CompressionParameters;
 import org.apache.cassandra.io.compress.LZ4Compressor;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.schema.LegacySchemaTables;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 import org.apache.cassandra.utils.UUIDGen;
 import org.apache.cassandra.utils.Pair;
+import org.github.jamm.Unmetered;
 
 /**
  * This class can be tricky to modify. Please read http://wiki.apache.org/cassandra/ConfigurationNotes for how to do so safely.
@@ -791,7 +791,7 @@ public final class CFMetaData
      */
     public boolean reload()
     {
-        return apply(LegacySchemaTables.createTableFromName(ksName, cfName));
+        return apply(SchemaKeyspace.createTableFromName(ksName, cfName));
     }
 
     /**

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/config/Schema.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/Schema.java b/src/java/org/apache/cassandra/config/Schema.java
index 4d3e6f3..29dffa1 100644
--- a/src/java/org/apache/cassandra/config/Schema.java
+++ b/src/java/org/apache/cassandra/config/Schema.java
@@ -22,6 +22,7 @@ import java.security.NoSuchAlgorithmException;
 import java.util.*;
 
 import com.google.common.collect.ImmutableList;
+import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Sets;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -49,6 +50,9 @@ public class Schema
 
     public static final Schema instance = new Schema();
 
+    /* system keyspace names (the ones with LocalStrategy replication strategy) */
+    public static final Set<String> SYSTEM_KEYSPACE_NAMES = ImmutableSet.of(SystemKeyspace.NAME, SchemaKeyspace.NAME);
+
     /**
      * longest permissible KS or CF name.  Our main concern is that filename not be more than 255 characters;
      * the filename will contain both the KS and CF names. Since non-schema-name components only take up
@@ -88,10 +92,19 @@ public class Schema
      */
     public Schema()
     {
+        load(SchemaKeyspace.metadata());
         load(SystemKeyspace.metadata());
     }
 
     /**
+     * @return whether or not the keyspace is a really system one (w/ LocalStrategy, unmodifiable, hardcoded)
+     */
+    public static boolean isSystemKeyspace(String keyspaceName)
+    {
+        return SYSTEM_KEYSPACE_NAMES.contains(keyspaceName.toLowerCase());
+    }
+
+    /**
      * load keyspace (keyspace) definitions, but do not initialize the keyspace instances.
      * Schema version may be updated as the result.
      */
@@ -107,7 +120,7 @@ public class Schema
      */
     public Schema loadFromDisk(boolean updateVersion)
     {
-        load(LegacySchemaTables.readSchemaFromSystemTables());
+        load(SchemaKeyspace.readSchemaFromSystemTables());
         if (updateVersion)
             updateVersion();
         return this;
@@ -253,7 +266,7 @@ public class Schema
      */
     public List<String> getNonSystemKeyspaces()
     {
-        return ImmutableList.copyOf(Sets.difference(keyspaces.keySet(), Collections.singleton(SystemKeyspace.NAME)));
+        return ImmutableList.copyOf(Sets.difference(keyspaces.keySet(), SYSTEM_KEYSPACE_NAMES));
     }
 
     /**
@@ -344,6 +357,11 @@ public class Schema
         cfIdMap.put(key, cfm.cfId);
     }
 
+    public void unload(CFMetaData cfm)
+    {
+        cfIdMap.remove(Pair.create(cfm.ksName, cfm.cfName));
+    }
+
     /**
      * Used for ColumnFamily data eviction out from the schema
      *
@@ -351,7 +369,7 @@ public class Schema
      */
     public void purge(CFMetaData cfm)
     {
-        cfIdMap.remove(Pair.create(cfm.ksName, cfm.cfName));
+        unload(cfm);
         cfm.markPurged();
     }
 
@@ -410,7 +428,7 @@ public class Schema
      */
     public void updateVersion()
     {
-        version = LegacySchemaTables.calculateSchemaDigest();
+        version = SchemaKeyspace.calculateSchemaDigest();
         SystemKeyspace.updateSchemaVersion(version);
     }
 
@@ -526,6 +544,9 @@ public class Schema
         ColumnFamilyStore cfs = Keyspace.open(ksName).getColumnFamilyStore(tableName);
         assert cfs != null;
 
+        // make sure all the indexes are dropped, or else.
+        cfs.indexManager.setIndexRemoved(new HashSet<>(cfs.indexManager.getBuiltIndexes()));
+
         // reinitialize the keyspace.
         CFMetaData cfm = oldKsm.tables.get(tableName).get();
         KeyspaceMetadata newKsm = oldKsm.withSwapped(oldKsm.tables.without(tableName));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/cql3/statements/AlterKeyspaceStatement.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/statements/AlterKeyspaceStatement.java b/src/java/org/apache/cassandra/cql3/statements/AlterKeyspaceStatement.java
index 3e1bca5..b660f52 100644
--- a/src/java/org/apache/cassandra/cql3/statements/AlterKeyspaceStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterKeyspaceStatement.java
@@ -19,7 +19,6 @@ package org.apache.cassandra.cql3.statements;
 
 import org.apache.cassandra.auth.Permission;
 import org.apache.cassandra.config.Schema;
-import org.apache.cassandra.db.SystemKeyspace;
 import org.apache.cassandra.exceptions.*;
 import org.apache.cassandra.locator.LocalStrategy;
 import org.apache.cassandra.schema.KeyspaceMetadata;
@@ -56,7 +55,7 @@ public class AlterKeyspaceStatement extends SchemaAlteringStatement
         KeyspaceMetadata ksm = Schema.instance.getKSMetaData(name);
         if (ksm == null)
             throw new InvalidRequestException("Unknown keyspace " + name);
-        if (ksm.name.equalsIgnoreCase(SystemKeyspace.NAME))
+        if (Schema.isSystemKeyspace(ksm.name))
             throw new InvalidRequestException("Cannot alter system keyspace");
 
         attrs.validate();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java b/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
index d5ede03..7add503 100644
--- a/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/DefinitionsUpdateVerbHandler.java
@@ -26,7 +26,7 @@ import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
 import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.MessageIn;
-import org.apache.cassandra.schema.LegacySchemaTables;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.utils.WrappedRunnable;
 
 /**
@@ -47,7 +47,7 @@ public class DefinitionsUpdateVerbHandler implements IVerbHandler<Collection<Mut
         {
             public void runMayThrow() throws Exception
             {
-                LegacySchemaTables.mergeSchema(message.payload);
+                SchemaKeyspace.mergeSchema(message.payload);
             }
         });
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/db/Keyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Keyspace.java b/src/java/org/apache/cassandra/db/Keyspace.java
index cc95b9f..9d3b178 100644
--- a/src/java/org/apache/cassandra/db/Keyspace.java
+++ b/src/java/org/apache/cassandra/db/Keyspace.java
@@ -25,6 +25,7 @@ import java.util.concurrent.ConcurrentMap;
 import java.util.concurrent.Future;
 
 import com.google.common.base.Function;
+import com.google.common.collect.ImmutableList;
 import com.google.common.collect.Iterables;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
@@ -40,6 +41,7 @@ import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.io.sstable.format.SSTableReader;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.tracing.Tracing;
 import org.apache.cassandra.utils.concurrent.OpOrder;
@@ -90,7 +92,7 @@ public class Keyspace
 
     public static Keyspace open(String keyspaceName)
     {
-        assert initialized || keyspaceName.equals(SystemKeyspace.NAME);
+        assert initialized || Schema.isSystemKeyspace(keyspaceName);
         return open(keyspaceName, Schema.instance, true);
     }
 
@@ -527,7 +529,7 @@ public class Keyspace
 
     public static Iterable<Keyspace> system()
     {
-        return Iterables.transform(Collections.singleton(SystemKeyspace.NAME), keyspaceTransformer);
+        return Iterables.transform(Schema.SYSTEM_KEYSPACE_NAMES, keyspaceTransformer);
     }
 
     @Override

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java b/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
index 79753c1..6502ed3 100644
--- a/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/MigrationRequestVerbHandler.java
@@ -26,7 +26,7 @@ import org.apache.cassandra.net.IVerbHandler;
 import org.apache.cassandra.net.MessageIn;
 import org.apache.cassandra.net.MessageOut;
 import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.schema.LegacySchemaTables;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.service.MigrationManager;
 
 /**
@@ -41,7 +41,7 @@ public class MigrationRequestVerbHandler implements IVerbHandler
     {
         logger.debug("Received migration request from {}.", message.from);
         MessageOut<Collection<Mutation>> response = new MessageOut<>(MessagingService.Verb.INTERNAL_RESPONSE,
-                                                                     LegacySchemaTables.convertSchemaToMutations(),
+                                                                     SchemaKeyspace.convertSchemaToMutations(),
                                                                      MigrationManager.MigrationsSerializer.instance);
         MessagingService.instance().sendReply(response, id, message.from);
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/db/ReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ReadCommand.java b/src/java/org/apache/cassandra/db/ReadCommand.java
index 7cc4884..b5182dd 100644
--- a/src/java/org/apache/cassandra/db/ReadCommand.java
+++ b/src/java/org/apache/cassandra/db/ReadCommand.java
@@ -27,6 +27,7 @@ import org.slf4j.LoggerFactory;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.index.SecondaryIndexSearcher;
 import org.apache.cassandra.db.filter.*;
 import org.apache.cassandra.db.rows.*;
@@ -323,7 +324,7 @@ public abstract class ReadCommand implements ReadQuery
             private final int failureThreshold = DatabaseDescriptor.getTombstoneFailureThreshold();
             private final int warningThreshold = DatabaseDescriptor.getTombstoneWarnThreshold();
 
-            private final boolean respectTombstoneThresholds = !ReadCommand.this.metadata().ksName.equals(SystemKeyspace.NAME);
+            private final boolean respectTombstoneThresholds = !Schema.isSystemKeyspace(ReadCommand.this.metadata().ksName);
 
             private int liveRows = 0;
             private int tombstones = 0;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SystemKeyspace.java b/src/java/org/apache/cassandra/db/SystemKeyspace.java
index a3b021f..f0c91d6 100644
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@ -34,6 +34,7 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.cql3.functions.*;
@@ -55,7 +56,7 @@ import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.schema.Functions;
 import org.apache.cassandra.schema.KeyspaceMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
-import org.apache.cassandra.schema.LegacySchemaTables;
+import org.apache.cassandra.schema.SchemaKeyspace;
 import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.schema.Types;
 import org.apache.cassandra.service.StorageService;
@@ -101,6 +102,14 @@ public final class SystemKeyspace
     public static final String SIZE_ESTIMATES = "size_estimates";
     public static final String AVAILABLE_RANGES = "available_ranges";
 
+    @Deprecated public static final String LEGACY_KEYSPACES = "schema_keyspaces";
+    @Deprecated public static final String LEGACY_COLUMNFAMILIES = "schema_columnfamilies";
+    @Deprecated public static final String LEGACY_COLUMNS = "schema_columns";
+    @Deprecated public static final String LEGACY_TRIGGERS = "schema_triggers";
+    @Deprecated public static final String LEGACY_USERTYPES = "schema_usertypes";
+    @Deprecated public static final String LEGACY_FUNCTIONS = "schema_functions";
+    @Deprecated public static final String LEGACY_AGGREGATES = "schema_aggregates";
+
     public static final CFMetaData Hints =
         compile(HINTS,
                 "hints awaiting delivery",
@@ -142,12 +151,11 @@ public final class SystemKeyspace
                 + "PRIMARY KEY ((row_key), cf_id))")
                 .compactionStrategyClass(LeveledCompactionStrategy.class);
 
-    // TODO: make private
-    public static final CFMetaData BuiltIndexes =
+    private static final CFMetaData BuiltIndexes =
         compile(BUILT_INDEXES,
                 "built column indexes",
                 "CREATE TABLE \"%s\" ("
-                + "table_name text,"
+                + "table_name text," // table_name here is the name of the keyspace - don't be fooled
                 + "index_name text,"
                 + "PRIMARY KEY ((table_name), index_name)) "
                 + "WITH COMPACT STORAGE");
@@ -263,6 +271,122 @@ public final class SystemKeyspace
                 + "ranges set<blob>,"
                 + "PRIMARY KEY ((keyspace_name)))");
 
+    @Deprecated
+    public static final CFMetaData LegacyKeyspaces =
+        compile(LEGACY_KEYSPACES,
+                "*DEPRECATED* keyspace definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "durable_writes boolean,"
+                + "strategy_class text,"
+                + "strategy_options text,"
+                + "PRIMARY KEY ((keyspace_name))) "
+                + "WITH COMPACT STORAGE");
+
+    @Deprecated
+    public static final CFMetaData LegacyColumnfamilies =
+        compile(LEGACY_COLUMNFAMILIES,
+                "*DEPRECATED* table definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "columnfamily_name text,"
+                + "bloom_filter_fp_chance double,"
+                + "caching text,"
+                + "cf_id uuid," // post-2.1 UUID cfid
+                + "comment text,"
+                + "compaction_strategy_class text,"
+                + "compaction_strategy_options text,"
+                + "comparator text,"
+                + "compression_parameters text,"
+                + "default_time_to_live int,"
+                + "default_validator text,"
+                + "dropped_columns map<text, bigint>,"
+                + "dropped_columns_types map<text, text>,"
+                + "gc_grace_seconds int,"
+                + "is_dense boolean,"
+                + "key_validator text,"
+                + "local_read_repair_chance double,"
+                + "max_compaction_threshold int,"
+                + "max_index_interval int,"
+                + "memtable_flush_period_in_ms int,"
+                + "min_compaction_threshold int,"
+                + "min_index_interval int,"
+                + "read_repair_chance double,"
+                + "speculative_retry text,"
+                + "subcomparator text,"
+                + "type text,"
+                + "PRIMARY KEY ((keyspace_name), columnfamily_name))");
+
+    @Deprecated
+    public static final CFMetaData LegacyColumns =
+        compile(LEGACY_COLUMNS,
+                "*DEPRECATED* column definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "columnfamily_name text,"
+                + "column_name text,"
+                + "component_index int,"
+                + "index_name text,"
+                + "index_options text,"
+                + "index_type text,"
+                + "type text,"
+                + "validator text,"
+                + "PRIMARY KEY ((keyspace_name), columnfamily_name, column_name))");
+
+    @Deprecated
+    public static final CFMetaData LegacyTriggers =
+        compile(LEGACY_TRIGGERS,
+                "*DEPRECATED* trigger definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "columnfamily_name text,"
+                + "trigger_name text,"
+                + "trigger_options map<text, text>,"
+                + "PRIMARY KEY ((keyspace_name), columnfamily_name, trigger_name))");
+
+    @Deprecated
+    public static final CFMetaData LegacyUsertypes =
+        compile(LEGACY_USERTYPES,
+                "*DEPRECATED* user defined type definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "type_name text,"
+                + "field_names list<text>,"
+                + "field_types list<text>,"
+                + "PRIMARY KEY ((keyspace_name), type_name))");
+
+    @Deprecated
+    public static final CFMetaData LegacyFunctions =
+        compile(LEGACY_FUNCTIONS,
+                "*DEPRECATED* user defined function definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "function_name text,"
+                + "signature frozen<list<text>>,"
+                + "argument_names list<text>,"
+                + "argument_types list<text>,"
+                + "body text,"
+                + "language text,"
+                + "return_type text,"
+                + "called_on_null_input boolean,"
+                + "PRIMARY KEY ((keyspace_name), function_name, signature))");
+
+    @Deprecated
+    public static final CFMetaData LegacyAggregates =
+        compile(LEGACY_AGGREGATES,
+                "*DEPRECATED* user defined aggregate definitions",
+                "CREATE TABLE %s ("
+                + "keyspace_name text,"
+                + "aggregate_name text,"
+                + "signature frozen<list<text>>,"
+                + "argument_types list<text>,"
+                + "final_func text,"
+                + "initcond blob,"
+                + "return_type text,"
+                + "state_func text,"
+                + "state_type text,"
+                + "PRIMARY KEY ((keyspace_name), aggregate_name, signature))");
+
     private static CFMetaData compile(String name, String description, String schema)
     {
         return CFMetaData.compile(String.format(schema, name), NAME)
@@ -276,22 +400,26 @@ public final class SystemKeyspace
 
     private static Tables tables()
     {
-        return Tables.builder()
-                     .add(LegacySchemaTables.All)
-                     .add(BuiltIndexes,
-                          Hints,
-                          Batchlog,
-                          Paxos,
-                          Local,
-                          Peers,
-                          PeerEvents,
-                          RangeXfers,
-                          CompactionsInProgress,
-                          CompactionHistory,
-                          SSTableActivity,
-                          SizeEstimates,
-                          AvailableRanges)
-                     .build();
+        return Tables.of(BuiltIndexes,
+                         Hints,
+                         Batchlog,
+                         Paxos,
+                         Local,
+                         Peers,
+                         PeerEvents,
+                         RangeXfers,
+                         CompactionsInProgress,
+                         CompactionHistory,
+                         SSTableActivity,
+                         SizeEstimates,
+                         AvailableRanges,
+                         LegacyKeyspaces,
+                         LegacyColumnfamilies,
+                         LegacyColumns,
+                         LegacyTriggers,
+                         LegacyUsertypes,
+                         LegacyFunctions,
+                         LegacyAggregates);
     }
 
     private static Functions functions()
@@ -322,7 +450,7 @@ public final class SystemKeyspace
     public static void finishStartup()
     {
         persistLocalMetadata();
-        LegacySchemaTables.saveSystemKeyspaceSchema();
+        SchemaKeyspace.saveSystemKeyspacesSchema();
     }
 
     private static void persistLocalMetadata()
@@ -366,7 +494,7 @@ public final class SystemKeyspace
      */
     public static UUID startCompaction(ColumnFamilyStore cfs, Iterable<SSTableReader> toCompact)
     {
-        if (NAME.equals(cfs.keyspace.getName()))
+        if (Schema.isSystemKeyspace(cfs.keyspace.getName()))
             return null;
 
         UUID compactionId = UUIDGen.getTimeUUID();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
index c326881..555140f 100644
--- a/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/format/SSTableReader.java
@@ -2245,7 +2245,7 @@ public abstract class SSTableReader extends SSTable implements SelfRefCounted<SS
 
             // Don't track read rates for tables in the system keyspace and don't bother trying to load or persist
             // the read meter when in client mode.
-            if (SystemKeyspace.NAME.equals(desc.ksname))
+            if (Schema.isSystemKeyspace(desc.ksname))
             {
                 readMeter = null;
                 readMeterSyncFuture = null;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/schema/Functions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/Functions.java b/src/java/org/apache/cassandra/schema/Functions.java
index ccef1ed..c65f58d 100644
--- a/src/java/org/apache/cassandra/schema/Functions.java
+++ b/src/java/org/apache/cassandra/schema/Functions.java
@@ -51,6 +51,11 @@ public final class Functions implements Iterable<Function>
         return builder().build();
     }
 
+    public static Functions of(Function... funs)
+    {
+        return builder().add(funs).build();
+    }
+
     public Iterator<Function> iterator()
     {
         return functions.values().iterator();
@@ -200,6 +205,8 @@ public final class Functions implements Iterable<Function>
 
         private Builder()
         {
+            // we need deterministic iteration order; otherwise Functions.equals() breaks down
+            functions.orderValuesBy((f1, f2) -> Integer.compare(f1.hashCode(), f2.hashCode()));
         }
 
         public Functions build()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7d6c876e/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java b/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
new file mode 100644
index 0000000..a049194
--- /dev/null
+++ b/src/java/org/apache/cassandra/schema/LegacySchemaMigrator.java
@@ -0,0 +1,804 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.cassandra.schema;
+
+import java.nio.ByteBuffer;
+import java.util.*;
+import java.util.stream.Collectors;
+
+import com.google.common.collect.HashMultimap;
+import com.google.common.collect.ImmutableList;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import org.apache.cassandra.cache.CachingOptions;
+import org.apache.cassandra.config.*;
+import org.apache.cassandra.cql3.*;
+import org.apache.cassandra.cql3.functions.FunctionName;
+import org.apache.cassandra.cql3.functions.UDAggregate;
+import org.apache.cassandra.cql3.functions.UDFunction;
+import org.apache.cassandra.db.*;
+import org.apache.cassandra.db.marshal.*;
+import org.apache.cassandra.db.rows.RowIterator;
+import org.apache.cassandra.db.rows.UnfilteredRowIterators;
+import org.apache.cassandra.exceptions.InvalidRequestException;
+import org.apache.cassandra.io.compress.CompressionParameters;
+import org.apache.cassandra.service.StorageService;
+import org.apache.cassandra.utils.ByteBufferUtil;
+import org.apache.cassandra.utils.FBUtilities;
+import org.apache.cassandra.utils.concurrent.OpOrder;
+
+import static java.lang.String.format;
+import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
+import static org.apache.cassandra.utils.FBUtilities.fromJsonMap;
+
+/**
+ * This majestic class performs migration from legacy (pre-3.0) system.schema_* schema tables to the new and glorious
+ * system_schema keyspace.
+ *
+ * The goal is to not lose any information in the migration - including the timestamps.
+ */
+@SuppressWarnings("deprecation")
+public final class LegacySchemaMigrator
+{
+    private LegacySchemaMigrator()
+    {
+    }
+
+    private static final Logger logger = LoggerFactory.getLogger(LegacySchemaMigrator.class);
+
+    static final List<CFMetaData> LegacySchemaTables =
+        ImmutableList.of(SystemKeyspace.LegacyKeyspaces,
+                         SystemKeyspace.LegacyColumnfamilies,
+                         SystemKeyspace.LegacyColumns,
+                         SystemKeyspace.LegacyTriggers,
+                         SystemKeyspace.LegacyUsertypes,
+                         SystemKeyspace.LegacyFunctions,
+                         SystemKeyspace.LegacyAggregates);
+
+    public static void migrate()
+    {
+        // read metadata from the legacy schema tables
+        Collection<Keyspace> keyspaces = readSchema();
+
+        // if already upgraded, or starting a new 3.0 node, abort early
+        if (keyspaces.isEmpty())
+        {
+            unloadLegacySchemaTables();
+            return;
+        }
+
+        // write metadata to the new schema tables
+        logger.info("Moving {} keyspaces from legacy schema tables to the new schema keyspace ({})",
+                    keyspaces.size(),
+                    SchemaKeyspace.NAME);
+        keyspaces.forEach(LegacySchemaMigrator::storeKeyspaceInNewSchemaTables);
+
+        // flush the new tables before truncating the old ones
+        SchemaKeyspace.flush();
+
+        // truncate the original tables (will be snapshotted now, and will have been snapshotted by pre-flight checks)
+        logger.info("Truncating legacy schema tables");
+        truncateLegacySchemaTables();
+
+        // remove legacy schema tables from Schema, so that their presence doesn't give the users any wrong ideas
+        unloadLegacySchemaTables();
+
+        logger.info("Completed migration of legacy schema tables");
+    }
+
+    static void unloadLegacySchemaTables()
+    {
+        KeyspaceMetadata systemKeyspace = Schema.instance.getKSMetaData(SystemKeyspace.NAME);
+
+        Tables systemTables = systemKeyspace.tables;
+        for (CFMetaData table : LegacySchemaTables)
+            systemTables = systemTables.without(table.cfName);
+
+        LegacySchemaTables.forEach(Schema.instance::unload);
+
+        Schema.instance.setKeyspaceMetadata(systemKeyspace.withSwapped(systemTables));
+    }
+
+    private static void truncateLegacySchemaTables()
+    {
+        LegacySchemaTables.forEach(table -> Schema.instance.getColumnFamilyStoreInstance(table.cfId).truncateBlocking());
+    }
+
+    private static void storeKeyspaceInNewSchemaTables(Keyspace keyspace)
+    {
+        Mutation mutation = SchemaKeyspace.makeCreateKeyspaceMutation(keyspace.name, keyspace.params, keyspace.timestamp);
+
+        for (Table table : keyspace.tables)
+            SchemaKeyspace.addTableToSchemaMutation(table.metadata, table.timestamp, true, mutation);
+
+        for (Type type : keyspace.types)
+            SchemaKeyspace.addTypeToSchemaMutation(type.metadata, type.timestamp, mutation);
+
+        for (Function function : keyspace.functions)
+            SchemaKeyspace.addFunctionToSchemaMutation(function.metadata, function.timestamp, mutation);
+
+        for (Aggregate aggregate : keyspace.aggregates)
+            SchemaKeyspace.addAggregateToSchemaMutation(aggregate.metadata, aggregate.timestamp, mutation);
+
+        mutation.apply();
+    }
+
+    /*
+     * Read all keyspaces metadata (including nested tables, types, and functions), with their modification timestamps
+     */
+    private static Collection<Keyspace> readSchema()
+    {
+        String query = format("SELECT keyspace_name FROM %s.%s", SystemKeyspace.NAME, SystemKeyspace.LEGACY_KEYSPACES);
+        Collection<String> keyspaceNames = new ArrayList<>();
+        query(query).forEach(row -> keyspaceNames.add(row.getString("keyspace_name")));
+        keyspaceNames.removeAll(Schema.SYSTEM_KEYSPACE_NAMES);
+
+        Collection<Keyspace> keyspaces = new ArrayList<>();
+        keyspaceNames.forEach(name -> keyspaces.add(readKeyspace(name)));
+        return keyspaces;
+    }
+
+    private static Keyspace readKeyspace(String keyspaceName)
+    {
+        long timestamp = readKeyspaceTimestamp(keyspaceName);
+        KeyspaceParams params = readKeyspaceParams(keyspaceName);
+
+        Collection<Table> tables = readTables(keyspaceName);
+        Collection<Type> types = readTypes(keyspaceName);
+        Collection<Function> functions = readFunctions(keyspaceName);
+        Collection<Aggregate> aggregates = readAggregates(keyspaceName);
+
+        return new Keyspace(timestamp, keyspaceName, params, tables, types, functions, aggregates);
+    }
+
+    /*
+     * Reading keyspace params
+     */
+
+    private static long readKeyspaceTimestamp(String keyspaceName)
+    {
+        String query = format("SELECT writeTime(durable_writes) AS timestamp FROM %s.%s WHERE keyspace_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_KEYSPACES);
+        return query(query, keyspaceName).one().getLong("timestamp");
+    }
+
+    private static KeyspaceParams readKeyspaceParams(String keyspaceName)
+    {
+        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_KEYSPACES);
+        UntypedResultSet.Row row = query(query, keyspaceName).one();
+
+        boolean durableWrites = row.getBoolean("durable_writes");
+
+        Map<String, String> replication = new HashMap<>();
+        replication.putAll(fromJsonMap(row.getString("strategy_options")));
+        replication.put(KeyspaceParams.Replication.CLASS, row.getString("strategy_class"));
+
+        return KeyspaceParams.create(durableWrites, replication);
+    }
+
+    /*
+     * Reading tables
+     */
+
+    private static Collection<Table> readTables(String keyspaceName)
+    {
+        String query = format("SELECT columnfamily_name FROM %s.%s WHERE keyspace_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_COLUMNFAMILIES);
+        Collection<String> tableNames = new ArrayList<>();
+        query(query, keyspaceName).forEach(row -> tableNames.add(row.getString("columnfamily_name")));
+
+        Collection<Table> tables = new ArrayList<>();
+        tableNames.forEach(name -> tables.add(readTable(keyspaceName, name)));
+        return tables;
+    }
+
+    private static Table readTable(String keyspaceName, String tableName)
+    {
+        long timestamp = readTableTimestamp(keyspaceName, tableName);
+        CFMetaData metadata = readTableMetadata(keyspaceName, tableName);
+        return new Table(timestamp, metadata);
+    }
+
+    private static long readTableTimestamp(String keyspaceName, String tableName)
+    {
+        String query = format("SELECT writeTime(type) AS timestamp FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_COLUMNFAMILIES);
+        return query(query, keyspaceName, tableName).one().getLong("timestamp");
+    }
+
+    private static CFMetaData readTableMetadata(String keyspaceName, String tableName)
+    {
+        String tableQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
+                                   SystemKeyspace.NAME,
+                                   SystemKeyspace.LEGACY_COLUMNFAMILIES);
+        UntypedResultSet.Row tableRow = query(tableQuery, keyspaceName, tableName).one();
+
+        String columnsQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
+                                     SystemKeyspace.NAME,
+                                     SystemKeyspace.LEGACY_COLUMNS);
+        UntypedResultSet columnRows = query(columnsQuery, keyspaceName, tableName);
+
+        String triggersQuery = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND columnfamily_name = ?",
+                                      SystemKeyspace.NAME,
+                                      SystemKeyspace.LEGACY_TRIGGERS);
+        UntypedResultSet triggerRows = query(triggersQuery, keyspaceName, tableName);
+
+        return decodeTableMetadata(tableRow, columnRows, triggerRows);
+    }
+
+    private static CFMetaData decodeTableMetadata(UntypedResultSet.Row tableRow,
+                                                  UntypedResultSet columnRows,
+                                                  UntypedResultSet triggerRows)
+    {
+        String ksName = tableRow.getString("keyspace_name");
+        String cfName = tableRow.getString("columnfamily_name");
+
+        AbstractType<?> rawComparator = TypeParser.parse(tableRow.getString("comparator"));
+        AbstractType<?> subComparator = tableRow.has("subcomparator") ? TypeParser.parse(tableRow.getString("subcomparator")) : null;
+
+        boolean isSuper = "super".equals(tableRow.getString("type").toLowerCase());
+        boolean isDense = tableRow.getBoolean("is_dense");
+        boolean isCompound = rawComparator instanceof CompositeType;
+
+        // We don't really use the default validator but as we have it for backward compatibility, we use it to know if it's a counter table
+        AbstractType<?> defaultValidator = TypeParser.parse(tableRow.getString("default_validator"));
+        boolean isCounter = defaultValidator instanceof CounterColumnType;
+
+        /*
+         * With CASSANDRA-5202 we stopped inferring the cf id from the combination of keyspace/table names,
+         * and started storing the generated uuids in system.schema_columnfamilies.
+         *
+         * In 3.0 we SHOULD NOT see tables like that (2.0-created, non-upgraded).
+         * But in the off-chance that we do, we generate the deterministic uuid here.
+         */
+        UUID cfId = tableRow.has("cf_id")
+                  ? tableRow.getUUID("cf_id")
+                  : CFMetaData.generateLegacyCfId(ksName, cfName);
+
+        boolean isCQLTable = !isSuper && !isDense && isCompound;
+        boolean isStaticCompactTable = !isDense && !isCompound;
+
+        // Internally, compact tables have a specific layout, see CompactTables. But when upgrading from
+        // previous versions, they may not have the expected schema, so detect if we need to upgrade and do
+        // it in createColumnsFromColumnRows.
+        // We can remove this once we don't support upgrade from versions < 3.0.
+        boolean needsUpgrade = !isCQLTable && checkNeedsUpgrade(columnRows, isSuper, isStaticCompactTable);
+
+        List<ColumnDefinition> columnDefs = createColumnsFromColumnRows(columnRows,
+                                                                        ksName,
+                                                                        cfName,
+                                                                        rawComparator,
+                                                                        subComparator,
+                                                                        isSuper,
+                                                                        isCQLTable,
+                                                                        isStaticCompactTable,
+                                                                        needsUpgrade);
+
+        if (needsUpgrade)
+            addDefinitionForUpgrade(columnDefs, ksName, cfName, isStaticCompactTable, isSuper, rawComparator, subComparator, defaultValidator);
+
+        CFMetaData cfm = CFMetaData.create(ksName, cfName, cfId, isDense, isCompound, isSuper, isCounter, columnDefs);
+
+        cfm.readRepairChance(tableRow.getDouble("read_repair_chance"));
+        cfm.dcLocalReadRepairChance(tableRow.getDouble("local_read_repair_chance"));
+        cfm.gcGraceSeconds(tableRow.getInt("gc_grace_seconds"));
+        cfm.minCompactionThreshold(tableRow.getInt("min_compaction_threshold"));
+        cfm.maxCompactionThreshold(tableRow.getInt("max_compaction_threshold"));
+        if (tableRow.has("comment"))
+            cfm.comment(tableRow.getString("comment"));
+        if (tableRow.has("memtable_flush_period_in_ms"))
+            cfm.memtableFlushPeriod(tableRow.getInt("memtable_flush_period_in_ms"));
+        cfm.caching(CachingOptions.fromString(tableRow.getString("caching")));
+        if (tableRow.has("default_time_to_live"))
+            cfm.defaultTimeToLive(tableRow.getInt("default_time_to_live"));
+        if (tableRow.has("speculative_retry"))
+            cfm.speculativeRetry(CFMetaData.SpeculativeRetry.fromString(tableRow.getString("speculative_retry")));
+        cfm.compactionStrategyClass(CFMetaData.createCompactionStrategy(tableRow.getString("compaction_strategy_class")));
+        cfm.compressionParameters(CompressionParameters.create(fromJsonMap(tableRow.getString("compression_parameters"))));
+        cfm.compactionStrategyOptions(fromJsonMap(tableRow.getString("compaction_strategy_options")));
+
+        if (tableRow.has("min_index_interval"))
+            cfm.minIndexInterval(tableRow.getInt("min_index_interval"));
+
+        if (tableRow.has("max_index_interval"))
+            cfm.maxIndexInterval(tableRow.getInt("max_index_interval"));
+
+        if (tableRow.has("bloom_filter_fp_chance"))
+            cfm.bloomFilterFpChance(tableRow.getDouble("bloom_filter_fp_chance"));
+        else
+            cfm.bloomFilterFpChance(cfm.getBloomFilterFpChance());
+
+        if (tableRow.has("dropped_columns"))
+        {
+            Map<String, String> types = tableRow.has("dropped_columns_types")
+                                      ? tableRow.getMap("dropped_columns_types", UTF8Type.instance, UTF8Type.instance)
+                                      : Collections.<String, String>emptyMap();
+            addDroppedColumns(cfm, tableRow.getMap("dropped_columns", UTF8Type.instance, LongType.instance), types);
+        }
+
+        triggerRows.forEach(row -> cfm.addTriggerDefinition(readTrigger(row)));
+
+        return cfm;
+    }
+
+    // Should only be called on compact tables
+    private static boolean checkNeedsUpgrade(UntypedResultSet defs, boolean isSuper, boolean isStaticCompactTable)
+    {
+        if (isSuper)
+        {
+            // Check if we've added the "supercolumn map" column yet or not
+            for (UntypedResultSet.Row row : defs)
+                if (row.getString("column_name").isEmpty())
+                    return false;
+            return true;
+        }
+
+        // For static compact tables, we need to upgrade if the regular definitions haven't been converted to static yet,
+        // i.e. if we don't have a static definition yet.
+        if (isStaticCompactTable)
+            return !hasKind(defs, ColumnDefinition.Kind.STATIC);
+
+        // For dense compact tables, we need to upgrade if we don't have a compact value definition
+        return !hasKind(defs, ColumnDefinition.Kind.REGULAR);
+    }
+
+    private static void addDefinitionForUpgrade(List<ColumnDefinition> defs,
+                                                String ksName,
+                                                String cfName,
+                                                boolean isStaticCompactTable,
+                                                boolean isSuper,
+                                                AbstractType<?> rawComparator,
+                                                AbstractType<?> subComparator,
+                                                AbstractType<?> defaultValidator)
+    {
+        CompactTables.DefaultNames names = CompactTables.defaultNameGenerator(defs);
+
+        if (isSuper)
+        {
+            defs.add(ColumnDefinition.regularDef(ksName, cfName, CompactTables.SUPER_COLUMN_MAP_COLUMN_STR, MapType.getInstance(subComparator, defaultValidator, true), null));
+        }
+        else if (isStaticCompactTable)
+        {
+            defs.add(ColumnDefinition.clusteringKeyDef(ksName, cfName, names.defaultClusteringName(), rawComparator, null));
+            defs.add(ColumnDefinition.regularDef(ksName, cfName, names.defaultCompactValueName(), defaultValidator, null));
+        }
+        else
+        {
+            // For dense compact tables, we get here if we don't have a compact value column, in which case we should add it
+            // (we use EmptyType to recognize that the compact value was not declared by the user (see CreateTableStatement too))
+            defs.add(ColumnDefinition.regularDef(ksName, cfName, names.defaultCompactValueName(), EmptyType.instance, null));
+        }
+    }
+
+    private static boolean hasKind(UntypedResultSet defs, ColumnDefinition.Kind kind)
+    {
+        for (UntypedResultSet.Row row : defs)
+        {
+            if (deserializeKind(row.getString("type")) == kind)
+                return true;
+        }
+        return false;
+    }
+
+    private static void addDroppedColumns(CFMetaData cfm, Map<String, Long> droppedTimes, Map<String, String> types)
+    {
+        for (Map.Entry<String, Long> entry : droppedTimes.entrySet())
+        {
+            String name = entry.getKey();
+            long time = entry.getValue();
+            AbstractType<?> type = types.containsKey(name) ? TypeParser.parse(types.get(name)) : null;
+            cfm.getDroppedColumns().put(ColumnIdentifier.getInterned(name, true), new CFMetaData.DroppedColumn(type, time));
+        }
+    }
+
+    private static List<ColumnDefinition> createColumnsFromColumnRows(UntypedResultSet rows,
+                                                                      String keyspace,
+                                                                      String table,
+                                                                      AbstractType<?> rawComparator,
+                                                                      AbstractType<?> rawSubComparator,
+                                                                      boolean isSuper,
+                                                                      boolean isCQLTable,
+                                                                      boolean isStaticCompactTable,
+                                                                      boolean needsUpgrade)
+    {
+        List<ColumnDefinition> columns = new ArrayList<>();
+        for (UntypedResultSet.Row row : rows)
+            columns.add(createColumnFromColumnRow(row, keyspace, table, rawComparator, rawSubComparator, isSuper, isCQLTable, isStaticCompactTable, needsUpgrade));
+        return columns;
+    }
+
+    private static ColumnDefinition createColumnFromColumnRow(UntypedResultSet.Row row,
+                                                              String keyspace,
+                                                              String table,
+                                                              AbstractType<?> rawComparator,
+                                                              AbstractType<?> rawSubComparator,
+                                                              boolean isSuper,
+                                                              boolean isCQLTable,
+                                                              boolean isStaticCompactTable,
+                                                              boolean needsUpgrade)
+    {
+        ColumnDefinition.Kind kind = deserializeKind(row.getString("type"));
+        if (needsUpgrade && isStaticCompactTable && kind == ColumnDefinition.Kind.REGULAR)
+            kind = ColumnDefinition.Kind.STATIC;
+
+        Integer componentIndex = null;
+        if (row.has("component_index"))
+            componentIndex = row.getInt("component_index");
+
+        // Note: we save the column name as a string, but we should not assume that it is a UTF8 name;
+        // we need to use the comparator fromString method
+        AbstractType<?> comparator = isCQLTable
+                                   ? UTF8Type.instance
+                                   : CompactTables.columnDefinitionComparator(kind, isSuper, rawComparator, rawSubComparator);
+        ColumnIdentifier name = ColumnIdentifier.getInterned(comparator.fromString(row.getString("column_name")), comparator);
+
+        AbstractType<?> validator = parseType(row.getString("validator"));
+
+        IndexType indexType = null;
+        if (row.has("index_type"))
+            indexType = IndexType.valueOf(row.getString("index_type"));
+
+        Map<String, String> indexOptions = null;
+        if (row.has("index_options"))
+            indexOptions = fromJsonMap(row.getString("index_options"));
+
+        String indexName = null;
+        if (row.has("index_name"))
+            indexName = row.getString("index_name");
+
+        return new ColumnDefinition(keyspace, table, name, validator, indexType, indexOptions, indexName, componentIndex, kind);
+    }
+
+    private static ColumnDefinition.Kind deserializeKind(String kind)
+    {
+        if ("clustering_key".equalsIgnoreCase(kind))
+            return ColumnDefinition.Kind.CLUSTERING_COLUMN;
+        if ("compact_value".equalsIgnoreCase(kind))
+            return ColumnDefinition.Kind.REGULAR;
+        return Enum.valueOf(ColumnDefinition.Kind.class, kind.toUpperCase());
+    }
+
+    private static TriggerDefinition readTrigger(UntypedResultSet.Row row)
+    {
+        String name = row.getString("trigger_name");
+        String classOption = row.getMap("trigger_options", UTF8Type.instance, UTF8Type.instance).get("class");
+        return new TriggerDefinition(name, classOption);
+    }
+
+    /*
+     * Reading user types
+     */
+
+    private static Collection<Type> readTypes(String keyspaceName)
+    {
+        String query = format("SELECT type_name FROM %s.%s WHERE keyspace_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_USERTYPES);
+        Collection<String> typeNames = new ArrayList<>();
+        query(query, keyspaceName).forEach(row -> typeNames.add(row.getString("type_name")));
+
+        Collection<Type> types = new ArrayList<>();
+        typeNames.forEach(name -> types.add(readType(keyspaceName, name)));
+        return types;
+    }
+
+    private static Type readType(String keyspaceName, String typeName)
+    {
+        long timestamp = readTypeTimestamp(keyspaceName, typeName);
+        UserType metadata = readTypeMetadata(keyspaceName, typeName);
+        return new Type(timestamp, metadata);
+    }
+
+    /*
+     * Unfortunately there is not a single REGULAR column in system.schema_usertypes, so annoyingly we cannot
+     * use the writeTime() CQL function, and must resort to a lower level.
+     */
+    private static long readTypeTimestamp(String keyspaceName, String typeName)
+    {
+        ColumnFamilyStore store = org.apache.cassandra.db.Keyspace.open(SystemKeyspace.NAME)
+                                                                  .getColumnFamilyStore(SystemKeyspace.LEGACY_USERTYPES);
+
+        ClusteringComparator comparator = store.metadata.comparator;
+        Slices slices = Slices.with(comparator, Slice.make(comparator, typeName));
+        int nowInSec = FBUtilities.nowInSeconds();
+        DecoratedKey key = StorageService.getPartitioner().decorateKey(AsciiType.instance.fromString(keyspaceName));
+        SinglePartitionReadCommand command = SinglePartitionSliceCommand.create(store.metadata, nowInSec, key, slices);
+
+        try (OpOrder.Group op = store.readOrdering.start();
+             RowIterator partition = UnfilteredRowIterators.filter(command.queryMemtableAndDisk(store, op), nowInSec))
+        {
+            return partition.next().primaryKeyLivenessInfo().timestamp();
+        }
+    }
+
+    private static UserType readTypeMetadata(String keyspaceName, String typeName)
+    {
+        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND type_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_USERTYPES);
+        UntypedResultSet.Row row = query(query, keyspaceName, typeName).one();
+
+        List<ByteBuffer> names =
+            row.getList("field_names", UTF8Type.instance)
+               .stream()
+               .map(ByteBufferUtil::bytes)
+               .collect(Collectors.toList());
+
+        List<AbstractType<?>> types =
+            row.getList("field_types", UTF8Type.instance)
+               .stream()
+               .map(LegacySchemaMigrator::parseType)
+               .collect(Collectors.toList());
+
+        return new UserType(keyspaceName, bytes(typeName), names, types);
+    }
+
+    /*
+     * Reading UDFs
+     */
+
+    private static Collection<Function> readFunctions(String keyspaceName)
+    {
+        String query = format("SELECT function_name, signature FROM %s.%s WHERE keyspace_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_FUNCTIONS);
+        HashMultimap<String, List<String>> functionSignatures = HashMultimap.create();
+        query(query, keyspaceName).forEach(row ->
+        {
+            functionSignatures.put(row.getString("function_name"), row.getList("signature", UTF8Type.instance));
+        });
+
+        Collection<Function> functions = new ArrayList<>();
+        functionSignatures.entries().forEach(pair -> functions.add(readFunction(keyspaceName, pair.getKey(), pair.getValue())));
+        return functions;
+    }
+
+    private static Function readFunction(String keyspaceName, String functionName, List<String> signature)
+    {
+        long timestamp = readFunctionTimestamp(keyspaceName, functionName, signature);
+        UDFunction metadata = readFunctionMetadata(keyspaceName, functionName, signature);
+        return new Function(timestamp, metadata);
+    }
+
+    private static long readFunctionTimestamp(String keyspaceName, String functionName, List<String> signature)
+    {
+        String query = format("SELECT writeTime(return_type) AS timestamp " +
+                              "FROM %s.%s " +
+                              "WHERE keyspace_name = ? AND function_name = ? AND signature = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_FUNCTIONS);
+        return query(query, keyspaceName, functionName, signature).one().getLong("timestamp");
+    }
+
+    private static UDFunction readFunctionMetadata(String keyspaceName, String functionName, List<String> signature)
+    {
+        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND function_name = ? AND signature = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_FUNCTIONS);
+        UntypedResultSet.Row row = query(query, keyspaceName, functionName, signature).one();
+
+        FunctionName name = new FunctionName(keyspaceName, functionName);
+
+        List<ColumnIdentifier> argNames = new ArrayList<>();
+        if (row.has("argument_names"))
+            for (String arg : row.getList("argument_names", UTF8Type.instance))
+                argNames.add(new ColumnIdentifier(arg, true));
+
+        List<AbstractType<?>> argTypes = new ArrayList<>();
+        if (row.has("argument_types"))
+            for (String type : row.getList("argument_types", UTF8Type.instance))
+                argTypes.add(parseType(type));
+
+        AbstractType<?> returnType = parseType(row.getString("return_type"));
+
+        String language = row.getString("language");
+        String body = row.getString("body");
+        boolean calledOnNullInput = row.getBoolean("called_on_null_input");
+
+        try
+        {
+            return UDFunction.create(name, argNames, argTypes, returnType, calledOnNullInput, language, body);
+        }
+        catch (InvalidRequestException e)
+        {
+            return UDFunction.createBrokenFunction(name, argNames, argTypes, returnType, calledOnNullInput, language, body, e);
+        }
+    }
+
+    /*
+     * Reading UDAs
+     */
+
+    /**
+     * Reads every user-defined aggregate in the given keyspace from the legacy
+     * aggregates table. Aggregates are identified by (name, signature) pairs —
+     * the distinct pairs are collected first, then each aggregate is read in full.
+     */
+    private static Collection<Aggregate> readAggregates(String keyspaceName)
+    {
+        String query = format("SELECT aggregate_name, signature FROM %s.%s WHERE keyspace_name = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_AGGREGATES);
+
+        HashMultimap<String, List<String>> signatures = HashMultimap.create();
+        for (UntypedResultSet.Row row : query(query, keyspaceName))
+            signatures.put(row.getString("aggregate_name"), row.getList("signature", UTF8Type.instance));
+
+        Collection<Aggregate> aggregates = new ArrayList<>();
+        signatures.entries().forEach(entry -> aggregates.add(readAggregate(keyspaceName, entry.getKey(), entry.getValue())));
+        return aggregates;
+    }
+
+    /**
+     * Reads one aggregate: pairs its migration timestamp with its reconstructed metadata.
+     */
+    private static Aggregate readAggregate(String keyspaceName, String aggregateName, List<String> signature)
+    {
+        return new Aggregate(readAggregateTimestamp(keyspaceName, aggregateName, signature),
+                             readAggregateMetadata(keyspaceName, aggregateName, signature));
+    }
+
+    /**
+     * Returns the timestamp to migrate an aggregate with: the write time of its
+     * return_type cell in the legacy aggregates table.
+     */
+    private static long readAggregateTimestamp(String keyspaceName, String aggregateName, List<String> signature)
+    {
+        String query = format("SELECT writeTime(return_type) AS timestamp FROM %s.%s WHERE keyspace_name = ? AND aggregate_name = ? AND signature = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_AGGREGATES);
+        return query(query, keyspaceName, aggregateName, signature).one().getLong("timestamp");
+    }
+
+    /**
+     * Builds the UDAggregate for one (keyspace, name, signature) triple by reading
+     * its row from the legacy aggregates table. If the stored aggregate can no longer
+     * be created, a placeholder "broken" aggregate is returned instead of aborting
+     * the migration.
+     */
+    private static UDAggregate readAggregateMetadata(String keyspaceName, String functionName, List<String> signature)
+    {
+        // Fix: the legacy aggregates table is keyed by aggregate_name, not function_name
+        // (see readAggregates() and readAggregateTimestamp()); the previous query
+        // referenced a column that does not exist in that table.
+        String query = format("SELECT * FROM %s.%s WHERE keyspace_name = ? AND aggregate_name = ? AND signature = ?",
+                              SystemKeyspace.NAME,
+                              SystemKeyspace.LEGACY_AGGREGATES);
+        UntypedResultSet.Row row = query(query, keyspaceName, functionName, signature).one();
+
+        FunctionName name = new FunctionName(keyspaceName, functionName);
+
+        // argument_types may be absent for a zero-argument aggregate
+        List<String> types = row.getList("argument_types", UTF8Type.instance);
+        List<AbstractType<?>> argTypes = new ArrayList<>(types == null ? 0 : types.size());
+        if (types != null)
+            for (String type : types)
+                argTypes.add(parseType(type));
+
+        AbstractType<?> returnType = parseType(row.getString("return_type"));
+
+        // final_func, state_type and initcond are all optional
+        FunctionName stateFunc = parseAggregateFunctionName(keyspaceName, row.getString("state_func"));
+        FunctionName finalFunc = row.has("final_func") ? parseAggregateFunctionName(keyspaceName, row.getString("final_func")) : null;
+        AbstractType<?> stateType = row.has("state_type") ? parseType(row.getString("state_type")) : null;
+        ByteBuffer initcond = row.has("initcond") ? row.getBytes("initcond") : null;
+
+        try
+        {
+            return UDAggregate.create(name, argTypes, returnType, stateFunc, finalFunc, stateType, initcond);
+        }
+        catch (InvalidRequestException reason)
+        {
+            // keep migrating: record the aggregate in a broken state rather than failing startup
+            return UDAggregate.createBroken(name, argTypes, returnType, initcond, reason);
+        }
+    }
+
+    /**
+     * Resolves a state/final function reference stored with a legacy aggregate.
+     * Pre-2.2rc2 the reference may be a bare function name, implicitly living in
+     * the aggregate's own keyspace; otherwise it is qualified as "keyspace.function".
+     */
+    private static FunctionName parseAggregateFunctionName(String ksName, String func)
+    {
+        int dot = func.indexOf('.');
+
+        if (dot < 0)
+            return new FunctionName(ksName, func); // abbreviated form — same keyspace as the aggregate
+
+        String functionKeyspace = func.substring(0, dot);
+        String unqualifiedName = func.substring(dot + 1);
+
+        // a qualified reference may only point into the aggregate's keyspace or the system keyspace
+        assert functionKeyspace.equals(ksName) || functionKeyspace.equals(SystemKeyspace.NAME);
+
+        return new FunctionName(functionKeyspace, unqualifiedName);
+    }
+
+    /**
+     * Executes a CQL query through the internal (non-client) query path,
+     * binding the given values, and returns the raw untyped result set.
+     */
+    private static UntypedResultSet query(String query, Object... values)
+    {
+        return QueryProcessor.executeOnceInternal(query, values);
+    }
+
+    /**
+     * Parses a type string (as stored in the legacy schema tables) into an AbstractType.
+     */
+    private static AbstractType<?> parseType(String str)
+    {
+        return TypeParser.parse(str);
+    }
+
+    /**
+     * Immutable holder pairing a keyspace's migration timestamp with everything read
+     * from the legacy schema tables for it: its params plus all of its tables, types,
+     * functions and aggregates (each carrying its own timestamp).
+     */
+    private static final class Keyspace
+    {
+        final long timestamp;
+        final String name;
+        final KeyspaceParams params;
+        final Collection<Table> tables;
+        final Collection<Type> types;
+        final Collection<Function> functions;
+        final Collection<Aggregate> aggregates;
+
+        Keyspace(long timestamp,
+                 String name,
+                 KeyspaceParams params,
+                 Collection<Table> tables,
+                 Collection<Type> types,
+                 Collection<Function> functions,
+                 Collection<Aggregate> aggregates)
+        {
+            this.timestamp = timestamp;
+            this.name = name;
+            this.params = params;
+            this.tables = tables;
+            this.types = types;
+            this.functions = functions;
+            this.aggregates = aggregates;
+        }
+    }
+
+    /**
+     * Immutable holder pairing a table's migration timestamp with its metadata.
+     */
+    private static final class Table
+    {
+        final long timestamp;
+        final CFMetaData metadata;
+
+        Table(long timestamp, CFMetaData metadata)
+        {
+            this.timestamp = timestamp;
+            this.metadata = metadata;
+        }
+    }
+
+    /**
+     * Immutable holder pairing a user-defined type's migration timestamp with its metadata.
+     */
+    private static final class Type
+    {
+        final long timestamp;
+        final UserType metadata;
+
+        Type(long timestamp, UserType metadata)
+        {
+            this.timestamp = timestamp;
+            this.metadata = metadata;
+        }
+    }
+
+    /**
+     * Immutable holder pairing a user-defined function's migration timestamp with its metadata.
+     */
+    private static final class Function
+    {
+        final long timestamp;
+        final UDFunction metadata;
+
+        Function(long timestamp, UDFunction metadata)
+        {
+            this.timestamp = timestamp;
+            this.metadata = metadata;
+        }
+    }
+
+    /**
+     * Immutable holder pairing a user-defined aggregate's migration timestamp with its metadata.
+     */
+    private static final class Aggregate
+    {
+        final long timestamp;
+        final UDAggregate metadata;
+
+        Aggregate(long timestamp, UDAggregate metadata)
+        {
+            this.timestamp = timestamp;
+            this.metadata = metadata;
+        }
+    }
+}


Mime
View raw message