cassandra-commits mailing list archives

From slebre...@apache.org
Subject [11/11] cassandra git commit: Remove pre-3.0 compatibility code for 4.0
Date Wed, 30 Nov 2016 09:50:04 GMT
Remove pre-3.0 compatibility code for 4.0

patch by Sylvain Lebresne; reviewed by Aleksey Yeschenko for CASSANDRA-12716


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4a246419
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4a246419
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4a246419

Branch: refs/heads/trunk
Commit: 4a2464192e9e69457f5a5ecf26c094f9298bf069
Parents: 3fabc33
Author: Sylvain Lebresne <sylvain@datastax.com>
Authored: Tue Sep 27 15:26:15 2016 +0200
Committer: Sylvain Lebresne <sylvain@datastax.com>
Committed: Wed Nov 30 10:23:18 2016 +0100

----------------------------------------------------------------------
 CHANGES.txt                                     |    1 +
 NEWS.txt                                        |    4 +
 .../cassandra/auth/CassandraRoleManager.java    |   10 -
 .../batchlog/LegacyBatchlogMigrator.java        |  199 ----
 .../org/apache/cassandra/config/CFMetaData.java |    8 -
 .../restrictions/StatementRestrictions.java     |    3 -
 .../apache/cassandra/db/ColumnFamilyStore.java  |   27 +-
 .../org/apache/cassandra/db/Directories.java    |   40 +-
 .../org/apache/cassandra/db/LegacyLayout.java   |  488 +-------
 src/java/org/apache/cassandra/db/Memtable.java  |   12 +-
 src/java/org/apache/cassandra/db/Mutation.java  |   51 +-
 .../cassandra/db/MutationVerbHandler.java       |   19 +-
 .../cassandra/db/PartitionRangeReadCommand.java |    6 +-
 .../cassandra/db/RangeSliceVerbHandler.java     |   29 -
 .../org/apache/cassandra/db/ReadCommand.java    | 1061 +----------------
 .../org/apache/cassandra/db/ReadResponse.java   |  264 +----
 .../org/apache/cassandra/db/RowIndexEntry.java  |  189 +--
 .../org/apache/cassandra/db/Serializers.java    |  183 ---
 .../db/SinglePartitionReadCommand.java          |    4 +-
 .../org/apache/cassandra/db/SystemKeyspace.java |  229 +---
 .../cassandra/db/UnfilteredDeserializer.java    |  658 ++---------
 .../columniterator/AbstractSSTableIterator.java |   44 +-
 .../db/columniterator/SSTableIterator.java      |    6 +-
 .../columniterator/SSTableReversedIterator.java |   18 +-
 .../db/commitlog/CommitLogArchiver.java         |    2 +-
 .../db/commitlog/CommitLogDescriptor.java       |   47 +-
 .../cassandra/db/commitlog/CommitLogReader.java |   44 +-
 .../db/compaction/CompactionManager.java        |    4 +-
 .../cassandra/db/compaction/Upgrader.java       |    2 +-
 .../cassandra/db/compaction/Verifier.java       |    3 +-
 .../writers/DefaultCompactionWriter.java        |    2 +-
 .../writers/MajorLeveledCompactionWriter.java   |    2 +-
 .../writers/MaxSSTableSizeWriter.java           |    2 +-
 .../SplittingSizeTieredCompactionWriter.java    |    2 +-
 .../apache/cassandra/db/filter/RowFilter.java   |  103 +-
 .../db/partitions/PartitionUpdate.java          |   60 +-
 .../UnfilteredPartitionIterators.java           |    9 +-
 .../UnfilteredRowIteratorWithLowerBound.java    |    5 +-
 .../db/rows/UnfilteredRowIterators.java         |   10 +-
 .../apache/cassandra/dht/AbstractBounds.java    |    5 +
 src/java/org/apache/cassandra/gms/Gossiper.java |    6 -
 .../cassandra/hints/LegacyHintsMigrator.java    |  244 ----
 .../io/ForwardingVersionedSerializer.java       |   57 -
 .../io/compress/CompressionMetadata.java        |   11 +-
 .../io/sstable/AbstractSSTableSimpleWriter.java |    9 +-
 .../apache/cassandra/io/sstable/Component.java  |   94 +-
 .../apache/cassandra/io/sstable/Descriptor.java |  264 ++---
 .../apache/cassandra/io/sstable/IndexInfo.java  |   78 +-
 .../cassandra/io/sstable/IndexSummary.java      |   29 +-
 .../io/sstable/IndexSummaryRedistribution.java  |   16 +-
 .../apache/cassandra/io/sstable/SSTable.java    |   48 +-
 .../cassandra/io/sstable/SSTableLoader.java     |    2 +-
 .../io/sstable/SSTableSimpleIterator.java       |  112 +-
 .../cassandra/io/sstable/SSTableTxnWriter.java  |   10 +-
 .../sstable/format/RangeAwareSSTableWriter.java |    4 +-
 .../io/sstable/format/SSTableFormat.java        |    8 -
 .../io/sstable/format/SSTableReader.java        |   94 +-
 .../io/sstable/format/SSTableWriter.java        |   16 +-
 .../cassandra/io/sstable/format/Version.java    |   22 -
 .../io/sstable/format/big/BigFormat.java        |  125 +-
 .../io/sstable/format/big/BigTableWriter.java   |    6 +-
 .../io/sstable/metadata/CompactionMetadata.java |   13 -
 .../metadata/LegacyMetadataSerializer.java      |  163 ---
 .../io/sstable/metadata/StatsMetadata.java      |   44 +-
 .../io/util/CompressedChunkReader.java          |    5 +-
 .../io/util/DataIntegrityMetadata.java          |    6 +-
 .../cassandra/net/IncomingTcpConnection.java    |   28 +-
 .../org/apache/cassandra/net/MessageOut.java    |    2 +-
 .../apache/cassandra/net/MessagingService.java  |   73 +-
 .../cassandra/net/OutboundTcpConnection.java    |   35 +-
 .../apache/cassandra/repair/RepairJobDesc.java  |   27 +-
 .../org/apache/cassandra/repair/Validator.java  |    2 +-
 .../cassandra/schema/LegacySchemaMigrator.java  | 1099 ------------------
 .../cassandra/service/AbstractReadExecutor.java |    5 +-
 .../apache/cassandra/service/CacheService.java  |    4 -
 .../cassandra/service/CassandraDaemon.java      |   28 -
 .../apache/cassandra/service/DataResolver.java  |    2 +-
 .../apache/cassandra/service/ReadCallback.java  |    5 +-
 .../apache/cassandra/service/StartupChecks.java |    7 +-
 .../apache/cassandra/service/StorageProxy.java  |   72 +-
 .../cassandra/service/StorageService.java       |   11 +-
 .../apache/cassandra/service/paxos/Commit.java  |   16 +-
 .../service/paxos/PrepareResponse.java          |   39 +-
 .../cassandra/streaming/StreamReader.java       |   11 +-
 .../compress/CompressedStreamReader.java        |    3 +-
 .../streaming/messages/FileMessageHeader.java   |   25 +-
 .../streaming/messages/StreamMessage.java       |    2 -
 .../apache/cassandra/tools/SSTableExport.java   |    9 +-
 .../cassandra/tools/SSTableMetadataViewer.java  |    2 +-
 .../tools/SSTableRepairedAtSetter.java          |   21 +-
 .../cassandra/tools/StandaloneSplitter.java     |    5 +-
 .../org/apache/cassandra/utils/BloomFilter.java |   18 +-
 .../cassandra/utils/BloomFilterSerializer.java  |    8 +-
 .../apache/cassandra/utils/FilterFactory.java   |   16 +-
 .../org/apache/cassandra/utils/MerkleTree.java  |   48 +-
 .../lb-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-Data.db     |  Bin 84 -> 0 bytes
 .../lb-1-big-Digest.adler32                     |    1 -
 .../cf_with_duplicates_2_0/lb-1-big-Filter.db   |  Bin 16 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-Index.db    |  Bin 18 -> 0 bytes
 .../lb-1-big-Statistics.db                      |  Bin 4474 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-Summary.db  |  Bin 84 -> 0 bytes
 .../cf_with_duplicates_2_0/lb-1-big-TOC.txt     |    8 -
 .../2.0/CommitLog-3-1431528750790.log           |  Bin 2097152 -> 0 bytes
 .../2.0/CommitLog-3-1431528750791.log           |  Bin 2097152 -> 0 bytes
 .../2.0/CommitLog-3-1431528750792.log           |  Bin 2097152 -> 0 bytes
 .../2.0/CommitLog-3-1431528750793.log           |  Bin 2097152 -> 0 bytes
 test/data/legacy-commitlog/2.0/hash.txt         |    3 -
 .../2.1/CommitLog-4-1431529069529.log           |  Bin 2097152 -> 0 bytes
 .../2.1/CommitLog-4-1431529069530.log           |  Bin 2097152 -> 0 bytes
 test/data/legacy-commitlog/2.1/hash.txt         |    3 -
 .../CommitLog-5-1438186885380.log               |  Bin 839051 -> 0 bytes
 .../legacy-commitlog/2.2-lz4-bitrot/hash.txt    |    6 -
 .../CommitLog-5-1438186885380.log               |  Bin 839051 -> 0 bytes
 .../legacy-commitlog/2.2-lz4-bitrot2/hash.txt   |    6 -
 .../CommitLog-5-1438186885380.log               |  Bin 839001 -> 0 bytes
 .../legacy-commitlog/2.2-lz4-truncated/hash.txt |    5 -
 .../2.2-lz4/CommitLog-5-1438186885380.log       |  Bin 839051 -> 0 bytes
 .../2.2-lz4/CommitLog-5-1438186885381.log       |  Bin 100 -> 0 bytes
 test/data/legacy-commitlog/2.2-lz4/hash.txt     |    5 -
 .../2.2-snappy/CommitLog-5-1438186915514.log    |  Bin 820332 -> 0 bytes
 .../2.2-snappy/CommitLog-5-1438186915515.log    |  Bin 99 -> 0 bytes
 test/data/legacy-commitlog/2.2-snappy/hash.txt  |    5 -
 .../2.2/CommitLog-5-1438186815314.log           |  Bin 2097152 -> 0 bytes
 .../2.2/CommitLog-5-1438186815315.log           |  Bin 2097152 -> 0 bytes
 test/data/legacy-commitlog/2.2/hash.txt         |    5 -
 .../Keyspace1/Keyspace1-Standard1-jb-0-CRC.db   |  Bin 8 -> 0 bytes
 .../Keyspace1/Keyspace1-Standard1-jb-0-Data.db  |  Bin 36000 -> 0 bytes
 .../Keyspace1-Standard1-jb-0-Digest.sha1        |    1 -
 .../Keyspace1-Standard1-jb-0-Filter.db          |  Bin 1136 -> 0 bytes
 .../Keyspace1/Keyspace1-Standard1-jb-0-Index.db |  Bin 15300 -> 0 bytes
 .../Keyspace1-Standard1-jb-0-Statistics.db      |  Bin 4395 -> 0 bytes
 .../Keyspace1-Standard1-jb-0-Summary.db         |  Bin 162 -> 0 bytes
 .../Keyspace1/Keyspace1-Standard1-jb-0-TOC.txt  |    8 -
 ...bles-legacy_jb_clust-jb-1-CompressionInfo.db |  Bin 115 -> 0 bytes
 .../legacy_tables-legacy_jb_clust-jb-1-Data.db  |  Bin 12006 -> 0 bytes
 ...legacy_tables-legacy_jb_clust-jb-1-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_tables-legacy_jb_clust-jb-1-Index.db |  Bin 1219455 -> 0 bytes
 ...cy_tables-legacy_jb_clust-jb-1-Statistics.db |  Bin 6798 -> 0 bytes
 ...egacy_tables-legacy_jb_clust-jb-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_jb_clust-jb-1-TOC.txt  |    7 -
 ...acy_jb_clust_compact-jb-1-CompressionInfo.db |  Bin 83 -> 0 bytes
 ..._tables-legacy_jb_clust_compact-jb-1-Data.db |  Bin 5270 -> 0 bytes
 ...ables-legacy_jb_clust_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_jb_clust_compact-jb-1-Index.db |  Bin 157685 -> 0 bytes
 ...s-legacy_jb_clust_compact-jb-1-Statistics.db |  Bin 6791 -> 0 bytes
 ...bles-legacy_jb_clust_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ..._tables-legacy_jb_clust_compact-jb-1-TOC.txt |    7 -
 ...acy_jb_clust_counter-jb-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ..._tables-legacy_jb_clust_counter-jb-1-Data.db |  Bin 4276 -> 0 bytes
 ...ables-legacy_jb_clust_counter-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_jb_clust_counter-jb-1-Index.db |  Bin 610555 -> 0 bytes
 ...s-legacy_jb_clust_counter-jb-1-Statistics.db |  Bin 6801 -> 0 bytes
 ...bles-legacy_jb_clust_counter-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ..._tables-legacy_jb_clust_counter-jb-1-TOC.txt |    7 -
 ...lust_counter_compact-jb-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ...legacy_jb_clust_counter_compact-jb-1-Data.db |  Bin 4228 -> 0 bytes
 ...gacy_jb_clust_counter_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...egacy_jb_clust_counter_compact-jb-1-Index.db |  Bin 157685 -> 0 bytes
 ..._jb_clust_counter_compact-jb-1-Statistics.db |  Bin 6791 -> 0 bytes
 ...acy_jb_clust_counter_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...legacy_jb_clust_counter_compact-jb-1-TOC.txt |    7 -
 ...les-legacy_jb_simple-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 .../legacy_tables-legacy_jb_simple-jb-1-Data.db |  Bin 134 -> 0 bytes
 ...egacy_tables-legacy_jb_simple-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...legacy_tables-legacy_jb_simple-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...y_tables-legacy_jb_simple-jb-1-Statistics.db |  Bin 4392 -> 0 bytes
 ...gacy_tables-legacy_jb_simple-jb-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_jb_simple-jb-1-TOC.txt |    7 -
 ...cy_jb_simple_compact-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_jb_simple_compact-jb-1-Data.db |  Bin 108 -> 0 bytes
 ...bles-legacy_jb_simple_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_jb_simple_compact-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_jb_simple_compact-jb-1-Statistics.db |  Bin 4395 -> 0 bytes
 ...les-legacy_jb_simple_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...tables-legacy_jb_simple_compact-jb-1-TOC.txt |    7 -
 ...cy_jb_simple_counter-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_jb_simple_counter-jb-1-Data.db |  Bin 118 -> 0 bytes
 ...bles-legacy_jb_simple_counter-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_jb_simple_counter-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_jb_simple_counter-jb-1-Statistics.db |  Bin 4395 -> 0 bytes
 ...les-legacy_jb_simple_counter-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...tables-legacy_jb_simple_counter-jb-1-TOC.txt |    7 -
 ...mple_counter_compact-jb-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...egacy_jb_simple_counter_compact-jb-1-Data.db |  Bin 118 -> 0 bytes
 ...acy_jb_simple_counter_compact-jb-1-Filter.db |  Bin 24 -> 0 bytes
 ...gacy_jb_simple_counter_compact-jb-1-Index.db |  Bin 75 -> 0 bytes
 ...jb_simple_counter_compact-jb-1-Statistics.db |  Bin 4395 -> 0 bytes
 ...cy_jb_simple_counter_compact-jb-1-Summary.db |  Bin 71 -> 0 bytes
 ...egacy_jb_simple_counter_compact-jb-1-TOC.txt |    7 -
 ...bles-legacy_ka_clust-ka-1-CompressionInfo.db |  Bin 115 -> 0 bytes
 .../legacy_tables-legacy_ka_clust-ka-1-Data.db  |  Bin 12144 -> 0 bytes
 ...gacy_tables-legacy_ka_clust-ka-1-Digest.sha1 |    1 -
 ...legacy_tables-legacy_ka_clust-ka-1-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_tables-legacy_ka_clust-ka-1-Index.db |  Bin 1219455 -> 0 bytes
 ...cy_tables-legacy_ka_clust-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...egacy_tables-legacy_ka_clust-ka-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_ka_clust-ka-1-TOC.txt  |    8 -
 ...acy_ka_clust_compact-ka-1-CompressionInfo.db |  Bin 83 -> 0 bytes
 ..._tables-legacy_ka_clust_compact-ka-1-Data.db |  Bin 5277 -> 0 bytes
 ...les-legacy_ka_clust_compact-ka-1-Digest.sha1 |    1 -
 ...ables-legacy_ka_clust_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_ka_clust_compact-ka-1-Index.db |  Bin 157685 -> 0 bytes
 ...s-legacy_ka_clust_compact-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...bles-legacy_ka_clust_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ..._tables-legacy_ka_clust_compact-ka-1-TOC.txt |    8 -
 ...acy_ka_clust_counter-ka-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ..._tables-legacy_ka_clust_counter-ka-1-Data.db |  Bin 4635 -> 0 bytes
 ...les-legacy_ka_clust_counter-ka-1-Digest.sha1 |    1 -
 ...ables-legacy_ka_clust_counter-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...tables-legacy_ka_clust_counter-ka-1-Index.db |  Bin 610555 -> 0 bytes
 ...s-legacy_ka_clust_counter-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...bles-legacy_ka_clust_counter-ka-1-Summary.db |  Bin 71 -> 0 bytes
 ..._tables-legacy_ka_clust_counter-ka-1-TOC.txt |    8 -
 ...lust_counter_compact-ka-1-CompressionInfo.db |  Bin 75 -> 0 bytes
 ...legacy_ka_clust_counter_compact-ka-1-Data.db |  Bin 4527 -> 0 bytes
 ...cy_ka_clust_counter_compact-ka-1-Digest.sha1 |    1 -
 ...gacy_ka_clust_counter_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...egacy_ka_clust_counter_compact-ka-1-Index.db |  Bin 157685 -> 0 bytes
 ..._ka_clust_counter_compact-ka-1-Statistics.db |  Bin 6859 -> 0 bytes
 ...acy_ka_clust_counter_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ...legacy_ka_clust_counter_compact-ka-1-TOC.txt |    8 -
 ...les-legacy_ka_simple-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 .../legacy_tables-legacy_ka_simple-ka-1-Data.db |  Bin 134 -> 0 bytes
 ...acy_tables-legacy_ka_simple-ka-1-Digest.sha1 |    1 -
 ...egacy_tables-legacy_ka_simple-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...legacy_tables-legacy_ka_simple-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...y_tables-legacy_ka_simple-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...gacy_tables-legacy_ka_simple-ka-1-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_tables-legacy_ka_simple-ka-1-TOC.txt |    8 -
 ...cy_ka_simple_compact-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_ka_simple_compact-ka-1-Data.db |  Bin 105 -> 0 bytes
 ...es-legacy_ka_simple_compact-ka-1-Digest.sha1 |    1 -
 ...bles-legacy_ka_simple_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_ka_simple_compact-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_ka_simple_compact-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...les-legacy_ka_simple_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ...tables-legacy_ka_simple_compact-ka-1-TOC.txt |    8 -
 ...cy_ka_simple_counter-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...tables-legacy_ka_simple_counter-ka-1-Data.db |  Bin 125 -> 0 bytes
 ...es-legacy_ka_simple_counter-ka-1-Digest.sha1 |    1 -
 ...bles-legacy_ka_simple_counter-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...ables-legacy_ka_simple_counter-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...-legacy_ka_simple_counter-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...les-legacy_ka_simple_counter-ka-1-Summary.db |  Bin 71 -> 0 bytes
 ...tables-legacy_ka_simple_counter-ka-1-TOC.txt |    8 -
 ...mple_counter_compact-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 ...egacy_ka_simple_counter_compact-ka-1-Data.db |  Bin 124 -> 0 bytes
 ...y_ka_simple_counter_compact-ka-1-Digest.sha1 |    1 -
 ...acy_ka_simple_counter_compact-ka-1-Filter.db |  Bin 24 -> 0 bytes
 ...gacy_ka_simple_counter_compact-ka-1-Index.db |  Bin 75 -> 0 bytes
 ...ka_simple_counter_compact-ka-1-Statistics.db |  Bin 4453 -> 0 bytes
 ...cy_ka_simple_counter_compact-ka-1-Summary.db |  Bin 83 -> 0 bytes
 ...egacy_ka_simple_counter_compact-ka-1-TOC.txt |    8 -
 .../legacy_la_clust/la-1-big-CompressionInfo.db |  Bin 115 -> 0 bytes
 .../legacy_la_clust/la-1-big-Data.db            |  Bin 12082 -> 0 bytes
 .../legacy_la_clust/la-1-big-Digest.adler32     |    1 -
 .../legacy_la_clust/la-1-big-Filter.db          |  Bin 24 -> 0 bytes
 .../legacy_la_clust/la-1-big-Index.db           |  Bin 1219455 -> 0 bytes
 .../legacy_la_clust/la-1-big-Statistics.db      |  Bin 6859 -> 0 bytes
 .../legacy_la_clust/la-1-big-Summary.db         |  Bin 71 -> 0 bytes
 .../legacy_la_clust/la-1-big-TOC.txt            |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 83 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-Data.db    |  Bin 5286 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_clust_compact/la-1-big-Filter.db  |  Bin 24 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-Index.db   |  Bin 157685 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 6859 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-Summary.db |  Bin 75 -> 0 bytes
 .../legacy_la_clust_compact/la-1-big-TOC.txt    |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 75 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-Data.db    |  Bin 4623 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_clust_counter/la-1-big-Filter.db  |  Bin 24 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-Index.db   |  Bin 610555 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 6859 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-Summary.db |  Bin 71 -> 0 bytes
 .../legacy_la_clust_counter/la-1-big-TOC.txt    |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 75 -> 0 bytes
 .../la-1-big-Data.db                            |  Bin 4527 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../la-1-big-Filter.db                          |  Bin 24 -> 0 bytes
 .../la-1-big-Index.db                           |  Bin 157685 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 6859 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 75 -> 0 bytes
 .../la-1-big-TOC.txt                            |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../legacy_la_simple/la-1-big-Data.db           |  Bin 139 -> 0 bytes
 .../legacy_la_simple/la-1-big-Digest.adler32    |    1 -
 .../legacy_la_simple/la-1-big-Filter.db         |  Bin 24 -> 0 bytes
 .../legacy_la_simple/la-1-big-Index.db          |  Bin 75 -> 0 bytes
 .../legacy_la_simple/la-1-big-Statistics.db     |  Bin 4453 -> 0 bytes
 .../legacy_la_simple/la-1-big-Summary.db        |  Bin 71 -> 0 bytes
 .../legacy_la_simple/la-1-big-TOC.txt           |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../legacy_la_simple_compact/la-1-big-Data.db   |  Bin 106 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_simple_compact/la-1-big-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_la_simple_compact/la-1-big-Index.db  |  Bin 75 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4453 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 75 -> 0 bytes
 .../legacy_la_simple_compact/la-1-big-TOC.txt   |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../legacy_la_simple_counter/la-1-big-Data.db   |  Bin 123 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../legacy_la_simple_counter/la-1-big-Filter.db |  Bin 24 -> 0 bytes
 .../legacy_la_simple_counter/la-1-big-Index.db  |  Bin 75 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4453 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 71 -> 0 bytes
 .../legacy_la_simple_counter/la-1-big-TOC.txt   |    8 -
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../la-1-big-Data.db                            |  Bin 124 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../la-1-big-Filter.db                          |  Bin 24 -> 0 bytes
 .../la-1-big-Index.db                           |  Bin 75 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4453 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 75 -> 0 bytes
 .../la-1-big-TOC.txt                            |    8 -
 ...pactions_in_progress-ka-1-CompressionInfo.db |  Bin 43 -> 0 bytes
 .../system-compactions_in_progress-ka-1-Data.db |  Bin 146 -> 0 bytes
 ...tem-compactions_in_progress-ka-1-Digest.sha1 |    1 -
 ...ystem-compactions_in_progress-ka-1-Filter.db |  Bin 16 -> 0 bytes
 ...system-compactions_in_progress-ka-1-Index.db |  Bin 30 -> 0 bytes
 ...m-compactions_in_progress-ka-1-Statistics.db |  Bin 4450 -> 0 bytes
 ...stem-compactions_in_progress-ka-1-Summary.db |  Bin 116 -> 0 bytes
 .../system-compactions_in_progress-ka-1-TOC.txt |    8 -
 .../test-foo-ka-3-CompressionInfo.db            |  Bin 43 -> 0 bytes
 .../test-foo-ka-3-Data.db                       |  Bin 141 -> 0 bytes
 .../test-foo-ka-3-Digest.sha1                   |    1 -
 .../test-foo-ka-3-Filter.db                     |  Bin 176 -> 0 bytes
 .../test-foo-ka-3-Index.db                      |  Bin 90 -> 0 bytes
 .../test-foo-ka-3-Statistics.db                 |  Bin 4458 -> 0 bytes
 .../test-foo-ka-3-Summary.db                    |  Bin 80 -> 0 bytes
 .../test-foo-ka-3-TOC.txt                       |    8 -
 .../test-foo-tmp-ka-4-Data.db                   |  Bin 141 -> 0 bytes
 .../test-foo-tmp-ka-4-Index.db                  |  Bin 90 -> 0 bytes
 .../test-foo-tmplink-ka-4-Data.db               |  Bin 141 -> 0 bytes
 .../test-foo-tmplink-ka-4-Index.db              |  Bin 90 -> 0 bytes
 .../la-1-big-CompressionInfo.db                 |  Bin 43 -> 0 bytes
 .../la-1-big-Data.db                            |  Bin 93 -> 0 bytes
 .../la-1-big-Digest.adler32                     |    1 -
 .../la-1-big-Filter.db                          |  Bin 16 -> 0 bytes
 .../la-1-big-Index.db                           |  Bin 54 -> 0 bytes
 .../la-1-big-Statistics.db                      |  Bin 4442 -> 0 bytes
 .../la-1-big-Summary.db                         |  Bin 80 -> 0 bytes
 .../la-1-big-TOC.txt                            |    8 -
 .../tmp-la-2-big-Data.db                        |  Bin 93 -> 0 bytes
 .../tmp-la-2-big-Index.db                       |  Bin 54 -> 0 bytes
 .../tmp-lb-3-big-Data.db                        |  Bin 93 -> 0 bytes
 .../tmp-lb-3-big-Index.db                       |  Bin 54 -> 0 bytes
 .../tmplink-la-2-big-Data.db                    |  Bin 93 -> 0 bytes
 .../tmplink-la-2-big-Index.db                   |  Bin 54 -> 0 bytes
 .../manifest.json                               |    1 -
 .../manifest.json                               |    1 -
 .../manifest.json                               |    1 -
 .../Keyspace1-legacyleveled-ic-0-Data.db        |  Bin 530 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Digest.sha1    |    1 -
 .../Keyspace1-legacyleveled-ic-0-Filter.db      |  Bin 24 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Index.db       |  Bin 180 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Statistics.db  |  Bin 4361 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-Summary.db     |  Bin 92 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-0-TOC.txt        |    7 -
 .../Keyspace1-legacyleveled-ic-1-Data.db        |  Bin 530 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Digest.sha1    |    1 -
 .../Keyspace1-legacyleveled-ic-1-Filter.db      |  Bin 24 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Index.db       |  Bin 180 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Statistics.db  |  Bin 4361 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-Summary.db     |  Bin 92 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-1-TOC.txt        |    7 -
 .../Keyspace1-legacyleveled-ic-2-Data.db        |  Bin 530 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Digest.sha1    |    1 -
 .../Keyspace1-legacyleveled-ic-2-Filter.db      |  Bin 24 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Index.db       |  Bin 180 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Statistics.db  |  Bin 4361 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-Summary.db     |  Bin 92 -> 0 bytes
 .../Keyspace1-legacyleveled-ic-2-TOC.txt        |    7 -
 .../Keyspace1/legacyleveled/legacyleveled.json  |   27 -
 .../org/apache/cassandra/cql3/ViewLongTest.java |    2 +-
 .../cassandra/utils/LongBloomFilterTest.java    |   66 +-
 .../cassandra/AbstractSerializationsTester.java |    6 -
 .../apache/cassandra/batchlog/BatchTest.java    |   59 -
 .../cassandra/batchlog/BatchlogManagerTest.java |  123 +-
 .../cassandra/cache/CacheProviderTest.java      |    4 +-
 .../org/apache/cassandra/cql3/CQLTester.java    |    1 +
 .../org/apache/cassandra/db/PartitionTest.java  |   18 +-
 .../apache/cassandra/db/ReadResponseTest.java   |   99 --
 .../apache/cassandra/db/RowIndexEntryTest.java  |   43 +-
 .../unit/org/apache/cassandra/db/ScrubTest.java |   40 +-
 .../db/SinglePartitionSliceCommandTest.java     |   55 -
 .../apache/cassandra/db/SystemKeyspaceTest.java |   77 --
 .../org/apache/cassandra/db/VerifyTest.java     |    7 +-
 .../db/commitlog/CommitLogDescriptorTest.java   |   10 +-
 .../cassandra/db/commitlog/CommitLogTest.java   |   13 +-
 .../db/commitlog/CommitLogUpgradeTest.java      |   77 --
 .../db/compaction/AntiCompactionTest.java       |    4 +-
 .../db/lifecycle/RealTransactionsTest.java      |    5 +-
 .../rows/DigestBackwardCompatibilityTest.java   |  179 ---
 .../hints/LegacyHintsMigratorTest.java          |  197 ----
 .../sasi/disk/PerSSTableIndexWriterTest.java    |    4 +-
 .../CompressedRandomAccessReaderTest.java       |    8 +-
 .../CompressedSequentialWriterTest.java         |    2 +-
 .../io/sstable/BigTableWriterTest.java          |   12 +-
 .../cassandra/io/sstable/DescriptorTest.java    |   42 +-
 .../cassandra/io/sstable/IndexSummaryTest.java  |    8 +-
 .../cassandra/io/sstable/LegacySSTableTest.java |   16 +-
 .../io/sstable/SSTableRewriterTest.java         |    4 +-
 .../cassandra/io/sstable/SSTableUtils.java      |    2 +-
 .../io/sstable/SSTableWriterTestBase.java       |    4 +-
 .../sstable/format/ClientModeSSTableTest.java   |  133 ---
 .../format/SSTableFlushObserverTest.java        |    2 +-
 .../metadata/MetadataSerializerTest.java        |    9 +-
 .../cassandra/io/util/MmappedRegionsTest.java   |    2 +-
 .../schema/LegacySchemaMigratorTest.java        |  845 --------------
 .../cassandra/service/SerializationsTest.java   |    2 +-
 .../compression/CompressedInputStreamTest.java  |    5 +-
 .../org/apache/cassandra/utils/BitSetTest.java  |    9 +-
 .../apache/cassandra/utils/BloomFilterTest.java |   84 +-
 .../cassandra/utils/SerializationsTest.java     |   90 +-
 418 files changed, 805 insertions(+), 8864 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 28b7900..61844f2 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 4.0
+ * Remove pre-3.0 compatibility code for 4.0 (CASSANDRA-12716)
  * Add column definition kind to dropped columns in schema (CASSANDRA-12705)
  * Add (automate) Nodetool Documentation (CASSANDRA-12672)
  * Update bundled cqlsh python driver to 3.7.0 (CASSANDRA-12736)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/NEWS.txt
----------------------------------------------------------------------
diff --git a/NEWS.txt b/NEWS.txt
index 631d770..d838847 100644
--- a/NEWS.txt
+++ b/NEWS.txt
@@ -21,6 +21,10 @@ New features
 
 Upgrading
 ---------
+    - Cassandra 4.0 removed support for any pre-3.0 format. This means you cannot upgrade from a 2.x version to 4.0
+      directly; you have to upgrade to a 3.0.x/3.x version first (and run upgradesstables). In particular, this means
+      Cassandra 4.0 cannot load or read pre-3.0 sstables in any way: you will need to upgrade those sstables on a
+      3.0.x/3.x version first.
     - Cassandra will no longer allow invalid keyspace replication options, such as invalid datacenter names for
       NetworkTopologyStrategy. Operators MUST add new nodes to a datacenter before they can set ALTER or
       CREATE keyspace replication policies using that datacenter. Existing keyspaces will continue to operate, 

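Some context for the NEWS.txt note above: sstable formats are identified by short,
lexicographically ordered version strings (the legacy test data deleted by this commit uses
"jb" for 2.0, "ka" for 2.1 and "la"/"lb" for 2.2, while the 3.0 formats start at "ma").
Rejecting pre-3.0 sstables therefore comes down to a version-string comparison. What follows
is a minimal, hypothetical Java sketch of such a check, not the project's actual startup
validation:

    public final class SSTableVersionCheck
    {
        // First version string of the 3.0 "big" format; anything that compares
        // lower ("jb", "ka", "la", "lb", ...) is a pre-3.0 sstable that a 4.0
        // node can no longer read and must reject.
        private static final String OLDEST_SUPPORTED = "ma";

        public static boolean isSupported(String version)
        {
            // Version strings are two lower-case letters and sort by age.
            return version.compareTo(OLDEST_SUPPORTED) >= 0;
        }

        public static void main(String[] args)
        {
            System.out.println(isSupported("ka")); // false: 2.1 table, upgrade on 3.0.x/3.x first
            System.out.println(isSupported("mc")); // true: already a 3.x format
        }
    }
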
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
index 7b55ac9..d371df3 100644
--- a/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
+++ b/src/java/org/apache/cassandra/auth/CassandraRoleManager.java
@@ -375,16 +375,6 @@ public class CassandraRoleManager implements IRoleManager
         {
             public void run()
             {
-                // If not all nodes are on 2.2, we don't want to initialize the role manager as this will confuse 2.1
-                // nodes (see CASSANDRA-9761 for details). So we re-schedule the setup for later, hoping that the upgrade
-                // will be finished by then.
-                if (!MessagingService.instance().areAllNodesAtLeast22())
-                {
-                    logger.trace("Not all nodes are upgraded to a version that supports Roles yet, rescheduling setup task");
-                    scheduleSetupTask(setupTask);
-                    return;
-                }
-
                 isClusterReady = true;
                 try
                 {

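The block removed above implemented a generic "defer setup until the whole cluster is
upgraded" pattern: check a cluster-wide precondition and, if it fails, re-schedule the same
task and bail out. Below is a self-contained sketch of that pattern; clusterReady and
scheduleSetupTask are illustrative stand-ins for the Cassandra internals, not real APIs:

    import java.util.concurrent.Executors;
    import java.util.concurrent.ScheduledExecutorService;
    import java.util.concurrent.TimeUnit;

    public class RescheduleDemo
    {
        static final ScheduledExecutorService executor = Executors.newSingleThreadScheduledExecutor();
        static int checks = 0;

        // Stand-in for the removed MessagingService.instance().areAllNodesAtLeast22().
        static boolean clusterReady()
        {
            return ++checks >= 3; // pretend the upgrade finishes by the third check
        }

        static void scheduleSetupTask(Runnable task)
        {
            executor.schedule(task, 100, TimeUnit.MILLISECONDS);
        }

        public static void main(String[] args)
        {
            Runnable setup = new Runnable()
            {
                public void run()
                {
                    if (!clusterReady())
                    {
                        System.out.println("not all nodes upgraded yet, rescheduling setup task");
                        scheduleSetupTask(this); // try again later, as the removed code did
                        return;
                    }
                    System.out.println("setup complete");
                    executor.shutdown();
                }
            };
            scheduleSetupTask(setup);
        }
    }
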
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java b/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java
deleted file mode 100644
index 4592488..0000000
--- a/src/java/org/apache/cassandra/batchlog/LegacyBatchlogMigrator.java
+++ /dev/null
@@ -1,199 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.cassandra.batchlog;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import java.nio.ByteBuffer;
-import java.util.*;
-import java.util.concurrent.TimeUnit;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import org.apache.cassandra.config.SchemaConstants;
-import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
-import org.apache.cassandra.db.*;
-import org.apache.cassandra.db.marshal.UUIDType;
-import org.apache.cassandra.db.partitions.PartitionUpdate;
-import org.apache.cassandra.exceptions.WriteFailureException;
-import org.apache.cassandra.exceptions.WriteTimeoutException;
-import org.apache.cassandra.io.util.DataInputBuffer;
-import org.apache.cassandra.io.util.DataOutputBuffer;
-import org.apache.cassandra.net.MessagingService;
-import org.apache.cassandra.service.AbstractWriteResponseHandler;
-import org.apache.cassandra.service.WriteResponseHandler;
-import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.UUIDGen;
-
-public final class LegacyBatchlogMigrator
-{
-    private static final Logger logger = LoggerFactory.getLogger(LegacyBatchlogMigrator.class);
-
-    private LegacyBatchlogMigrator()
-    {
-        // static class
-    }
-
-    @SuppressWarnings("deprecation")
-    public static void migrate()
-    {
-        ColumnFamilyStore store = Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME).getColumnFamilyStore(SystemKeyspace.LEGACY_BATCHLOG);
-
-        // nothing to migrate
-        if (store.isEmpty())
-            return;
-
-        logger.info("Migrating legacy batchlog to new storage");
-
-        int convertedBatches = 0;
-        String query = String.format("SELECT id, data, written_at, version FROM %s.%s",
-                                     SchemaConstants.SYSTEM_KEYSPACE_NAME,
-                                     SystemKeyspace.LEGACY_BATCHLOG);
-
-        int pageSize = BatchlogManager.calculatePageSize(store);
-
-        UntypedResultSet rows = QueryProcessor.executeInternalWithPaging(query, pageSize);
-        for (UntypedResultSet.Row row : rows)
-        {
-            if (apply(row, convertedBatches))
-                convertedBatches++;
-        }
-
-        if (convertedBatches > 0)
-            Keyspace.openAndGetStore(SystemKeyspace.LegacyBatchlog).truncateBlocking();
-    }
-
-    @SuppressWarnings("deprecation")
-    public static boolean isLegacyBatchlogMutation(Mutation mutation)
-    {
-        return mutation.getKeyspaceName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME)
-            && mutation.getPartitionUpdate(SystemKeyspace.LegacyBatchlog.cfId) != null;
-    }
-
-    @SuppressWarnings("deprecation")
-    public static void handleLegacyMutation(Mutation mutation)
-    {
-        PartitionUpdate update = mutation.getPartitionUpdate(SystemKeyspace.LegacyBatchlog.cfId);
-        logger.trace("Applying legacy batchlog mutation {}", update);
-        update.forEach(row -> apply(UntypedResultSet.Row.fromInternalRow(update.metadata(), update.partitionKey(), row), -1));
-    }
-
-    private static boolean apply(UntypedResultSet.Row row, long counter)
-    {
-        UUID id = row.getUUID("id");
-        long timestamp = id.version() == 1 ? UUIDGen.unixTimestamp(id) : row.getLong("written_at");
-        int version = row.has("version") ? row.getInt("version") : MessagingService.VERSION_12;
-
-        if (id.version() != 1)
-            id = UUIDGen.getTimeUUID(timestamp, counter);
-
-        logger.trace("Converting mutation at {}", timestamp);
-
-        try (DataInputBuffer in = new DataInputBuffer(row.getBytes("data"), false))
-        {
-            int numMutations = in.readInt();
-            List<Mutation> mutations = new ArrayList<>(numMutations);
-            for (int i = 0; i < numMutations; i++)
-                mutations.add(Mutation.serializer.deserialize(in, version));
-
-            BatchlogManager.store(Batch.createLocal(id, TimeUnit.MILLISECONDS.toMicros(timestamp), mutations));
-            return true;
-        }
-        catch (Throwable t)
-        {
-            logger.error("Failed to convert mutation {} at timestamp {}", id, timestamp, t);
-            return false;
-        }
-    }
-
-    public static void syncWriteToBatchlog(WriteResponseHandler<?> handler, Batch batch, Collection<InetAddress> endpoints)
-    throws WriteTimeoutException, WriteFailureException
-    {
-        for (InetAddress target : endpoints)
-        {
-            logger.trace("Sending legacy batchlog store request {} to {} for {} mutations", batch.id, target, batch.size());
-
-            int targetVersion = MessagingService.instance().getVersion(target);
-            MessagingService.instance().sendRR(getStoreMutation(batch, targetVersion).createMessage(MessagingService.Verb.MUTATION),
-                                               target,
-                                               handler,
-                                               false);
-        }
-    }
-
-    public static void asyncRemoveFromBatchlog(Collection<InetAddress> endpoints, UUID uuid, long queryStartNanoTime)
-    {
-        AbstractWriteResponseHandler<IMutation> handler = new WriteResponseHandler<>(endpoints,
-                                                                                     Collections.<InetAddress>emptyList(),
-                                                                                     ConsistencyLevel.ANY,
-                                                                                     Keyspace.open(SchemaConstants.SYSTEM_KEYSPACE_NAME),
-                                                                                     null,
-                                                                                     WriteType.SIMPLE,
-                                                                                     queryStartNanoTime);
-        Mutation mutation = getRemoveMutation(uuid);
-
-        for (InetAddress target : endpoints)
-        {
-            logger.trace("Sending legacy batchlog remove request {} to {}", uuid, target);
-            MessagingService.instance().sendRR(mutation.createMessage(MessagingService.Verb.MUTATION), target, handler, false);
-        }
-    }
-
-    static void store(Batch batch, int version)
-    {
-        getStoreMutation(batch, version).apply();
-    }
-
-    @SuppressWarnings("deprecation")
-    static Mutation getStoreMutation(Batch batch, int version)
-    {
-        PartitionUpdate.SimpleBuilder builder = PartitionUpdate.simpleBuilder(SystemKeyspace.LegacyBatchlog, batch.id);
-        builder.row()
-               .timestamp(batch.creationTime)
-               .add("written_at", new Date(batch.creationTime / 1000))
-               .add("data", getSerializedMutations(version, batch.decodedMutations))
-               .add("version", version);
-        return builder.buildAsMutation();
-    }
-
-    @SuppressWarnings("deprecation")
-    private static Mutation getRemoveMutation(UUID uuid)
-    {
-        return new Mutation(PartitionUpdate.fullPartitionDelete(SystemKeyspace.LegacyBatchlog,
-                                                                UUIDType.instance.decompose(uuid),
-                                                                FBUtilities.timestampMicros(),
-                                                                FBUtilities.nowInSeconds()));
-    }
-
-    private static ByteBuffer getSerializedMutations(int version, Collection<Mutation> mutations)
-    {
-        try (DataOutputBuffer buf = new DataOutputBuffer())
-        {
-            buf.writeInt(mutations.size());
-            for (Mutation mutation : mutations)
-                Mutation.serializer.serialize(mutation, buf, version);
-            return buf.buffer();
-        }
-        catch (IOException e)
-        {
-            throw new RuntimeException(e);
-        }
-    }
-}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/config/CFMetaData.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java
index a60700c..8f11089 100644
--- a/src/java/org/apache/cassandra/config/CFMetaData.java
+++ b/src/java/org/apache/cassandra/config/CFMetaData.java
@@ -94,8 +94,6 @@ public final class CFMetaData
     public volatile ClusteringComparator comparator;  // bytes, long, timeuuid, utf8, etc. This is built directly from clusteringColumns
     public final IPartitioner partitioner;            // partitioner the table uses
 
-    private final Serializers serializers;
-
     // non-final, for now
     public volatile TableParams params = TableParams.DEFAULT;
 
@@ -303,7 +301,6 @@ public final class CFMetaData
         rebuild();
 
         this.resource = DataResource.table(ksName, cfName);
-        this.serializers = new Serializers(this);
     }
 
     // This rebuilds information that is intrinsically duplicated from the table definition but
@@ -1115,11 +1112,6 @@ public final class CFMetaData
         return isView;
     }
 
-    public Serializers serializers()
-    {
-        return serializers;
-    }
-
     public AbstractType<?> makeLegacyDefaultValidator()
     {
         return isCounter()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
index 53ac68c..7b034ea 100644
--- a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
@@ -556,9 +556,6 @@ public final class StatementRestrictions
                                                VariableSpecifications boundNames,
                                                SecondaryIndexManager indexManager)
     {
-        if (!MessagingService.instance().areAllNodesAtLeast30())
-            throw new InvalidRequestException("Please upgrade all nodes to at least 3.0 before using custom index expressions");
-
         if (expressions.size() > 1)
             throw new InvalidRequestException(IndexRestrictions.MULTIPLE_EXPRESSIONS);
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index f46e6f7..2234d79 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -747,8 +747,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
                                                descriptor.ksname,
                                                descriptor.cfname,
                                                fileIndexGenerator.incrementAndGet(),
-                                               descriptor.formatType,
-                                               descriptor.digestComponent);
+                                               descriptor.formatType);
             }
             while (new File(newDescriptor.filenameFor(Component.DATA)).exists());
 
@@ -815,26 +814,24 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         return name;
     }
 
-    public String getSSTablePath(File directory)
+    public Descriptor newSSTableDescriptor(File directory)
     {
-        return getSSTablePath(directory, SSTableFormat.Type.current().info.getLatestVersion(), SSTableFormat.Type.current());
+        return newSSTableDescriptor(directory, SSTableFormat.Type.current().info.getLatestVersion(), SSTableFormat.Type.current());
     }
 
-    public String getSSTablePath(File directory, SSTableFormat.Type format)
+    public Descriptor newSSTableDescriptor(File directory, SSTableFormat.Type format)
     {
-        return getSSTablePath(directory, format.info.getLatestVersion(), format);
+        return newSSTableDescriptor(directory, format.info.getLatestVersion(), format);
     }
 
-    private String getSSTablePath(File directory, Version version, SSTableFormat.Type format)
+    private Descriptor newSSTableDescriptor(File directory, Version version, SSTableFormat.Type format)
     {
-        Descriptor desc = new Descriptor(version,
-                                         directory,
-                                         keyspace.getName(),
-                                         name,
-                                         fileIndexGenerator.incrementAndGet(),
-                                         format,
-                                         Component.digestFor(BigFormat.latestVersion.uncompressedChecksumType()));
-        return desc.filenameFor(Component.DATA);
+        return new Descriptor(version,
+                              directory,
+                              keyspace.getName(),
+                              name,
+                              fileIndexGenerator.incrementAndGet(),
+                              format);
     }
 
     /**

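The hunk above swaps getSSTablePath, which returned a pre-formatted Data.db path, for
newSSTableDescriptor, which hands callers the Descriptor itself (now built without a digest
component). A rough, self-contained sketch of the resulting caller-side pattern, using toy
stand-ins rather than Cassandra's real Descriptor/Component classes:

    import java.io.File;

    // Toy stand-in for org.apache.cassandra.io.sstable.Descriptor.
    final class ToyDescriptor
    {
        final File directory;
        final String version;
        final int generation;

        ToyDescriptor(File directory, String version, int generation)
        {
            this.directory = directory;
            this.version = version;
            this.generation = generation;
        }

        // One descriptor derives the path of any component on demand, which is
        // why callers no longer need a pre-formatted Data.db path.
        String filenameFor(String component)
        {
            return new File(directory, version + '-' + generation + "-big-" + component).getPath();
        }
    }

    public class DescriptorDemo
    {
        public static void main(String[] args)
        {
            // Before: String path = cfs.getSSTablePath(directory);
            // After:  Descriptor desc = cfs.newSSTableDescriptor(directory);
            ToyDescriptor desc = new ToyDescriptor(new File("/var/lib/cassandra/data/ks/tbl"), "mc", 1);
            System.out.println(desc.filenameFor("Data.db"));
            System.out.println(desc.filenameFor("Index.db"));
        }
    }
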
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Directories.java b/src/java/org/apache/cassandra/db/Directories.java
index e0e1c08..2bb4784 100644
--- a/src/java/org/apache/cassandra/db/Directories.java
+++ b/src/java/org/apache/cassandra/db/Directories.java
@@ -260,9 +260,8 @@ public class Directories
                         if (file.isDirectory())
                             return false;
 
-                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(),
-                                                                                            file.getName());
-                        return pair != null && pair.left.ksname.equals(metadata.ksName) && pair.left.cfname.equals(metadata.cfName);
+                        Descriptor desc = SSTable.tryDescriptorFromFilename(file);
+                        return desc != null && desc.ksname.equals(metadata.ksName) && desc.cfname.equals(metadata.cfName);
 
                     }
                 });
@@ -308,8 +307,9 @@ public class Directories
     {
         for (File dir : dataPaths)
         {
-            if (new File(dir, filename).exists())
-                return Descriptor.fromFilename(dir, filename).left;
+            File file = new File(dir, filename);
+            if (file.exists())
+                return Descriptor.fromFilename(file);
         }
         return null;
     }
@@ -755,7 +755,7 @@ public class Directories
                             return false;
 
                     case FINAL:
-                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file.getParentFile(), file.getName());
+                        Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(file);
                         if (pair == null)
                             return false;
 
@@ -769,24 +769,6 @@ public class Directories
                             previous = new HashSet<>();
                             components.put(pair.left, previous);
                         }
-                        else if (pair.right.type == Component.Type.DIGEST)
-                        {
-                            if (pair.right != pair.left.digestComponent)
-                            {
-                                // Need to update the DIGEST component as it might be set to another
-                                // digest type as a guess. This may happen if the first component is
-                                // not the DIGEST (but the Data component for example), so the digest
-                                // type is _guessed_ from the Version.
-                                // Although the Version explicitly defines the digest type, that doesn't
-                                // seem to hold under all circumstances. Generated sstables from a
-                                // post 2.1.8 snapshot produced Digest.sha1 files although Version
-                                // defines Adler32.
-                                // TL;DR this piece of code updates the digest component to be "correct".
-                                components.remove(pair.left);
-                                Descriptor updated = pair.left.withDigestComponent(pair.right);
-                                components.put(updated, previous);
-                            }
-                        }
                         previous.add(pair.right);
                         nbFiles++;
                         return false;
@@ -1043,11 +1025,11 @@ public class Directories
         public boolean isAcceptable(Path path)
         {
             File file = path.toFile();
-            Pair<Descriptor, Component> pair = SSTable.tryComponentFromFilename(path.getParent().toFile(), file.getName());
-            return pair != null
-                    && pair.left.ksname.equals(metadata.ksName)
-                    && pair.left.cfname.equals(metadata.cfName)
-                    && !toSkip.contains(file);
+            Descriptor desc = SSTable.tryDescriptorFromFilename(file);
+            return desc != null
+                && desc.ksname.equals(metadata.ksName)
+                && desc.cfname.equals(metadata.cfName)
+                && !toSkip.contains(file);
         }
     }
 }

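Throughout the Directories changes above, SSTable.tryComponentFromFilename(parent, name)
becomes the simpler SSTable.tryDescriptorFromFilename(file), keeping the same
null-means-not-an-sstable contract. A small hypothetical sketch of that filter pattern (the
filename grammar below is a toy, not the real one):

    import java.io.File;
    import java.util.Arrays;

    public class SSTableFilterDemo
    {
        // Toy stand-in for SSTable.tryDescriptorFromFilename: returns
        // {keyspace, table} when the name parses, null otherwise.
        static String[] tryDescriptor(File file)
        {
            String[] parts = file.getName().split("-");
            return parts.length == 4 && parts[3].equals("Data.db") ? parts : null;
        }

        public static void main(String[] args)
        {
            File[] files = { new File("ks1-tbl-1-Data.db"),
                             new File("backup.txt"),
                             new File("ks2-tbl-1-Data.db") };

            // Mirrors the filters in the hunks: parse, null-check, then match keyspace/table.
            Arrays.stream(files)
                  .filter(f -> {
                      String[] desc = tryDescriptor(f);
                      return desc != null && desc[0].equals("ks1") && desc[1].equals("tbl");
                  })
                  .forEach(f -> System.out.println("accepted: " + f.getName()));
        }
    }
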
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/LegacyLayout.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/LegacyLayout.java b/src/java/org/apache/cassandra/db/LegacyLayout.java
index ab62a0e..ad0f1b7 100644
--- a/src/java/org/apache/cassandra/db/LegacyLayout.java
+++ b/src/java/org/apache/cassandra/db/LegacyLayout.java
@@ -17,33 +17,24 @@
  */
 package org.apache.cassandra.db;
 
-import java.io.DataInput;
 import java.io.IOException;
-import java.io.IOError;
 import java.nio.ByteBuffer;
 import java.security.MessageDigest;
 import java.util.*;
 
-import org.apache.cassandra.config.SchemaConstants;
 import org.apache.cassandra.utils.AbstractIterator;
 import com.google.common.collect.Iterators;
-import com.google.common.collect.Lists;
 import com.google.common.collect.PeekingIterator;
 
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.ColumnDefinition;
 import org.apache.cassandra.db.filter.ColumnFilter;
-import org.apache.cassandra.db.filter.DataLimits;
 import org.apache.cassandra.db.rows.*;
-import org.apache.cassandra.db.partitions.*;
 import org.apache.cassandra.db.context.CounterContext;
 import org.apache.cassandra.db.marshal.*;
 import org.apache.cassandra.io.util.DataInputPlus;
 import org.apache.cassandra.io.util.DataOutputPlus;
-import org.apache.cassandra.net.MessagingService;
 import org.apache.cassandra.utils.*;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
 
 import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
 
@@ -52,18 +43,8 @@ import static org.apache.cassandra.utils.ByteBufferUtil.bytes;
  */
 public abstract class LegacyLayout
 {
-    private static final Logger logger = LoggerFactory.getLogger(LegacyLayout.class);
-
     public final static int MAX_CELL_NAME_LENGTH = FBUtilities.MAX_UNSIGNED_SHORT;
 
-    public final static int STATIC_PREFIX = 0xFFFF;
-
-    public final static int DELETION_MASK        = 0x01;
-    public final static int EXPIRATION_MASK      = 0x02;
-    public final static int COUNTER_MASK         = 0x04;
-    public final static int COUNTER_UPDATE_MASK  = 0x08;
-    private final static int RANGE_TOMBSTONE_MASK = 0x10;
-
     private LegacyLayout() {}
 
     public static AbstractType<?> makeLegacyComparator(CFMetaData metadata)
@@ -135,7 +116,7 @@ public abstract class LegacyLayout
         return decodeCellName(metadata, cellname, false);
     }
 
-    public static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer cellname, boolean readAllAsDynamic) throws UnknownColumnException
+    private static LegacyCellName decodeCellName(CFMetaData metadata, ByteBuffer cellname, boolean readAllAsDynamic) throws UnknownColumnException
     {
         Clustering clustering = decodeClustering(metadata, cellname);
 
@@ -233,30 +214,6 @@ public abstract class LegacyLayout
         return new LegacyBound(cb, metadata.isCompound() && CompositeType.isStaticName(bound), collectionName);
     }
 
-    public static ByteBuffer encodeBound(CFMetaData metadata, ClusteringBound bound, boolean isStart)
-    {
-        if (bound == ClusteringBound.BOTTOM || bound == ClusteringBound.TOP || metadata.comparator.size() == 0)
-            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
-
-        ClusteringPrefix clustering = bound.clustering();
-
-        if (!metadata.isCompound())
-        {
-            assert clustering.size() == 1;
-            return clustering.get(0);
-        }
-
-        CompositeType ctype = CompositeType.getInstance(metadata.comparator.subtypes());
-        CompositeType.Builder builder = ctype.builder();
-        for (int i = 0; i < clustering.size(); i++)
-            builder.add(clustering.get(i));
-
-        if (isStart)
-            return bound.isInclusive() ? builder.build() : builder.buildAsEndOfRange();
-        else
-            return bound.isInclusive() ? builder.buildAsEndOfRange() : builder.build();
-    }
-
     public static ByteBuffer encodeCellName(CFMetaData metadata, ClusteringPrefix clustering, ByteBuffer columnName, ByteBuffer collectionElement)
     {
         boolean isStatic = clustering == Clustering.STATIC_CLUSTERING;
@@ -330,213 +287,6 @@ public abstract class LegacyLayout
         return Clustering.make(components.subList(0, Math.min(csize, components.size())).toArray(new ByteBuffer[csize]));
     }
 
-    public static ByteBuffer encodeClustering(CFMetaData metadata, ClusteringPrefix clustering)
-    {
-        if (clustering.size() == 0)
-            return ByteBufferUtil.EMPTY_BYTE_BUFFER;
-
-        if (!metadata.isCompound())
-        {
-            assert clustering.size() == 1;
-            return clustering.get(0);
-        }
-
-        ByteBuffer[] values = new ByteBuffer[clustering.size()];
-        for (int i = 0; i < clustering.size(); i++)
-            values[i] = clustering.get(i);
-        return CompositeType.build(values);
-    }
-
-    /**
-     * The maximum number of cells to include per partition when converting to the old format.
-     * <p>
-     * We already apply the limit during the actual query, but for queries that count cells and not rows (thrift queries
-     * and distinct queries as far as old nodes are concerned), we may still include a little bit more than requested
-     * because {@link DataLimits} always includes full rows. So if the limit ends in the middle of a queried row, the
-     * full row will be part of our result. This would confuse old nodes, however, so we make sure to truncate it to
-     * what's expected before writing it on the wire.
-     *
-     * @param command the read command for which to determine the maximum cells per partition. This can be {@code null}
-     * in which case {@code Integer.MAX_VALUE} is returned.
-     * @return the maximum number of cells per partition that should be enforced according to the read command if
-     * post-query limitations are in order (see above). This will be {@code Integer.MAX_VALUE} if no such limits are
-     * necessary.
-     */
-    private static int maxCellsPerPartition(ReadCommand command)
-    {
-        if (command == null)
-            return Integer.MAX_VALUE;
-
-        DataLimits limits = command.limits();
-
-        // There are 2 types of DISTINCT queries: those that include only the partition key, and those that include static columns.
-        // On old nodes, the latter expects the first row in terms of CQL count, which is what we already have and there is no additional
-        // limit to apply. The former, however, expects only one cell per partition and relies on it (See CASSANDRA-10762).
-        if (limits.isDistinct())
-            return command.columnFilter().fetchedColumns().statics.isEmpty() ? 1 : Integer.MAX_VALUE;
-
-        switch (limits.kind())
-        {
-            case THRIFT_LIMIT:
-            case SUPER_COLUMN_COUNTING_LIMIT:
-                return limits.perPartitionCount();
-            default:
-                return Integer.MAX_VALUE;
-        }
-    }
-
-    // For serializing to old wire format
-    public static LegacyUnfilteredPartition fromUnfilteredRowIterator(ReadCommand command, UnfilteredRowIterator iterator)
-    {
-        // we need to extract the range tombstones, so materialize the partition. Since this is
-        // used for the on-wire format, this is no worse than it used to be.
-        final ImmutableBTreePartition partition = ImmutableBTreePartition.create(iterator);
-        DeletionInfo info = partition.deletionInfo();
-        Pair<LegacyRangeTombstoneList, Iterator<LegacyCell>> pair = fromRowIterator(partition.metadata(), partition.iterator(), partition.staticRow());
-
-        LegacyLayout.LegacyRangeTombstoneList rtl = pair.left;
-
-        // Processing the cell iterator results in the LegacyRangeTombstoneList being populated, so we do this
-        // before we use the LegacyRangeTombstoneList at all
-        List<LegacyLayout.LegacyCell> cells = Lists.newArrayList(pair.right);
-
-        int maxCellsPerPartition = maxCellsPerPartition(command);
-        if (cells.size() > maxCellsPerPartition)
-            cells = cells.subList(0, maxCellsPerPartition);
-
-        // The LegacyRangeTombstoneList already has range tombstones for the single-row deletions and complex
-        // deletions.  Go through our normal range tombstones and add them to the LegacyRTL so that the range
-        // tombstones all get merged and sorted properly.
-        if (info.hasRanges())
-        {
-            Iterator<RangeTombstone> rangeTombstoneIterator = info.rangeIterator(false);
-            while (rangeTombstoneIterator.hasNext())
-            {
-                RangeTombstone rt = rangeTombstoneIterator.next();
-                Slice slice = rt.deletedSlice();
-                LegacyLayout.LegacyBound start = new LegacyLayout.LegacyBound(slice.start(), false, null);
-                LegacyLayout.LegacyBound end = new LegacyLayout.LegacyBound(slice.end(), false, null);
-                rtl.add(start, end, rt.deletionTime().markedForDeleteAt(), rt.deletionTime().localDeletionTime());
-            }
-        }
-
-        return new LegacyUnfilteredPartition(info.getPartitionDeletion(), rtl, cells);
-    }
-
-    public static void serializeAsLegacyPartition(ReadCommand command, UnfilteredRowIterator partition, DataOutputPlus out, int version) throws IOException
-    {
-        assert version < MessagingService.VERSION_30;
-
-        out.writeBoolean(true);
-
-        LegacyLayout.LegacyUnfilteredPartition legacyPartition = LegacyLayout.fromUnfilteredRowIterator(command, partition);
-
-        UUIDSerializer.serializer.serialize(partition.metadata().cfId, out, version);
-        DeletionTime.serializer.serialize(legacyPartition.partitionDeletion, out);
-
-        legacyPartition.rangeTombstones.serialize(out, partition.metadata());
-
-        // begin cell serialization
-        out.writeInt(legacyPartition.cells.size());
-        for (LegacyLayout.LegacyCell cell : legacyPartition.cells)
-        {
-            ByteBufferUtil.writeWithShortLength(cell.name.encode(partition.metadata()), out);
-            out.writeByte(cell.serializationFlags());
-            if (cell.isExpiring())
-            {
-                out.writeInt(cell.ttl);
-                out.writeInt(cell.localDeletionTime);
-            }
-            else if (cell.isTombstone())
-            {
-                out.writeLong(cell.timestamp);
-                out.writeInt(TypeSizes.sizeof(cell.localDeletionTime));
-                out.writeInt(cell.localDeletionTime);
-                continue;
-            }
-            else if (cell.isCounterUpdate())
-            {
-                out.writeLong(cell.timestamp);
-                long count = CounterContext.instance().getLocalCount(cell.value);
-                ByteBufferUtil.writeWithLength(ByteBufferUtil.bytes(count), out);
-                continue;
-            }
-            else if (cell.isCounter())
-            {
-                out.writeLong(Long.MIN_VALUE);  // timestampOfLastDelete (not used, and MIN_VALUE is the default)
-            }
-
-            out.writeLong(cell.timestamp);
-            ByteBufferUtil.writeWithLength(cell.value, out);
-        }
-    }
-
-    // For the old wire format
-    // Note: this can return null if an empty partition is serialized!
-    public static UnfilteredRowIterator deserializeLegacyPartition(DataInputPlus in, int version, SerializationHelper.Flag flag, ByteBuffer key) throws IOException
-    {
-        assert version < MessagingService.VERSION_30;
-
-        // This is only used in mutations, and mutations have never allowed "null" column families
-        boolean present = in.readBoolean();
-        if (!present)
-            return null;
-
-        CFMetaData metadata = CFMetaData.serializer.deserialize(in, version);
-        LegacyDeletionInfo info = LegacyDeletionInfo.deserialize(metadata, in);
-        int size = in.readInt();
-        Iterator<LegacyCell> cells = deserializeCells(metadata, in, flag, size);
-        SerializationHelper helper = new SerializationHelper(metadata, version, flag);
-        return onWireCellstoUnfilteredRowIterator(metadata, metadata.partitioner.decorateKey(key), info, cells, false, helper);
-    }
-
-    // For the old wire format
-    public static long serializedSizeAsLegacyPartition(ReadCommand command, UnfilteredRowIterator partition, int version)
-    {
-        assert version < MessagingService.VERSION_30;
-
-        if (partition.isEmpty())
-            return TypeSizes.sizeof(false);
-
-        long size = TypeSizes.sizeof(true);
-
-        LegacyLayout.LegacyUnfilteredPartition legacyPartition = LegacyLayout.fromUnfilteredRowIterator(command, partition);
-
-        size += UUIDSerializer.serializer.serializedSize(partition.metadata().cfId, version);
-        size += DeletionTime.serializer.serializedSize(legacyPartition.partitionDeletion);
-        size += legacyPartition.rangeTombstones.serializedSize(partition.metadata());
-
-        // begin cell serialization
-        size += TypeSizes.sizeof(legacyPartition.cells.size());
-        for (LegacyLayout.LegacyCell cell : legacyPartition.cells)
-        {
-            size += ByteBufferUtil.serializedSizeWithShortLength(cell.name.encode(partition.metadata()));
-            size += 1;  // serialization flags
-            if (cell.kind == LegacyLayout.LegacyCell.Kind.EXPIRING)
-            {
-                size += TypeSizes.sizeof(cell.ttl);
-                size += TypeSizes.sizeof(cell.localDeletionTime);
-            }
-            else if (cell.kind == LegacyLayout.LegacyCell.Kind.DELETED)
-            {
-                size += TypeSizes.sizeof(cell.timestamp);
-                // localDeletionTime replaces cell.value as the body
-                size += TypeSizes.sizeof(TypeSizes.sizeof(cell.localDeletionTime));
-                size += TypeSizes.sizeof(cell.localDeletionTime);
-                continue;
-            }
-            else if (cell.kind == LegacyLayout.LegacyCell.Kind.COUNTER)
-            {
-                size += TypeSizes.sizeof(Long.MIN_VALUE);  // timestampOfLastDelete
-            }
-
-            size += TypeSizes.sizeof(cell.timestamp);
-            size += ByteBufferUtil.serializedSizeWithLength(cell.value);
-        }
-
-        return size;
-    }
-
     // For thrift sake
     public static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata,
                                                                 DecoratedKey key,
@@ -547,32 +297,6 @@ public abstract class LegacyLayout
         return toUnfilteredRowIterator(metadata, key, delInfo, cells, false, helper);
     }
 
-    // For deserializing old wire format
-    public static UnfilteredRowIterator onWireCellstoUnfilteredRowIterator(CFMetaData metadata,
-                                                                           DecoratedKey key,
-                                                                           LegacyDeletionInfo delInfo,
-                                                                           Iterator<LegacyCell> cells,
-                                                                           boolean reversed,
-                                                                           SerializationHelper helper)
-    {
-
-        // If the table is a static compact table, the "column_metadata" are now internally encoded as
-        // static. This has already been recognized by decodeCellName, but it means the cells
-        // provided are not in the expected order (the "static" cells are not necessarily at the front).
-        // So sort them to make sure toUnfilteredRowIterator works as expected.
-        // Further, if the query is reversed, then the on-wire format still has cells in non-reversed
-        // order, but we need to have them reversed in the final UnfilteredRowIterator. So reverse them.
-        if (metadata.isStaticCompactTable() || reversed)
-        {
-            List<LegacyCell> l = new ArrayList<>();
-            Iterators.addAll(l, cells);
-            Collections.sort(l, legacyCellComparator(metadata, reversed));
-            cells = l.iterator();
-        }
-
-        return toUnfilteredRowIterator(metadata, key, delInfo, cells, reversed, helper);
-    }
-
     private static UnfilteredRowIterator toUnfilteredRowIterator(CFMetaData metadata,
                                                                  DecoratedKey key,
                                                                  LegacyDeletionInfo delInfo,
@@ -624,47 +348,6 @@ public abstract class LegacyLayout
                                                true);
     }
 
-    public static Row extractStaticColumns(CFMetaData metadata, DataInputPlus in, Columns statics) throws IOException
-    {
-        assert !statics.isEmpty();
-        assert metadata.isCompactTable();
-
-        if (metadata.isSuper())
-            // TODO: there is in practice nothing to do here, but we need to handle the column_metadata for super columns somewhere else
-            throw new UnsupportedOperationException();
-
-        Set<ByteBuffer> columnsToFetch = new HashSet<>(statics.size());
-        for (ColumnDefinition column : statics)
-            columnsToFetch.add(column.name.bytes);
-
-        Row.Builder builder = BTreeRow.unsortedBuilder(FBUtilities.nowInSeconds());
-        builder.newRow(Clustering.STATIC_CLUSTERING);
-
-        boolean foundOne = false;
-        LegacyAtom atom;
-        while ((atom = readLegacyAtom(metadata, in, false)) != null)
-        {
-            if (atom.isCell())
-            {
-                LegacyCell cell = atom.asCell();
-                if (!columnsToFetch.contains(cell.name.encode(metadata)))
-                    continue;
-
-                foundOne = true;
-                builder.addCell(new BufferCell(cell.name.column, cell.timestamp, cell.ttl, cell.localDeletionTime, cell.value, null));
-            }
-            else
-            {
-                LegacyRangeTombstone tombstone = atom.asRangeTombstone();
-                // TODO: we need to track tombstones and potentially ignore cells that are
-                // shadowed (or even better, replace them by tombstones).
-                throw new UnsupportedOperationException();
-            }
-        }
-
-        return foundOne ? builder.build() : Rows.EMPTY_STATIC_ROW;
-    }
-
     private static Row getNextRow(CellGrouper grouper, PeekingIterator<? extends LegacyAtom> cells)
     {
         if (!cells.hasNext())
@@ -714,7 +397,7 @@ public abstract class LegacyLayout
             private Iterator<LegacyCell> initializeRow()
             {
                 if (staticRow == null || staticRow.isEmpty())
-                    return Collections.<LegacyLayout.LegacyCell>emptyIterator();
+                    return Collections.emptyIterator();
 
                 Pair<LegacyRangeTombstoneList, Iterator<LegacyCell>> row = fromRow(metadata, staticRow);
                 deletions.addAll(row.left);
@@ -843,7 +526,7 @@ public abstract class LegacyLayout
         return legacyCellComparator(metadata, false);
     }
 
-    public static Comparator<LegacyCell> legacyCellComparator(final CFMetaData metadata, final boolean reversed)
+    private static Comparator<LegacyCell> legacyCellComparator(final CFMetaData metadata, final boolean reversed)
     {
         final Comparator<LegacyCellName> cellNameComparator = legacyCellNameComparator(metadata, reversed);
         return new Comparator<LegacyCell>()
@@ -1013,121 +696,7 @@ public abstract class LegacyLayout
         };
     }
 
-    public static LegacyAtom readLegacyAtom(CFMetaData metadata, DataInputPlus in, boolean readAllAsDynamic) throws IOException
-    {
-        while (true)
-        {
-            ByteBuffer cellname = ByteBufferUtil.readWithShortLength(in);
-            if (!cellname.hasRemaining())
-                return null; // END_OF_ROW
-
-            try
-            {
-                int b = in.readUnsignedByte();
-                return (b & RANGE_TOMBSTONE_MASK) != 0
-                    ? readLegacyRangeTombstoneBody(metadata, in, cellname)
-                    : readLegacyCellBody(metadata, in, cellname, b, SerializationHelper.Flag.LOCAL, readAllAsDynamic);
-            }
-            catch (UnknownColumnException e)
-            {
-                // We can get here if we read a cell for a dropped column, and if that is the case,
-                // then simply ignoring the cell is fine. But also note that we ignore it if it's the
-                // system keyspace, because for those tables we actually remove columns without registering
-                // them in the dropped columns
-                assert metadata.ksName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME) || metadata.getDroppedColumnDefinition(e.columnName) != null : e.getMessage();
-            }
-        }
-    }
-
-    public static LegacyCell readLegacyCell(CFMetaData metadata, DataInput in, SerializationHelper.Flag flag) throws IOException, UnknownColumnException
-    {
-        ByteBuffer cellname = ByteBufferUtil.readWithShortLength(in);
-        int b = in.readUnsignedByte();
-        return readLegacyCellBody(metadata, in, cellname, b, flag, false);
-    }
-
-    public static LegacyCell readLegacyCellBody(CFMetaData metadata, DataInput in, ByteBuffer cellname, int mask, SerializationHelper.Flag flag, boolean readAllAsDynamic)
-    throws IOException, UnknownColumnException
-    {
-        // Note that we want to call decodeCellName only after we've deserialized other parts, since it can throw
-        // and we want to throw only after having deserialized the full cell.
-        if ((mask & COUNTER_MASK) != 0)
-        {
-            in.readLong(); // timestampOfLastDelete: this has been unused for a long time so we ignore it
-            long ts = in.readLong();
-            ByteBuffer value = ByteBufferUtil.readWithLength(in);
-            if (flag == SerializationHelper.Flag.FROM_REMOTE || (flag == SerializationHelper.Flag.LOCAL && CounterContext.instance().shouldClearLocal(value)))
-                value = CounterContext.instance().clearAllLocal(value);
-            return new LegacyCell(LegacyCell.Kind.COUNTER, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, Cell.NO_DELETION_TIME, Cell.NO_TTL);
-        }
-        else if ((mask & EXPIRATION_MASK) != 0)
-        {
-            int ttl = in.readInt();
-            int expiration = in.readInt();
-            long ts = in.readLong();
-            ByteBuffer value = ByteBufferUtil.readWithLength(in);
-            return new LegacyCell(LegacyCell.Kind.EXPIRING, decodeCellName(metadata, cellname, readAllAsDynamic), value, ts, expiration, ttl);
-        }
-        else
-        {
-            long ts = in.readLong();
-            ByteBuffer value = ByteBufferUtil.readWithLength(in);
-            LegacyCellName name = decodeCellName(metadata, cellname, readAllAsDynamic);
-            return (mask & COUNTER_UPDATE_MASK) != 0
-                ? new LegacyCell(LegacyCell.Kind.COUNTER, name, CounterContext.instance().createLocal(ByteBufferUtil.toLong(value)), ts, Cell.NO_DELETION_TIME, Cell.NO_TTL)
-                : ((mask & DELETION_MASK) == 0
-                        ? new LegacyCell(LegacyCell.Kind.REGULAR, name, value, ts, Cell.NO_DELETION_TIME, Cell.NO_TTL)
-                        : new LegacyCell(LegacyCell.Kind.DELETED, name, ByteBufferUtil.EMPTY_BYTE_BUFFER, ts, ByteBufferUtil.toInt(value), Cell.NO_TTL));
-        }
-    }
-
-    public static LegacyRangeTombstone readLegacyRangeTombstoneBody(CFMetaData metadata, DataInputPlus in, ByteBuffer boundname) throws IOException
-    {
-        LegacyBound min = decodeBound(metadata, boundname, true);
-        LegacyBound max = decodeBound(metadata, ByteBufferUtil.readWithShortLength(in), false);
-        DeletionTime dt = DeletionTime.serializer.deserialize(in);
-        return new LegacyRangeTombstone(min, max, dt);
-    }
-
-    public static Iterator<LegacyCell> deserializeCells(final CFMetaData metadata,
-                                                        final DataInput in,
-                                                        final SerializationHelper.Flag flag,
-                                                        final int size)
-    {
-        return new AbstractIterator<LegacyCell>()
-        {
-            private int i = 0;
-
-            protected LegacyCell computeNext()
-            {
-                if (i >= size)
-                    return endOfData();
-
-                ++i;
-                try
-                {
-                    return readLegacyCell(metadata, in, flag);
-                }
-                catch (UnknownColumnException e)
-                {
-                    // We can get here if we read a cell for a dropped column, and if that is the case,
-                    // then simply ignoring the cell is fine. But also note that we ignore it if it's the
-                    // system keyspace, because for those tables we actually remove columns without registering
-                    // them in the dropped columns
-                    if (metadata.ksName.equals(SchemaConstants.SYSTEM_KEYSPACE_NAME) || metadata.getDroppedColumnDefinition(e.columnName) != null)
-                        return computeNext();
-                    else
-                        throw new IOError(e);
-                }
-                catch (IOException e)
-                {
-                    throw new IOError(e);
-                }
-            }
-        };
-    }
-
-    public static class CellGrouper
+    private static class CellGrouper
     {
         public final CFMetaData metadata;
         private final boolean isStatic;
@@ -1285,53 +854,6 @@ public abstract class LegacyLayout
         }
     }
 
-    public static class LegacyUnfilteredPartition
-    {
-        public final DeletionTime partitionDeletion;
-        public final LegacyRangeTombstoneList rangeTombstones;
-        public final List<LegacyCell> cells;
-
-        private LegacyUnfilteredPartition(DeletionTime partitionDeletion, LegacyRangeTombstoneList rangeTombstones, List<LegacyCell> cells)
-        {
-            this.partitionDeletion = partitionDeletion;
-            this.rangeTombstones = rangeTombstones;
-            this.cells = cells;
-        }
-
-        public void digest(CFMetaData metadata, MessageDigest digest)
-        {
-            for (LegacyCell cell : cells)
-            {
-                digest.update(cell.name.encode(metadata).duplicate());
-
-                if (cell.isCounter())
-                    CounterContext.instance().updateDigest(digest, cell.value);
-                else
-                    digest.update(cell.value.duplicate());
-
-                FBUtilities.updateWithLong(digest, cell.timestamp);
-                FBUtilities.updateWithByte(digest, cell.serializationFlags());
-
-                if (cell.isExpiring())
-                    FBUtilities.updateWithInt(digest, cell.ttl);
-
-                if (cell.isCounter())
-                {
-                    // Counters used to have the timestampOfLastDelete field, which we stopped using long ago and which has
-                    // been hard-coded to Long.MIN_VALUE, but it was still taken into account in 2.2 counter digests (to
-                    // maintain backward compatibility in the first place).
-                    FBUtilities.updateWithLong(digest, Long.MIN_VALUE);
-                }
-            }
-
-            if (partitionDeletion.markedForDeleteAt() != Long.MIN_VALUE)
-                digest.update(ByteBufferUtil.bytes(partitionDeletion.markedForDeleteAt()));
-
-            if (!rangeTombstones.isEmpty())
-                rangeTombstones.updateDigest(digest);
-        }
-    }
-
     public static class LegacyCellName
     {
         public final Clustering clustering;
@@ -1822,7 +1344,7 @@ public abstract class LegacyLayout
      * This class is needed to allow us to convert single-row deletions and complex deletions into range tombstones
      * and properly merge them into the normal set of range tombstones.
      */
-    public static class LegacyRangeTombstoneList
+    private static class LegacyRangeTombstoneList
     {
         private final LegacyBoundComparator comparator;
 

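An aside for readers without the pre-3.0 sources handy: the removed encodeBound and encodeClustering methods packed clustering values into a single composite buffer via CompositeType. Below is a minimal, self-contained sketch of that layout, assuming the standard pre-3.0 composite format (a 16-bit component length, the component bytes, and a one-byte end-of-component marker that buildAsEndOfRange set non-zero so exclusive bounds sort correctly); the class name is hypothetical.

    import java.nio.ByteBuffer;

    // Hypothetical sketch of the composite cell-name layout that the removed
    // encodeClustering produced through CompositeType.build().
    public class LegacyCompositeSketch
    {
        // eoc is the end-of-component byte: 0 for a complete component,
        // non-zero (the buildAsEndOfRange case) to place a bound before or
        // after every name sharing the same prefix.
        public static ByteBuffer encode(byte eoc, ByteBuffer... components)
        {
            int size = 0;
            for (ByteBuffer c : components)
                size += 2 + c.remaining() + 1;

            ByteBuffer out = ByteBuffer.allocate(size);
            for (int i = 0; i < components.length; i++)
            {
                ByteBuffer c = components[i];
                out.putShort((short) c.remaining());                  // 16-bit length
                out.put(c.duplicate());                               // component bytes
                out.put(i == components.length - 1 ? eoc : (byte) 0); // end-of-component
            }
            out.flip();
            return out;
        }
    }
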
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Memtable.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java
index a063bf4..987381c 100644
--- a/src/java/org/apache/cassandra/db/Memtable.java
+++ b/src/java/org/apache/cassandra/db/Memtable.java
@@ -449,9 +449,9 @@ public class Memtable implements Comparable<Memtable>
             this.isBatchLogTable = cfs.name.equals(SystemKeyspace.BATCHES) && cfs.keyspace.getName().equals(SchemaConstants.SYSTEM_KEYSPACE_NAME);
 
             if (flushLocation == null)
-                writer = createFlushWriter(txn, cfs.getSSTablePath(getDirectories().getWriteableLocationAsFile(estimatedSize)), columnsCollector.get(), statsCollector.get());
+                writer = createFlushWriter(txn, cfs.newSSTableDescriptor(getDirectories().getWriteableLocationAsFile(estimatedSize)), columnsCollector.get(), statsCollector.get());
             else
-                writer = createFlushWriter(txn, cfs.getSSTablePath(getDirectories().getLocationForDisk(flushLocation)), columnsCollector.get(), statsCollector.get());
+                writer = createFlushWriter(txn, cfs.newSSTableDescriptor(getDirectories().getLocationForDisk(flushLocation)), columnsCollector.get(), statsCollector.get());
 
         }
 
@@ -503,14 +503,14 @@ public class Memtable implements Comparable<Memtable>
         }
 
         public SSTableMultiWriter createFlushWriter(LifecycleTransaction txn,
-                                                  String filename,
-                                                  PartitionColumns columns,
-                                                  EncodingStats stats)
+                                                    Descriptor descriptor,
+                                                    PartitionColumns columns,
+                                                    EncodingStats stats)
         {
             MetadataCollector sstableMetadataCollector = new MetadataCollector(cfs.metadata.comparator)
                     .commitLogIntervals(new IntervalSet<>(commitLogLowerBound.get(), commitLogUpperBound.get()));
 
-            return cfs.createSSTableMultiWriter(Descriptor.fromFilename(filename),
+            return cfs.createSSTableMultiWriter(descriptor,
                                                 toFlush.size(),
                                                 ActiveRepairService.UNREPAIRED_SSTABLE,
                                                 sstableMetadataCollector,

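The hunks above also change createFlushWriter to take the structured Descriptor directly, so the flush path no longer builds an sstable path String only to re-parse it with Descriptor.fromFilename. A hedged sketch of that parse-once pattern follows, with MiniDescriptor as a hypothetical stand-in for the real Descriptor:

    import java.io.File;

    // Build the structured descriptor once at the call site and pass it
    // through, instead of a String path that must be re-parsed downstream.
    public class DescriptorSketch
    {
        static final class MiniDescriptor
        {
            final File directory;
            final int generation;
            MiniDescriptor(File directory, int generation)
            {
                this.directory = directory;
                this.generation = generation;
            }
        }

        static MiniDescriptor newSSTableDescriptor(File dir)
        {
            return new MiniDescriptor(dir, 1); // constructed exactly once
        }

        static void createFlushWriter(MiniDescriptor desc)
        {
            System.out.println("flushing generation " + desc.generation + " to " + desc.directory);
        }

        public static void main(String[] args)
        {
            createFlushWriter(newSSTableDescriptor(new File("/tmp/ks-table")));
        }
    }
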
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/Mutation.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/Mutation.java b/src/java/org/apache/cassandra/db/Mutation.java
index b08d6e5..5a571e8 100644
--- a/src/java/org/apache/cassandra/db/Mutation.java
+++ b/src/java/org/apache/cassandra/db/Mutation.java
@@ -368,21 +368,9 @@ public class Mutation implements IMutation
     {
         public void serialize(Mutation mutation, DataOutputPlus out, int version) throws IOException
         {
-            if (version < MessagingService.VERSION_20)
-                out.writeUTF(mutation.getKeyspaceName());
-
             /* serialize the modifications in the mutation */
             int size = mutation.modifications.size();
-
-            if (version < MessagingService.VERSION_30)
-            {
-                ByteBufferUtil.writeWithShortLength(mutation.key().getKey(), out);
-                out.writeInt(size);
-            }
-            else
-            {
-                out.writeUnsignedVInt(size);
-            }
+            out.writeUnsignedVInt(size);
 
             assert size > 0;
             for (Map.Entry<UUID, PartitionUpdate> entry : mutation.modifications.entrySet())
@@ -391,24 +379,10 @@ public class Mutation implements IMutation
 
         public Mutation deserialize(DataInputPlus in, int version, SerializationHelper.Flag flag) throws IOException
         {
-            if (version < MessagingService.VERSION_20)
-                in.readUTF(); // read pre-2.0 keyspace name
-
-            ByteBuffer key = null;
-            int size;
-            if (version < MessagingService.VERSION_30)
-            {
-                key = ByteBufferUtil.readWithShortLength(in);
-                size = in.readInt();
-            }
-            else
-            {
-                size = (int)in.readUnsignedVInt();
-            }
-
+            int size = (int)in.readUnsignedVInt();
             assert size > 0;
 
-            PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, flag, key);
+            PartitionUpdate update = PartitionUpdate.serializer.deserialize(in, version, flag);
             if (size == 1)
                 return new Mutation(update);
 
@@ -418,7 +392,7 @@ public class Mutation implements IMutation
             modifications.put(update.metadata().cfId, update);
             for (int i = 1; i < size; ++i)
             {
-                update = PartitionUpdate.serializer.deserialize(in, version, flag, dk);
+                update = PartitionUpdate.serializer.deserialize(in, version, flag);
                 modifications.put(update.metadata().cfId, update);
             }
 
@@ -432,22 +406,7 @@ public class Mutation implements IMutation
 
         public long serializedSize(Mutation mutation, int version)
         {
-            int size = 0;
-
-            if (version < MessagingService.VERSION_20)
-                size += TypeSizes.sizeof(mutation.getKeyspaceName());
-
-            if (version < MessagingService.VERSION_30)
-            {
-                int keySize = mutation.key().getKey().remaining();
-                size += TypeSizes.sizeof((short) keySize) + keySize;
-                size += TypeSizes.sizeof(mutation.modifications.size());
-            }
-            else
-            {
-                size += TypeSizes.sizeofUnsignedVInt(mutation.modifications.size());
-            }
-
+            int size = TypeSizes.sizeofUnsignedVInt(mutation.modifications.size());
             for (Map.Entry<UUID, PartitionUpdate> entry : mutation.modifications.entrySet())
                 size += PartitionUpdate.serializer.serializedSize(entry.getValue(), version);
 

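With the version branches gone, the mutation wire format above reduces to an unsigned vint count followed by the serialized PartitionUpdates, back to back. A minimal sketch of that framing, using a LEB128-style varint purely for illustration (Cassandra's real codec lives in VIntCoding) and opaque byte arrays standing in for serialized updates:

    import java.io.DataOutput;
    import java.io.IOException;
    import java.util.List;

    // Hypothetical framing sketch: count as an unsigned varint, then each
    // serialized PartitionUpdate with no per-update key or length prefix.
    public class MutationFramingSketch
    {
        static void writeUnsignedVInt(DataOutput out, long v) throws IOException
        {
            while ((v & ~0x7FL) != 0)
            {
                out.writeByte((int) (v & 0x7F) | 0x80);
                v >>>= 7;
            }
            out.writeByte((int) v);
        }

        public static void serialize(List<byte[]> updates, DataOutput out) throws IOException
        {
            writeUnsignedVInt(out, updates.size()); // the pre-3.0 key + int prefix is gone
            for (byte[] update : updates)
                out.write(update);
        }
    }
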
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/MutationVerbHandler.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/MutationVerbHandler.java b/src/java/org/apache/cassandra/db/MutationVerbHandler.java
index 5888438..59247a2 100644
--- a/src/java/org/apache/cassandra/db/MutationVerbHandler.java
+++ b/src/java/org/apache/cassandra/db/MutationVerbHandler.java
@@ -21,7 +21,6 @@ import java.io.DataInputStream;
 import java.io.IOException;
 import java.net.InetAddress;
 
-import org.apache.cassandra.batchlog.LegacyBatchlogMigrator;
 import org.apache.cassandra.exceptions.WriteTimeoutException;
 import org.apache.cassandra.io.util.FastByteArrayInputStream;
 import org.apache.cassandra.net.*;
@@ -59,16 +58,10 @@ public class MutationVerbHandler implements IVerbHandler<Mutation>
 
         try
         {
-            if (message.version < MessagingService.VERSION_30 && LegacyBatchlogMigrator.isLegacyBatchlogMutation(message.payload))
-            {
-                LegacyBatchlogMigrator.handleLegacyMutation(message.payload);
-                reply(id, replyTo);
-            }
-            else
-                message.payload.applyFuture().thenAccept(o -> reply(id, replyTo)).exceptionally(wto -> {
-                    failed();
-                    return null;
-                });
+            message.payload.applyFuture().thenAccept(o -> reply(id, replyTo)).exceptionally(wto -> {
+                failed();
+                return null;
+            });
         }
         catch (WriteTimeoutException wto)
         {
@@ -76,10 +69,6 @@ public class MutationVerbHandler implements IVerbHandler<Mutation>
         }
     }
 
-    /**
-     * Older versions (< 1.0) will not send this message at all, hence we don't
-     * need to check the version of the data.
-     */
     private static void forwardToLocalNodes(Mutation mutation, MessagingService.Verb verb, byte[] forwardBytes, InetAddress from) throws IOException
     {
         try (DataInputStream in = new DataInputStream(new FastByteArrayInputStream(forwardBytes)))

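The surviving branch above acks the mutation only once applyFuture completes, recording failures instead of propagating them. A self-contained sketch of that apply-then-reply pattern, with applyAsync as a hypothetical stand-in for Mutation#applyFuture:

    import java.util.concurrent.CompletableFuture;

    public class AsyncApplySketch
    {
        static CompletableFuture<Void> applyAsync(Runnable mutation)
        {
            return CompletableFuture.runAsync(mutation); // stand-in for applyFuture()
        }

        public static void main(String[] args) throws Exception
        {
            applyAsync(() -> System.out.println("apply mutation"))
                .thenAccept(v -> System.out.println("reply to coordinator"))
                .exceptionally(t -> { System.out.println("record failure"); return null; })
                .get(); // block so the demo completes before the JVM exits
        }
    }
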
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4a246419/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
index 50b568e..3cabf75 100644
--- a/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
+++ b/src/java/org/apache/cassandra/db/PartitionRangeReadCommand.java
@@ -273,11 +273,9 @@ public class PartitionRangeReadCommand extends ReadCommand
         return Transformation.apply(iter, new CacheFilter());
     }
 
-    public MessageOut<ReadCommand> createMessage(int version)
+    public MessageOut<ReadCommand> createMessage()
     {
-        return dataRange().isPaging()
-             ? new MessageOut<>(MessagingService.Verb.PAGED_RANGE, this, pagedRangeSerializer)
-             : new MessageOut<>(MessagingService.Verb.RANGE_SLICE, this, rangeSliceSerializer);
+        return new MessageOut<>(MessagingService.Verb.RANGE_SLICE, this, serializer);
     }
 
     protected void appendCQLWhereClause(StringBuilder sb)

