cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jbel...@apache.org
Subject [1/8] git commit: merge from 1.1
Date Tue, 31 Jul 2012 05:10:07 GMT
Updated Branches:
  refs/heads/cassandra-1.1 f9d1f93f0 -> 271405656
  refs/heads/trunk 9ecda7230 -> 02308f6f9


merge from 1.1


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/02308f6f
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/02308f6f
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/02308f6f

Branch: refs/heads/trunk
Commit: 02308f6f9ae272fde101357a7d395f0118b80d0d
Parents: 9ecda72 2714056
Author: Jonathan Ellis <jbellis@apache.org>
Authored: Tue Jul 31 00:09:09 2012 -0500
Committer: Jonathan Ellis <jbellis@apache.org>
Committed: Tue Jul 31 00:09:51 2012 -0500

----------------------------------------------------------------------
 CHANGES.txt                                        |    1 +
 NEWS.txt                                           |   14 ++++
 conf/cassandra-env.sh                              |   22 +++----
 debian/cassandra.install                           |    1 +
 debian/changelog                                   |    6 ++
 .../cassandra/config/DatabaseDescriptor.java       |    4 -
 src/java/org/apache/cassandra/db/Directories.java  |    2 +-
 .../cassandra/db/commitlog/CommitLogSegment.java   |    1 +
 .../org/apache/cassandra/io/util/FileUtils.java    |   47 +++++++++++++++
 .../cassandra/io/util/MmappedSegmentedFile.java    |   27 +--------
 tools/bin/token-generator                          |   16 +++++
 11 files changed, 99 insertions(+), 42 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 09f4508,7348a99..c9f0551
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,42 -1,5 +1,43 @@@
 +1.2-dev
 + * clean up ioexceptions (CASSANDRA-2116)
 + * Introduce new json format with row level deletion (CASSANDRA-4054)
 + * remove redundant "name" column from schema_keyspaces (CASSANDRA-4433)
 + * improve "nodetool ring" handling of multi-dc clusters (CASSANDRA-3047)
 + * update NTS calculateNaturalEndpoints to be O(N log N) (CASSANDRA-3881)
 + * add UseCondCardMark XX jvm settings on jdk 1.7 (CASSANDRA-4366)
 + * split up rpc timeout by operation type (CASSANDRA-2819)
 + * rewrite key cache save/load to use only sequential i/o (CASSANDRA-3762)
 + * update MS protocol with a version handshake + broadcast address id
 +   (CASSANDRA-4311)
 + * multithreaded hint replay (CASSANDRA-4189)
 + * add inter-node message compression (CASSANDRA-3127)
 + * remove COPP (CASSANDRA-2479)
 + * Track tombstone expiration and compact when tombstone content is
 +   higher than a configurable threshold, default 20% (CASSANDRA-3442, 4234)
 + * update MurmurHash to version 3 (CASSANDRA-2975)
 + * (CLI) track elapsed time for `delete' operation (CASSANDRA-4060)
 + * (CLI) jline version is bumped to 1.0 to properly support
 +   'delete' key function (CASSANDRA-4132)
 + * Save IndexSummary into new SSTable 'Summary' component (CASSANDRA-2392, 4289)
 + * Add support for range tombstones (CASSANDRA-3708)
 + * Improve MessagingService efficiency (CASSANDRA-3617)
 + * Avoid ID conflicts from concurrent schema changes (CASSANDRA-3794)
 + * Set thrift HSHA server thread limit to unlimited by default (CASSANDRA-4277)
 + * Avoids double serialization of CF id in RowMutation messages
 +   (CASSANDRA-4293)
 + * stream compressed sstables directly with java nio (CASSANDRA-4297)
 + * Support multiple ranges in SliceQueryFilter (CASSANDRA-3885)
 + * Add column metadata to system column families (CASSANDRA-4018)
 + * (cql3) Always use composite types by default (CASSANDRA-4329)
 + * (cql3) Add support for set, map and list (CASSANDRA-3647)
 + * Validate date type correctly (CASSANDRA-4441)
 + * (cql3) Allow definitions with only a PK (CASSANDRA-4361)
 + * (cql3) Add support for row key composites (CASSANDRA-4179)
 + * improve DynamicEndpointSnitch by using reservoir sampling (CASSANDRA-4038)
 +
 +
  1.1.3
+  * munmap commitlog segments before rename (CASSANDRA-4337)
   * (JMX) rename getRangeKeySample to sampleKeyRange to avoid returning
     multi-MB results as an attribute (CASSANDRA-4452)
   * flush based on data size, not throughput; overwritten columns no 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/NEWS.txt
----------------------------------------------------------------------
diff --cc NEWS.txt
index c52bd89,b1d8969..04d1527
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -9,47 -9,19 +9,61 @@@ upgrade, just in case you need to roll 
  by version X, but the inverse is not necessarily the case.)
  
  
 +1.2
 +===
 +Upgrading
 +---------
 +    - 1.2 is NOT network-compatible with versions older than 1.0.  That
 +      means if you want to do a rolling, zero-downtime upgrade, you'll need
 +      to upgrade first to 1.0.x or 1.1.x, and then to 1.2.  1.2 retains
 +      the ability to read data files from Cassandra versions at least
 +      back to 0.6, so a non-rolling upgrade remains possible with just
 +      one step.
 +    - The hints schema was changed from 1.1 to 1.2. Cassandra automatically
 +      snapshots and then truncates the hints column family as part of
 +      starting up 1.2 for the first time.  Additionally, upgraded nodes
 +      will not store new hints destined for older (pre-1.2) nodes.  It is
 +      therefore recommended that you perform a cluster upgrade when all
 +      nodes are up.
 +    - The `nodetool removetoken` command (and corresponding JMX operation)
 +      have been renamed to `nodetool removenode`.  This function is
 +      incompatible with the earlier `nodetool removetoken`, and attempts to
 +      remove nodes in this way with a mixed 1.1 (or lower) / 1.2 cluster,
 +      are not supported.
 +    - The somewhat ill-conceived CollatingOrderPreservingPartitioner
 +      has been removed.  Use RandomPartitioner (recommended) or
 +      ByteOrderedPartitioner instead.
 +    - Global option hinted_handoff_throttle_delay_in_ms has been removed.
 +      hinted_handoff_throttle_in_kb has been added instead.
 +    - The default bloom filter fp chance has been increased to 1%.
 +      This will save about 30% of the memory used by the old default.
 +      Existing columnfamilies will retain their old setting.
 +
 +Features
 +--------
 +    - Cassandra can now handle concurrent CREATE TABLE schema changes
 +      as well as other updates
 +    - rpc_timeout has been split up to allow finer-grained control
 +      on timeouts for different operation types
 +    - num_tokens can now be specified in cassandra.yaml. This defines the
 +      number of tokens assigned to the host on the ring (default: 1).
 +      Also specifying initial_token will override any num_tokens setting.
 +
 +
+ 1.1.3
+ =====
+ 
+ Upgrading
+ ---------
+     - Nothing specific to this release, but please see 1.1 if you are upgrading
+       from a previous version.
+ 
+ Features
+ --------
+     - the cqlsh COPY command can now export to CSV flat files
+     - added a new tools/bin/token-generator to facilitate generating evenly distributed tokens
+ 
++
  1.1.2
  =====
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index e3b91c6,54486d0..796ba64
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@@ -40,9 -40,7 +40,8 @@@ import org.apache.cassandra.db.ColumnFa
  import org.apache.cassandra.db.DefsTable;
  import org.apache.cassandra.db.SystemTable;
  import org.apache.cassandra.dht.IPartitioner;
 +import org.apache.cassandra.io.FSWriteError;
  import org.apache.cassandra.io.util.FileUtils;
- import org.apache.cassandra.io.util.MmappedSegmentedFile;
  import org.apache.cassandra.locator.DynamicEndpointSnitch;
  import org.apache.cassandra.locator.EndpointSnitchInfo;
  import org.apache.cassandra.locator.IEndpointSnitch;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/src/java/org/apache/cassandra/db/Directories.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/src/java/org/apache/cassandra/db/commitlog/CommitLogSegment.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/src/java/org/apache/cassandra/io/util/FileUtils.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/io/util/FileUtils.java
index fec0cff,5ed6276..42b240b
--- a/src/java/org/apache/cassandra/io/util/FileUtils.java
+++ b/src/java/org/apache/cassandra/io/util/FileUtils.java
@@@ -18,9 -18,13 +18,12 @@@
  package org.apache.cassandra.io.util;
  
  import java.io.*;
+ import java.lang.reflect.InvocationTargetException;
+ import java.lang.reflect.Method;
+ import java.nio.MappedByteBuffer;
  import java.text.DecimalFormat;
 +import java.util.Arrays;
  import java.util.Comparator;
 -import java.util.List;
  
  import org.slf4j.Logger;
  import org.slf4j.LoggerFactory;
@@@ -32,49 -35,30 +35,67 @@@ import org.apache.cassandra.utils.CLibr
  
  public class FileUtils
  {
 -    private static Logger logger_ = LoggerFactory.getLogger(FileUtils.class);
 -    private static final DecimalFormat df_ = new DecimalFormat("#.##");
 -    private static final double kb_ = 1024d;
 -    private static final double mb_ = 1024*1024d;
 -    private static final double gb_ = 1024*1024*1024d;
 -    private static final double tb_ = 1024*1024*1024*1024d;
 +    private static final Logger logger = LoggerFactory.getLogger(FileUtils.class);
 +    private static final double KB = 1024d;
 +    private static final double MB = 1024*1024d;
 +    private static final double GB = 1024*1024*1024d;
 +    private static final double TB = 1024*1024*1024*1024d;
  
 -    private static final Method cleanerMethod = initCleaner();
 +    private static final DecimalFormat df = new DecimalFormat("#.##");
  
 -    private static Method initCleaner()
++    private static final Method cleanerMethod;
++
++    static
+     {
++        Method m;
+         try
+         {
 -            return Class.forName("sun.nio.ch.DirectBuffer").getMethod("cleaner");
++            m = Class.forName("sun.nio.ch.DirectBuffer").getMethod("cleaner");
+         }
+         catch (Exception e)
+         {
+             // Perhaps a non-sun-derived JVM - contributions welcome
 -            logger_.info("Cannot initialize un-mmaper.  (Are you using a non-SUN JVM?)  Compacted data files will not be removed promptly.  Consider using a SUN JVM or using standard disk access mode");
 -            return null;
++            logger.info("Cannot initialize un-mmaper.  (Are you using a non-SUN JVM?)  Compacted data files will not be removed promptly.  Consider using a SUN JVM or using standard disk access mode");
++            m = null;
++        }
++        cleanerMethod = m;
++    }
++
 +    public static void createHardLink(File from, File to)
 +    {
 +        if (to.exists())
 +            throw new RuntimeException("Tried to create duplicate hard link to " + to);
 +        if (!from.exists())
 +            throw new RuntimeException("Tried to hard link to file that does not exist " + from);
 +
 +        try
 +        {
 +            CLibrary.createHardLink(from, to);
 +        }
 +        catch (IOException e)
 +        {
 +            throw new FSWriteError(e, to);
 +        }
 +    }
 +
 +    public static File createTempFile(String prefix, String suffix, File directory)
 +    {
 +        try
 +        {
 +            return File.createTempFile(prefix, suffix, directory);
          }
 +        catch (IOException e)
 +        {
 +            throw new FSWriteError(e, directory);
 +        }
 +    }
 +
 +    public static File createTempFile(String prefix, String suffix)
 +    {
 +        return createTempFile(prefix, suffix, new File(System.getProperty("java.io.tmpdir")));
      }
  
 -    public static void deleteWithConfirm(String file) throws IOException
 +    public static void deleteWithConfirm(String file)
      {
          deleteWithConfirm(new File(file));
      }
@@@ -174,30 -137,32 +195,56 @@@
              throw e;
      }
  
 +    public static String getCanonicalPath(String filename)
 +    {
 +        try
 +        {
 +            return new File(filename).getCanonicalPath();
 +        }
 +        catch (IOException e)
 +        {
 +            throw new FSReadError(e, filename);
 +        }
 +    }
 +
 +    public static String getCanonicalPath(File file)
 +    {
 +        try
 +        {
 +            return file.getCanonicalPath();
 +        }
 +        catch (IOException e)
 +        {
 +            throw new FSReadError(e, file);
 +        }
 +    }
 +
+     public static boolean isCleanerAvailable()
+     {
+         return cleanerMethod != null;
+     }
+ 
+     public static void clean(MappedByteBuffer buffer)
+     {
+         try
+         {
+             Object cleaner = cleanerMethod.invoke(buffer);
+             cleaner.getClass().getMethod("clean").invoke(cleaner);
+         }
+         catch (IllegalAccessException e)
+         {
+             throw new RuntimeException(e);
+         }
+         catch (InvocationTargetException e)
+         {
+             throw new RuntimeException(e);
+         }
+         catch (NoSuchMethodException e)
+         {
+             throw new RuntimeException(e);
+         }
+     }
+ 
      public static class FileComparator implements Comparator<File>
      {
          public int compare(File f, File f2)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/02308f6f/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
index f3620e3,5ebbe24..cb812bc
--- a/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
+++ b/src/java/org/apache/cassandra/io/util/MmappedSegmentedFile.java
@@@ -7,18 -9,22 +7,17 @@@
   * "License"); you may not use this file except in compliance
   * with the License.  You may obtain a copy of the License at
   *
 - *   http://www.apache.org/licenses/LICENSE-2.0
 - *
 - * Unless required by applicable law or agreed to in writing,
 - * software distributed under the License is distributed on an
 - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 - * KIND, either express or implied.  See the License for the
 - * specific language governing permissions and limitations
 - * under the License.
 + *     http://www.apache.org/licenses/LICENSE-2.0
   *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
   */
 +package org.apache.cassandra.io.util;
  
 -
 -import java.io.File;
 -import java.io.IOError;
 -import java.io.IOException;
 -import java.io.RandomAccessFile;
 +import java.io.*;
- import java.lang.reflect.Method;
  import java.nio.MappedByteBuffer;
  import java.nio.channels.FileChannel;
  import java.util.ArrayList;
@@@ -80,33 -82,22 +77,15 @@@ public class MmappedSegmentedFile exten
          }
  
          // not mmap'd: open a braf covering the segment
 -        try
 -        {
 -            // FIXME: brafs are unbounded, so this segment will cover the rest of the file, rather than just the row
 -            RandomAccessReader file = RandomAccessReader.open(new File(path));
 -            file.seek(position);
 -            return file;
 -        }
 -        catch (IOException e)
 -        {
 -            throw new IOError(e);
 -        }
 +        // FIXME: brafs are unbounded, so this segment will cover the rest of the file, rather than just the row
 +        RandomAccessReader file = RandomAccessReader.open(new File(path));
 +        file.seek(position);
 +        return file;
      }
  
-     public static void initCleaner()
-     {
-         try
-         {
-             cleanerMethod = Class.forName("sun.nio.ch.DirectBuffer").getMethod("cleaner");
-         }
-         catch (Exception e)
-         {
-             // Perhaps a non-sun-derived JVM - contributions welcome
-             logger.info("Cannot initialize un-mmaper.  (Are you using a non-SUN JVM?)  Compacted data files will not be removed promptly.  Consider using a SUN JVM or using standard disk access mode");
-         }
-     }
- 
-     public static boolean isCleanerAvailable()
-     {
-         return cleanerMethod != null;
-     }
- 
      public void cleanup()
      {
-         if (cleanerMethod == null)
+         if (!FileUtils.isCleanerAvailable())
              return;
  
          /*


Mime
View raw message