cassandra-commits mailing list archives

Site index · List index
Message view « Date » · « Thread »
Top « Date » · « Thread »
From jbel...@apache.org
Subject svn commit: r936510 - in /cassandra/trunk: ./ conf/ lib/ src/java/org/apache/cassandra/config/ src/java/org/apache/cassandra/db/ src/java/org/apache/cassandra/db/commitlog/ src/java/org/apache/cassandra/io/sstable/ src/java/org/apache/cassandra/service...
Date Wed, 21 Apr 2010 21:05:27 GMT
Author: jbellis
Date: Wed Apr 21 21:05:26 2010
New Revision: 936510

URL: http://svn.apache.org/viewvc?rev=936510&view=rev
Log:
convert cassandra.xml to cassandra.yaml.  patch by Todd Blose and Stu Hood for CASSANDRA-990

Added:
    cassandra/trunk/conf/cassandra.yaml
    cassandra/trunk/lib/snakeyaml-1.6.jar
Removed:
    cassandra/trunk/conf/cassandra.xml
Modified:
    cassandra/trunk/NEWS.txt
    cassandra/trunk/conf/log4j-server.properties
    cassandra/trunk/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
    cassandra/trunk/src/java/org/apache/cassandra/db/Table.java
    cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
    cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLogExecutorService.java
    cassandra/trunk/src/java/org/apache/cassandra/io/sstable/IndexSummary.java
    cassandra/trunk/src/java/org/apache/cassandra/io/sstable/RowIndexedReader.java
    cassandra/trunk/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java
    cassandra/trunk/src/java/org/apache/cassandra/service/StorageService.java
    cassandra/trunk/src/java/org/apache/cassandra/service/StorageServiceMBean.java
    cassandra/trunk/src/java/org/apache/cassandra/utils/XMLUtils.java
    cassandra/trunk/test/conf/cassandra.xml
    cassandra/trunk/test/system/__init__.py
    cassandra/trunk/test/unit/org/apache/cassandra/SchemaLoader.java
    cassandra/trunk/test/unit/org/apache/cassandra/client/TestRingCache.java
    cassandra/trunk/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java

Modified: cassandra/trunk/NEWS.txt
URL: http://svn.apache.org/viewvc/cassandra/trunk/NEWS.txt?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/NEWS.txt (original)
+++ cassandra/trunk/NEWS.txt Wed Apr 21 21:05:26 2010
@@ -12,16 +12,16 @@ Features
 
 Configuration
 ------------
-    - Configuration file renamed to cassandra.xml and log4j.properties to
+    - Configuration file renamed to cassandra.yaml and log4j.properties to
       log4j-server.properties
     - The ThriftAddress and ThriftPort directives have been renamed to
       RPCAddress and RPCPort respectively.
-    - The keyspaces defined in storage-conf.xml are ignored on startup as a
+    - The keyspaces defined in cassandra.yaml are ignored on startup as a
       result of CASSANDRA-44.  A JMX method has been exposed in the 
-      StorageServiceMBean to force a schema load from storage-conf.xml. It
+      StorageServiceMBean to force a schema load from cassandra.yaml. It
       is a one-shot affair though and you should conduct it on a seed node
       before other nodes. Subsequent restarts will load the schema from the 
-      system table and attempts to load the schema from XML will be ignored.  
+      system table and attempts to load the schema from YAML will be ignored.  
      You should only have to do this for one node since new nodes will receive
       schema updates on startup from the seed node you updated manually. 
     - EndPointSnitch was renamed to RackInferringSnitch.  A new SimpleSnitch

Added: cassandra/trunk/conf/cassandra.yaml
URL: http://svn.apache.org/viewvc/cassandra/trunk/conf/cassandra.yaml?rev=936510&view=auto
==============================================================================
--- cassandra/trunk/conf/cassandra.yaml (added)
+++ cassandra/trunk/conf/cassandra.yaml Wed Apr 21 21:05:26 2010
@@ -0,0 +1,185 @@
+# Cassandra storage config YAML
+# See http://wiki.apache.org/cassandra/StorageConfiguration for explanations of configuration directives.
+
+# name of the cluster
+cluster_name: 'Test Cluster'
+
+# Set to true to make new [non-seed] nodes automatically migrate the right data to themselves. 
+auto_bootstrap: false
+
+# authentication backend, implementing IAuthenticator; used to limit keyspace access
+authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
+
+# any IPartitioner may be used, including your own as long as it is on the classpath. 
+# Out of the box, Cassandra provides org.apache.cassandra.dht.RandomPartitioner
+# org.apache.cassandra.dht.OrderPreservingPartitioner, and 
+# org.apache.cassandra.dht.CollatingOrderPreservingPartitioner.
+partitioner: org.apache.cassandra.dht.RandomPartitioner
+
+# directories where Cassandra should store data on disk.
+data_file_directories:
+    - /var/lib/cassandra/data
+
+# Addresses of hosts that are deemed contact points. 
+# Cassandra nodes use this list of hosts to find each other and learn the topology of the ring. 
+# You must change this if you are running multiple nodes!
+seeds:
+    - 127.0.0.1
+
+# Access mode.  mmapped i/o is substantially faster, but only practical on
+# a 64bit machine (which notably does not include EC2 "small" instances)
+# or relatively small datasets.  "auto", the safe choice, will enable
+# mmapping on a 64bit JVM.  Other values are "mmap", "mmap_index_only"
+# (which may allow you to get part of the benefits of mmap on a 32bit
+# machine by mmapping only index files) and "standard".
+# (The buffer size settings that follow only apply to standard,
+# non-mmapped i/o.)
+disk_access_mode: auto
+
+# Unlike most systems, in Cassandra writes are faster than reads, so
+# you can afford more of those in parallel.  A good rule of thumb is 2
+# concurrent reads per processor core.  Increase ConcurrentWrites to
+# the number of clients writing at once if you enable CommitLogSync +
+# CommitLogSyncDelay.
+concurrent_reads: 8
+concurrent_writes: 32
+
+# Buffer size to use when performing contiguous column slices. 
+# Increase this to the size of the column slices you typically perform
+sliced_buffer_size_in_kb: 64
+
+# TCP port, for commands and data
+storage_port: 7000
+
+# Address to bind to and tell other nodes to connect to. You _must_ change this if you want multiple nodes to be able to communicate!
+listen_address: localhost
+
+# The address to bind the Thrift RPC service to
+rpc_address: localhost
+# port for Thrift to listen on
+rpc_port: 9160
+# Whether or not to use a framed transport for Thrift.
+thrift_framed_transport: false
+snapshot_before_compaction: false
+
+# The threshold size in megabytes the binary memtable must grow to, before it's submitted for flushing to disk. 
+binary_memtable_throughput_in_mb: 256
+# Number of minutes to keep a memtable in memory
+memtable_flush_after_mins: 60
+# Size of the memtable in memory before it is dumped
+memtable_throughput_in_mb: 64
+# Number of objects in millions in the memtable before it is dumped
+memtable_operations_in_millions: 0.3
+# Buffer size to use when flushing !memtables to disk.
+flush_data_buffer_size_in_mb: 32
+# Increase (decrease) the index buffer size relative to the data buffer if you have few (many) columns per key.
+flush_index_buffer_size_in_mb: 8
+
+column_index_size_in_kb: 64
+row_warning_threshold_in_mb: 512
+
+# commit log
+commitlog_directory: /var/lib/cassandra/commitlog
+
+# Size to allow commitlog to grow to before creating a new segment 
+commitlog_rotation_threshold_in_mb: 128
+
+# commitlog_sync may be either "periodic" or "batch." 
+# When in batch mode, Cassandra won't ack writes until the commit log has been fsynced to disk. 
+# It will wait up to commitlog_sync_batch_window_in_ms milliseconds for other writes, before performing the sync. 
+commitlog_sync: periodic
+
+# the other option is "timed," where writes may be acked immediately and the CommitLog 
+# is simply synced every commitlog_sync_period_in_ms milliseconds. 
+commitlog_sync_period_in_ms: 1000
+
+# Time to wait for a reply from other nodes before failing the command 
+rpc_timeout_in_ms: 5000
+
+# time to wait before garbage collecting tombstones (deletion markers)
+gc_grace_seconds: 864000
+
+# A ColumnFamily is the Cassandra concept closest to a relational table. 
+# Keyspaces are separate groups of ColumnFamilies. 
+# Except in very unusual circumstances you will have one Keyspace per application. 
+#
+# keyspace required parameters: name, replica_placement_strategy, replication_factor, and endpoint_snitch
+#
+# name -- name of the keyspace; "system" is reserved for Cassandra Internals.
+#
+# replica_placement_strategy -- Strategy: Setting this to the class that implements
+# IReplicaPlacementStrategy will change the way the node picker works.
+# Out of the box, Cassandra provides
+# org.apache.cassandra.locator.RackUnawareStrategy and
+# org.apache.cassandra.locator.RackAwareStrategy (place one replica in
+# a different datacenter, and the others on different racks in the same one.)
+#
+#
+# replication_factor -- Number of replicas of the data
+#
+# endpoint_snitch -- Setting this to the class that implements
+# AbstractEndpointSnitch, which lets Cassandra know enough
+# about your network topology to route requests efficiently.
+# Out of the box, Cassandra provides org.apache.cassandra.locator.EndPointSnitch,
+# and PropertyFileEndPointSnitch is available in contrib/.
+#
+# 
+# column_families -- column families associated with this keyspace
+#
+# compare_with -- tells Cassandra how to sort the columns for slicing operations. The default is BytesType, 
+# which is a straightforward lexical comparison of the bytes in each column. 
+# Other options are AsciiType, UTF8Type, LexicalUUIDType, TimeUUIDType, and LongType. 
+# You can also specify the fully-qualified class name to a class of your choice extending org.apache.cassandra.db.marshal.AbstractType. 
+#
+#
+# keys_cached -- optional. defaults to 200000 keys. 
+# specifies the number of keys per sstable whose locations we keep in
+# memory in "mostly LRU" order.  (JUST the key locations, NOT any
+# column values.) Specify a fraction (value less than 1) or an absolute number of keys to cache.
+#
+# rows_cached -- optional. defaults to 0. (i.e. row caching is off by default)
+# specifies the number of rows whose entire contents we cache in memory. Do not use this on
+# ColumnFamilies with large rows, or ColumnFamilies with high write:read
+# ratios. Specify a fraction (value less than 1) or an absolute number of rows to cache.
+#
+#
+# comment -- optional. used to attach additional human-readable information about the column family to its definition.
+#
+# 
+# read_repair_chance -- optional. must be between 0 and 1. defaults to 1.0 (always read repair).
+# specifies the probability with which read repairs should be invoked on non-quorum reads.
+#
+#
+keyspaces:
+    - name: Keyspace1
+      replica_placement_strategy: org.apache.cassandra.locator.RackUnawareStrategy
+      replication_factor: 1
+      endpoint_snitch: org.apache.cassandra.locator.EndPointSnitch
+      column_families:
+        - name: Standard1
+          compare_with: BytesType
+
+        - name: Standard2
+          compare_with: UTF8Type
+          read_repair_chance: 0.1
+          keys_cached: 100
+
+        - name: StandardByUUID1
+          compare_with: TimeUUIDType
+
+        - name: Super1
+          column_type: Super
+          compare_with: BytesType
+          compare_subcolumns_with: BytesType
+
+        - name: Super2
+          column_type: Super
+          compare_subcolumns_with: UTF8Type
+          rows_cached: 10000
+          keys_cached: 50
+          comment: 'A column family with supercolumns, whose column and subcolumn names are UTF8 strings'
+
+        - name: Super3
+          column_type: Super
+          compare_with: LongType
+          comment: 'A column family with supercolumns, whose column names are Longs (8 bytes)'

Modified: cassandra/trunk/conf/log4j-server.properties
URL: http://svn.apache.org/viewvc/cassandra/trunk/conf/log4j-server.properties?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/conf/log4j-server.properties (original)
+++ cassandra/trunk/conf/log4j-server.properties Wed Apr 21 21:05:26 2010
@@ -18,7 +18,7 @@
 # and the pattern to %c instead of %l.  (%l is slower.)
 
 # output messages into a rolling log file as well as stdout
-log4j.rootLogger=INFO,stdout,R
+log4j.rootLogger=DEBUG,stdout,R
 
 # stdout
 log4j.appender.stdout=org.apache.log4j.ConsoleAppender

Added: cassandra/trunk/lib/snakeyaml-1.6.jar
URL: http://svn.apache.org/viewvc/cassandra/trunk/lib/snakeyaml-1.6.jar?rev=936510&view=auto
==============================================================================
Files cassandra/trunk/lib/snakeyaml-1.6.jar (added) and cassandra/trunk/lib/snakeyaml-1.6.jar Wed Apr 21 21:05:26 2010 differ

Modified: cassandra/trunk/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/config/DatabaseDescriptor.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/config/DatabaseDescriptor.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/config/DatabaseDescriptor.java Wed Apr 21 21:05:26 2010
@@ -28,23 +28,23 @@ import org.apache.cassandra.db.marshal.T
 import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.migration.Migration;
 import org.apache.cassandra.dht.IPartitioner;
-import org.apache.cassandra.locator.IEndpointSnitch;
 import org.apache.cassandra.locator.AbstractReplicationStrategy;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.utils.FBUtilities;
-import org.apache.cassandra.utils.XMLUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
-import org.w3c.dom.Node;
-import org.w3c.dom.NodeList;
-import org.xml.sax.SAXException;
-
-import javax.xml.parsers.ParserConfigurationException;
-import javax.xml.transform.TransformerException;
-import javax.xml.xpath.XPathExpressionException;
+import org.apache.cassandra.locator.IEndpointSnitch;
+
+import org.yaml.snakeyaml.Loader;
+import org.yaml.snakeyaml.TypeDescription;
+import org.yaml.snakeyaml.Yaml;
+import org.yaml.snakeyaml.error.YAMLException;
+
 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.io.InputStream;
 import java.lang.reflect.Constructor;
 import java.lang.reflect.InvocationTargetException;
 import java.util.*;
@@ -56,90 +56,38 @@ public class DatabaseDescriptor
 {
     private static Logger logger = LoggerFactory.getLogger(DatabaseDescriptor.class);
 
-    // don't capitalize these; we need them to match what's in the config file for CLS.valueOf to parse
-    public static enum CommitLogSync {
-        periodic,
-        batch
-    }
-
-    public static enum DiskAccessMode {
-        auto,
-        mmap,
-        mmap_index_only,
-        standard,
-    }
-
     public static final String random = "RANDOM";
     public static final String ophf = "OPHF";
     private static IEndpointSnitch snitch;
-    private static int storagePort = 7000;
-    private static int rpcPort = 9160;
-    private static boolean thriftFramed = false;
     private static InetAddress listenAddress; // leave null so we can fall through to getLocalHost
     private static InetAddress rpcAddress;
-    private static String clusterName = "Test";
-    private static long rpcTimeoutInMillis = 2000;
     private static Set<InetAddress> seeds = new HashSet<InetAddress>();
-    /* Keeps the list of data file directories */
-    private static String[] dataFileDirectories;
     /* Current index into the above list of directories */
     private static int currentIndex = 0;
-    private static String logFileDirectory;
     private static int consistencyThreads = 4; // not configurable
-    private static int concurrentReaders = 8;
-    private static int concurrentWriters = 32;
 
-    private static double flushDataBufferSizeInMB = 32;
-    private static double flushIndexBufferSizeInMB = 8;
-    private static int slicedReadBufferSizeInKB = 64;
 
     static Map<String, KSMetaData> tables = new HashMap<String, KSMetaData>();
-    private static int bmtThreshold = 256;
-    /* if this a row exceeds this threshold, we issue warnings during compaction */
-    private static long rowWarningThreshold = 512 * 1024 * 1024;
 
     /* Hashing strategy Random or OPHF */
     private static IPartitioner partitioner;
 
-    /* if the size of columns or super-columns are more than this, indexing will kick in */
-    private static int columnIndexSizeInKB;
-    /* Number of minutes to keep a memtable in memory */
-    private static int memtableLifetimeMs = 60 * 60 * 1000;
-    /* Size of the memtable in memory before it is dumped */
-    private static int memtableThroughput = 64;
-    /* Number of objects in millions in the memtable before it is dumped */
-    private static double memtableOperations = 0.1;
-    /* Job Jar Location */
-    private static String jobJarFileLocation;
-    /* Address where to run the job tracker */
-    private static String jobTrackerHost;    
-    /* time to wait before garbage collecting tombstones (deletion markers) */
-    private static int gcGraceInSeconds = 10 * 24 * 3600; // 10 days
-
-    // the path qualified config file (storage-conf.xml) name
+    // the path qualified config file (cassandra.yaml) name
     private static String configFileName;
-    /* initial token in the ring */
-    private static String initialToken = null;
 
-    private static CommitLogSync commitLogSync;
-    private static double commitLogSyncBatchMS;
-    private static int commitLogSyncPeriodMS;
-
-    private static DiskAccessMode diskAccessMode;
-    private static DiskAccessMode indexAccessMode;
-
-    private static boolean snapshotBeforeCompaction;
-    private static boolean autoBootstrap = false;
+    private static Config.DiskAccessMode indexAccessMode;
+    
+    private static Config conf;
 
     private static IAuthenticator authenticator = new AllowAllAuthenticator();
 
-    private final static String STORAGE_CONF_FILE = "cassandra.xml";
+    private final static String STORAGE_CONF_FILE = "cassandra.yaml";
 
     private static final UUID INITIAL_VERSION = new UUID(4096, 0); // has type nibble set to 1, everything else to zero.
     private static UUID defsVersion = INITIAL_VERSION;
 
     /**
-     * Try the storage-config system property, and then inspect the classpath.
+     * Inspect the classpath to find STORAGE_CONF_FILE.
      */
     static String getStorageConfigPath() throws ConfigurationException
     {
@@ -156,316 +104,176 @@ public class DatabaseDescriptor
     {
         try
         {
+            
             configFileName = getStorageConfigPath();
+            
             if (logger.isDebugEnabled())
                 logger.debug("Loading settings from " + configFileName);
-            XMLUtils xmlUtils = new XMLUtils(configFileName);
-
-            /* Cluster Name */
-            clusterName = xmlUtils.getNodeValue("/Storage/ClusterName");
-
-            String syncRaw = xmlUtils.getNodeValue("/Storage/CommitLogSync");
-            try
-            {
-                commitLogSync = CommitLogSync.valueOf(syncRaw);
-            }
-            catch (IllegalArgumentException e)
-            {
-                throw new ConfigurationException("CommitLogSync must be either 'periodic' or 'batch'");
-            }
-            if (commitLogSync == null)
+            
+            InputStream input = new FileInputStream(new File(configFileName));
+            org.yaml.snakeyaml.constructor.Constructor constructor = new org.yaml.snakeyaml.constructor.Constructor(Config.class);
+            TypeDescription desc = new TypeDescription(Config.class);
+            TypeDescription ksDesc = new TypeDescription(Keyspace.class);
+            ksDesc.putListPropertyType("column_families", ColumnFamily.class);
+            desc.putListPropertyType("keyspaces", Keyspace.class);
+            constructor.addTypeDescription(desc);
+            constructor.addTypeDescription(ksDesc);
+            Yaml yaml = new Yaml(new Loader(constructor));
+            conf = (Config)yaml.load(input);
+            
+            if (conf.commitlog_sync == null)
             {
                 throw new ConfigurationException("Missing required directive CommitLogSync");
             }
-            else if (commitLogSync == CommitLogSync.batch)
+
+            if (conf.commitlog_sync == Config.CommitLogSync.batch)
             {
-                try
-                {
-                    commitLogSyncBatchMS = Double.valueOf(xmlUtils.getNodeValue("/Storage/CommitLogSyncBatchWindowInMS"));
-                }
-                catch (Exception e)
+                if (conf.commitlog_sync_batch_window_in_ms == null)
                 {
-                    throw new ConfigurationException("Unrecognized value for CommitLogSyncBatchWindowInMS.  Double expected.");
-                }
-                if (xmlUtils.getNodeValue("/Storage/CommitLogSyncPeriodInMS") != null)
+                    throw new ConfigurationException("Missing value for commitlog_sync_batch_window_in_ms: Double expected.");
+                } 
+                else if (conf.commitlog_sync_period_in_ms != null)
                 {
-                    throw new ConfigurationException("Batch sync specified, but CommitLogSyncPeriodInMS found.  Only specify CommitLogSyncBatchWindowInMS when using batch sync.");
+                    throw new ConfigurationException("Batch sync specified, but commitlog_sync_period_in_ms found. Only specify commitlog_sync_batch_window_in_ms when using batch sync");
                 }
-                logger.debug("Syncing log with a batch window of " + commitLogSyncBatchMS);
+                logger.debug("Syncing log with a batch window of " + conf.commitlog_sync_batch_window_in_ms);
             }
             else
             {
-                assert commitLogSync == CommitLogSync.periodic;
-                try
-                {
-                    commitLogSyncPeriodMS = Integer.valueOf(xmlUtils.getNodeValue("/Storage/CommitLogSyncPeriodInMS"));
-                }
-                catch (Exception e)
+                if (conf.commitlog_sync_period_in_ms == null)
                 {
-                    throw new ConfigurationException("Unrecognized value for CommitLogSyncPeriodInMS.  Integer expected.");
+                    throw new ConfigurationException("Missing value for commitlog_sync_period_in_ms: Integer expected");
                 }
-                if (xmlUtils.getNodeValue("/Storage/CommitLogSyncBatchWindowInMS") != null)
+                else if (conf.commitlog_sync_batch_window_in_ms != null)
                 {
-                    throw new ConfigurationException("Periodic sync specified, but CommitLogSyncBatchWindowInMS found.  Only specify CommitLogSyncPeriodInMS when using periodic sync.");
+                    throw new ConfigurationException("commitlog_sync_period_in_ms specified, but commitlog_sync_batch_window_in_ms found.  Only specify commitlog_sync_period_in_ms when using periodic sync.");
                 }
-                logger.debug("Syncing log with a period of " + commitLogSyncPeriodMS);
-            }
-
-            String modeRaw = xmlUtils.getNodeValue("/Storage/DiskAccessMode");
-            try
-            {
-                diskAccessMode = DiskAccessMode.valueOf(modeRaw);
+                logger.debug("Syncing log with a period of " + conf.commitlog_sync_period_in_ms);
             }
-            catch (IllegalArgumentException e)
-            {
-                throw new ConfigurationException("DiskAccessMode must be either 'auto', 'mmap', 'mmap_index_only', or 'standard'");
-            }
-            if (diskAccessMode == DiskAccessMode.auto)
+            
+            if (conf.disk_access_mode == Config.DiskAccessMode.auto)
             {
-                diskAccessMode = System.getProperty("os.arch").contains("64") ? DiskAccessMode.mmap : DiskAccessMode.standard;
-                indexAccessMode = diskAccessMode;
-                logger.info("Auto DiskAccessMode determined to be " + diskAccessMode);
+                conf.disk_access_mode = System.getProperty("os.arch").contains("64") ? Config.DiskAccessMode.mmap : Config.DiskAccessMode.standard;
+                indexAccessMode = conf.disk_access_mode;
+                logger.info("Auto DiskAccessMode determined to be " + conf.disk_access_mode);
             }
-            else if (diskAccessMode == DiskAccessMode.mmap_index_only)
+            else if (conf.disk_access_mode == Config.DiskAccessMode.mmap_index_only)
             {
-                diskAccessMode = DiskAccessMode.standard;
-                indexAccessMode = DiskAccessMode.mmap;
+                conf.disk_access_mode = Config.DiskAccessMode.standard;
+                indexAccessMode = Config.DiskAccessMode.mmap;
             }
             else
             {
-                indexAccessMode = diskAccessMode;
+                indexAccessMode = conf.disk_access_mode;
             }
 
             /* Authentication and authorization backend, implementing IAuthenticator */
-            String authenticatorClassName = xmlUtils.getNodeValue("/Storage/Authenticator");
-            if (authenticatorClassName != null)
+            if (conf.authenticator != null)
             {
                 try
                 {
-                    Class cls = Class.forName(authenticatorClassName);
+                    Class cls = Class.forName(conf.authenticator);
                     authenticator = (IAuthenticator) cls.getConstructor().newInstance();
                 }
                 catch (ClassNotFoundException e)
                 {
-                    throw new ConfigurationException("Invalid authenticator class " + authenticatorClassName);
+                    throw new ConfigurationException("Invalid authenticator class " + conf.authenticator);
                 }
             }
 
             authenticator.validateConfiguration();
             
             /* Hashing strategy */
-            String partitionerClassName = xmlUtils.getNodeValue("/Storage/Partitioner");
-            if (partitionerClassName == null)
+            if (conf.partitioner == null)
             {
-                throw new ConfigurationException("Missing partitioner directive /Storage/Partitioner");
+                throw new ConfigurationException("Missing directive: partitioner");
             }
             try
             {
-                Class cls = Class.forName(partitionerClassName);
+                Class cls = Class.forName(conf.partitioner);
                 partitioner = (IPartitioner) cls.getConstructor().newInstance();
             }
             catch (ClassNotFoundException e)
             {
-                throw new ConfigurationException("Invalid partitioner class " + partitionerClassName);
+                throw new ConfigurationException("Invalid partitioner class " + conf.partitioner);
             }
 
-            /* JobTracker address */
-            jobTrackerHost = xmlUtils.getNodeValue("/Storage/JobTrackerHost");
-
-            /* Job Jar file location */
-            jobJarFileLocation = xmlUtils.getNodeValue("/Storage/JobJarFileLocation");
-
-            String gcGrace = xmlUtils.getNodeValue("/Storage/GCGraceSeconds");
-            if ( gcGrace != null )
-                gcGraceInSeconds = Integer.parseInt(gcGrace);
-
-            initialToken = xmlUtils.getNodeValue("/Storage/InitialToken");
-
-            /* RPC Timeout */
-            String rpcTimeout = xmlUtils.getNodeValue("/Storage/RpcTimeoutInMillis");
-            if ( rpcTimeout != null )
-                rpcTimeoutInMillis = Integer.parseInt(rpcTimeout);
-
             /* Thread per pool */
-            String rawReaders = xmlUtils.getNodeValue("/Storage/ConcurrentReads");
-            if (rawReaders != null)
-            {
-                concurrentReaders = Integer.parseInt(rawReaders);
-            }
-            if (concurrentReaders < 2)
+            if (conf.concurrent_reads != null && conf.concurrent_reads < 2) 
             {
-                throw new ConfigurationException("ConcurrentReads must be at least 2");
+                throw new ConfigurationException("concurrent_reads must be at least 2");
             }
 
-            String rawWriters = xmlUtils.getNodeValue("/Storage/ConcurrentWrites");
-            if (rawWriters != null)
-            {
-                concurrentWriters = Integer.parseInt(rawWriters);
-            }
-            if (concurrentWriters < 2)
+            if (conf.concurrent_writes != null && conf.concurrent_writes < 2)
             {
-                throw new ConfigurationException("ConcurrentWrites must be at least 2");
+                throw new ConfigurationException("concurrent_writes must be at least 2");
             }
-
-            String rawFlushData = xmlUtils.getNodeValue("/Storage/FlushDataBufferSizeInMB");
-            if (rawFlushData != null)
-            {
-                flushDataBufferSizeInMB = Double.parseDouble(rawFlushData);
-            }
-            String rawFlushIndex = xmlUtils.getNodeValue("/Storage/FlushIndexBufferSizeInMB");
-            if (rawFlushIndex != null)
-            {
-                flushIndexBufferSizeInMB = Double.parseDouble(rawFlushIndex);
-            }
-
-            String rawSlicedBuffer = xmlUtils.getNodeValue("/Storage/SlicedBufferSizeInKB");
-            if (rawSlicedBuffer != null)
-            {
-                slicedReadBufferSizeInKB = Integer.parseInt(rawSlicedBuffer);
-            }
-
-            String bmtThresh = xmlUtils.getNodeValue("/Storage/BinaryMemtableThroughputInMB");
-            if (bmtThresh != null)
-            {
-                bmtThreshold = Integer.parseInt(bmtThresh);
-            }
-
-            /* TCP port on which the storage system listens */
-            String port = xmlUtils.getNodeValue("/Storage/StoragePort");
-            if ( port != null )
-                storagePort = Integer.parseInt(port);
-
+            
             /* Local IP or hostname to bind services to */
-            String listenAddr = xmlUtils.getNodeValue("/Storage/ListenAddress");
-            if (listenAddr != null)
+            if (conf.listen_address != null)
             {
-                if (listenAddr.equals("0.0.0.0"))
-                    throw new ConfigurationException("ListenAddress must be a single interface.  See http://wiki.apache.org/cassandra/FAQ#cant_listen_on_ip_any");
+                if (conf.listen_address.equals("0.0.0.0"))
+                {
+                    throw new ConfigurationException("listen_address must be a single interface.  See http://wiki.apache.org/cassandra/FAQ#cant_listen_on_ip_any");
+                }
+                
                 try
                 {
-                    listenAddress = InetAddress.getByName(listenAddr);
+                    listenAddress = InetAddress.getByName(conf.listen_address);
                 }
                 catch (UnknownHostException e)
                 {
-                    throw new ConfigurationException("Unknown ListenAddress '" + listenAddr + "'");
+                    throw new ConfigurationException("Unknown listen_address '" + conf.listen_address + "'");
                 }
             }
-
+            
             /* Local IP or hostname to bind RPC server to */
-            String rpcAddr = xmlUtils.getNodeValue("/Storage/RPCAddress");
-            if ( rpcAddr != null )
-                rpcAddress = InetAddress.getByName(rpcAddr);
-
-
-            /* get the RPC port from conf file */
-            port = xmlUtils.getNodeValue("/Storage/RPCPort");
-            if (port != null)
-                rpcPort = Integer.parseInt(port);
-
-            /* Framed (Thrift) transport (default to "no") */
-            String framedRaw = xmlUtils.getNodeValue("/Storage/ThriftFramedTransport");
-            if (framedRaw != null)
-            {
-                if (framedRaw.equalsIgnoreCase("true") || framedRaw.equalsIgnoreCase("false"))
-                {
-                    thriftFramed = Boolean.valueOf(framedRaw);
-                }
-                else
-                {
-                    throw new ConfigurationException("Unrecognized value for ThriftFramedTransport.  Use 'true' or 'false'.");
-                }
-            }
-
+            if (conf.rpc_address != null)
+                rpcAddress = InetAddress.getByName(conf.rpc_address);
+            
             /* end point snitch */
-            String endpointSnitchClassName = xmlUtils.getNodeValue("/Storage/EndpointSnitch");
-            if (endpointSnitchClassName == null)
+            if (conf.endpoint_snitch == null)
             {
-                throw new ConfigurationException("Missing endpointsnitch directive");
+                throw new ConfigurationException("Missing endpoint_snitch directive");
             }
-            snitch = createEndpointSnitch(endpointSnitchClassName);
-
-            /* snapshot-before-compaction.  defaults to false */
-            String sbc = xmlUtils.getNodeValue("/Storage/SnapshotBeforeCompaction");
-            if (sbc != null)
-            {
-                if (sbc.equalsIgnoreCase("true") || sbc.equalsIgnoreCase("false"))
-                {
-                    if (logger.isDebugEnabled())
-                        logger.debug("setting snapshotBeforeCompaction to " + sbc);
-                    snapshotBeforeCompaction = Boolean.valueOf(sbc);
-                }
-                else
-                {
-                    throw new ConfigurationException("Unrecognized value for SnapshotBeforeCompaction.  Use 'true' or 'false'.");
-                }
-            }
-
-            /* snapshot-before-compaction.  defaults to false */
-            String autoBootstr = xmlUtils.getNodeValue("/Storage/AutoBootstrap");
-            if (autoBootstr != null)
+            snitch = createEndpointSnitch(conf.endpoint_snitch);
+            
+            if (logger.isDebugEnabled() && conf.auto_bootstrap != null)
             {
-                if (autoBootstr.equalsIgnoreCase("true") || autoBootstr.equalsIgnoreCase("false"))
-                {
-                    if (logger.isDebugEnabled())
-                        logger.debug("setting autoBootstrap to " + autoBootstr);
-                    autoBootstrap = Boolean.valueOf(autoBootstr);
-                }
-                else
-                {
-                    throw new ConfigurationException("Unrecognized value for AutoBootstrap.  Use 'true' or 'false'.");
-                }
+                logger.debug("setting auto_bootstrap to " + conf.auto_bootstrap);
             }
-
-            /* Number of days to keep the memtable around w/o flushing */
-            String lifetime = xmlUtils.getNodeValue("/Storage/MemtableFlushAfterMinutes");
-            if (lifetime != null)
-                memtableLifetimeMs = Integer.parseInt(lifetime) * 60 * 1000;
-
-            /* Size of the memtable in memory in MB before it is dumped */
-            String memtableSize = xmlUtils.getNodeValue("/Storage/MemtableThroughputInMB");
-            if ( memtableSize != null )
-                memtableThroughput = Integer.parseInt(memtableSize);
+            
             /* Number of objects in millions in the memtable before it is dumped */
-            String memtableObjectCount = xmlUtils.getNodeValue("/Storage/MemtableOperationsInMillions");
-            if ( memtableObjectCount != null )
-                memtableOperations = Double.parseDouble(memtableObjectCount);
-            if (memtableOperations <= 0)
+            if (conf.memtable_operations_in_millions != null && conf.memtable_operations_in_millions <= 0)
             {
-                throw new ConfigurationException("Memtable object count must be a positive double");
+                throw new ConfigurationException("memtable_operations_in_millions must be a positive double");
             }
-
-            /* read the size at which we should do column indexes */
-            String columnIndexSize = xmlUtils.getNodeValue("/Storage/ColumnIndexSizeInKB");
-            if(columnIndexSize == null)
-            {
-                columnIndexSizeInKB = 64;
-            }
-            else
+            
+            if (conf.row_warning_threshold_in_mb != null && conf.row_warning_threshold_in_mb <= 0)
             {
-                columnIndexSizeInKB = Integer.parseInt(columnIndexSize);
+                throw new ConfigurationException("row_warning_threshold_in_mb must be a positive integer");
             }
-
-            String rowWarning = xmlUtils.getNodeValue("/Storage/RowWarningThresholdInMB");
-            if (rowWarning != null)
+            
+            /* data file and commit log directories. they get created later, when they're needed. */
+            if (conf.commitlog_directory != null && conf.data_file_directories != null)
             {
-                rowWarningThreshold = Long.parseLong(rowWarning) * 1024 * 1024;
-                if (rowWarningThreshold <= 0)
-                    throw new ConfigurationException("Row warning threshold must be a positive integer");
+                for (String datadir : conf.data_file_directories)
+                {
+                    if (datadir.equals(conf.commitlog_directory))
+                        throw new ConfigurationException("commitlog_directory must not be the same as any data_file_directories");
+                }
             }
-            /* data file and commit log directories. they get created later, when they're needed. */
-            dataFileDirectories = xmlUtils.getNodeValues("/Storage/DataFileDirectories/DataFileDirectory");
-            logFileDirectory = xmlUtils.getNodeValue("/Storage/CommitLogDirectory");
-
-            for (String datadir : dataFileDirectories)
+            else
             {
-                if (datadir.equals(logFileDirectory))
-                    throw new ConfigurationException("CommitLogDirectory must not be the same as any DataFileDirectory");
+                if (conf.commitlog_directory == null)
+                    throw new ConfigurationException("commitlog_directory missing");
+                if (conf.data_file_directories == null)
+                    throw new ConfigurationException("data_file_directories missing; at least one data directory must be specified");
             }
 
             /* threshold after which commit log should be rotated. */
-            String value = xmlUtils.getNodeValue("/Storage/CommitLogRotationThresholdInMB");
-            if ( value != null)
-                CommitLog.setSegmentSize(Integer.parseInt(value) * 1024 * 1024);
+            if (conf.commitlog_rotation_threshold_in_mb != null)
+                CommitLog.setSegmentSize(conf.commitlog_rotation_threshold_in_mb * 1024 * 1024);
 
             // Hardcoded system tables
             final CFMetaData[] systemCfDefs = new CFMetaData[]
@@ -502,14 +310,13 @@ public class DatabaseDescriptor
             CFMetaData.fixMaxId();
             
             /* Load the seeds for node contact points */
-            String[] seedsxml = xmlUtils.getNodeValues("/Storage/Seeds/Seed");
-            if (seedsxml.length <= 0)
+            if (conf.seeds == null || conf.seeds.length <= 0)
             {
-                throw new ConfigurationException("A minimum of one seed is required.");
+                throw new ConfigurationException("seeds missing; a minimum of one seed is required.");
             }
-            for( int i = 0; i < seedsxml.length; ++i )
+            for( int i = 0; i < conf.seeds.length; ++i )
             {
-                seeds.add(InetAddress.getByName(seedsxml[i]));
+                seeds.add(InetAddress.getByName(conf.seeds[i]));
             }
         }
         catch (ConfigurationException e)
@@ -518,6 +325,12 @@ public class DatabaseDescriptor
             System.err.println("Bad configuration; unable to start server");
             System.exit(1);
         }
+        catch (YAMLException e)
+        {
+            logger.error("Fatal error: " + e.getMessage());
+            System.err.println("Bad configuration; unable to start server");
+            System.exit(1);
+        }
         catch (Exception e)
         {
             throw new RuntimeException(e);
@@ -565,7 +378,7 @@ public class DatabaseDescriptor
         }
         return snitch;
     }
-
+    
     public static void loadSchemas() throws IOException
     {
         // we can load tables from local storage if a version is set in the system table and that acutally maps to
@@ -586,192 +399,98 @@ public class DatabaseDescriptor
             }
             
             // since we loaded definitions from local storage, log a warning if definitions exist in xml.
-            try
-            {
-                XMLUtils xmlUtils = new XMLUtils(configFileName);
-                NodeList tablesxml = xmlUtils.getRequestedNodeList("/Storage/Keyspaces/Keyspace");
-                if (tablesxml.getLength() > 0)
-                    logger.warn("Schema definitions were defined both locally and in storage-conf.xml. Definitions in storage-conf.xml were ignored.");
-            }
-            catch (Exception ex)
-            {
-                logger.warn("Problem checking for schema defintions in xml", ex);
-            }
+            
+            if (conf.keyspaces.size() > 0)
+                logger.warn("Schema definitions were defined both locally and in " + STORAGE_CONF_FILE +
+                    ". Definitions in " + STORAGE_CONF_FILE + " were ignored.");
+            
         }
         CFMetaData.fixMaxId();
     }
 
     /** reads xml. doesn't populate any internal structures. */
-    public static Collection<KSMetaData> readTablesFromXml() throws ConfigurationException
+    public static Collection<KSMetaData> readTablesFromYaml() throws ConfigurationException
     {
         List<KSMetaData> defs = new ArrayList<KSMetaData>();
-        XMLUtils xmlUtils = null;
-        try
-        {
-            xmlUtils = new XMLUtils(configFileName);
-        }
-        catch (ParserConfigurationException e)
-        {
-            ConfigurationException ex = new ConfigurationException(e.getMessage());
-            ex.initCause(e);
-            throw ex;
-        }
-        catch (SAXException e)
-        {
-            ConfigurationException ex = new ConfigurationException(e.getMessage());
-            ex.initCause(e);
-            throw ex;
-        }
-        catch (IOException e)
-        {
-            ConfigurationException ex = new ConfigurationException(e.getMessage());
-            ex.initCause(e);
-            throw ex;
-        }
-
+        
+        
         /* Read the table related stuff from config */
-        try
+        for (Keyspace keyspace : conf.keyspaces)
         {
-            NodeList tablesxml = xmlUtils.getRequestedNodeList("/Storage/Keyspaces/Keyspace");
-            int size = tablesxml.getLength();
-            for ( int i = 0; i < size; ++i )
-            {
-                String value = null;
-                Node table = tablesxml.item(i);
-
-                /* parsing out the table ksName */
-                String ksName = XMLUtils.getAttributeValue(table, "Name");
-                if (ksName == null)
-                {
-                    throw new ConfigurationException("Table name attribute is required");
-                }
-                if (ksName.equalsIgnoreCase(Table.SYSTEM_TABLE))
+            /* parsing out the table name */
+            if (keyspace.name == null)
+            {
+                throw new ConfigurationException("Keyspace name attribute is required");
+            }
+            
+            if (keyspace.name.equalsIgnoreCase(Table.SYSTEM_TABLE))
+            {
+                throw new ConfigurationException("'system' is a reserved table name for Cassandra internals");
+            }
+            
+            /* See which replica placement strategy to use */
+            if (keyspace.replica_placement_strategy == null)
+            {
+                throw new ConfigurationException("Missing replica_placement_strategy directive for " + keyspace.name);
+            }
+            Class<? extends AbstractReplicationStrategy> strategyClass = null;
+            try
+            {
+                strategyClass = (Class<? extends AbstractReplicationStrategy>) Class.forName(keyspace.replica_placement_strategy);
+            }
+            catch (ClassNotFoundException e)
+            {
+                throw new ConfigurationException("Invalid replica_placement_strategy class " + keyspace.replica_placement_strategy);
+            }
+            
+            /* Data replication factor */
+            if (keyspace.replication_factor == null)
+            {
+                throw new ConfigurationException("Missing replication_factor directive for keyspace " + keyspace.name);
+            }
+            
+            int size2 = keyspace.column_families.length;
+            CFMetaData[] cfDefs = new CFMetaData[size2];
+            int j = 0;
+            for (ColumnFamily cf : keyspace.column_families)
+            {
+                if (cf.name == null)
                 {
-                    throw new ConfigurationException("'system' is a reserved table name for Cassandra internals");
+                    throw new ConfigurationException("ColumnFamily name attribute is required");
                 }
-
-                /* See which replica placement strategy to use */
-                value = xmlUtils.getNodeValue("/Storage/Keyspaces/Keyspace[@Name='" + ksName + "']/ReplicaPlacementStrategy");
-                if (value == null)
+                if (cf.name.contains("-"))
                 {
-                    throw new ConfigurationException("Missing replicaplacementstrategy directive for " + ksName);
+                    throw new ConfigurationException("ColumnFamily names cannot contain hyphens");
                 }
-                Class<? extends AbstractReplicationStrategy> strategyClass = null;
-                try
+                
+                String columnType = org.apache.cassandra.db.ColumnFamily.getColumnType(cf.column_type);
+                if (columnType == null)
                 {
-                    strategyClass = (Class<? extends AbstractReplicationStrategy>) Class.forName(value);
+                    throw new ConfigurationException("ColumnFamily " + cf.name + " has invalid type " + cf.column_type);
                 }
-                catch (ClassNotFoundException e)
+                
+                // Parse out the column comparator
+                AbstractType comparator = getComparator(cf.compare_with);
+                AbstractType subcolumnComparator = null;
+                if (columnType.equals("Super"))
                 {
-                    throw new ConfigurationException("Invalid replicaplacementstrategy class " + value);
+                    subcolumnComparator = getComparator(cf.compare_subcolumns_with);
                 }
-
-                /* Data replication factor */
-                value = xmlUtils.getNodeValue("/Storage/Keyspaces/Keyspace[@Name='" + ksName + "']/ReplicationFactor");
-                int replicationFactor = -1;
-                if (value == null)
-                    throw new ConfigurationException("Missing replicationfactor directory for keyspace " + ksName);
-                else
+                else if (cf.compare_subcolumns_with != null)
                 {
-                    replicationFactor = Integer.parseInt(value);
+                    throw new ConfigurationException("compare_subcolumns_with is only a valid attribute on super columnfamilies (not regular columnfamily " + cf.name + ")");
                 }
-
-                String xqlTable = "/Storage/Keyspaces/Keyspace[@Name='" + ksName + "']/";
-                NodeList columnFamilies = xmlUtils.getRequestedNodeList(xqlTable + "ColumnFamily");
-
-                //NodeList columnFamilies = xmlUtils.getRequestedNodeList(table, "ColumnFamily");
-                int size2 = columnFamilies.getLength();
-                CFMetaData[] cfDefs = new CFMetaData[size2];
-                for ( int j = 0; j < size2; ++j )
-                {
-                    Node columnFamily = columnFamilies.item(j);
-                    String tableName = ksName;
-                    String cfName = XMLUtils.getAttributeValue(columnFamily, "Name");
-                    if (cfName == null)
-                    {
-                        throw new ConfigurationException("ColumnFamily name attribute is required");
-                    }
-                    if (cfName.contains("-"))
-                    {
-                        throw new ConfigurationException("ColumnFamily names cannot contain hyphens");
-                    }
-                    String xqlCF = xqlTable + "ColumnFamily[@Name='" + cfName + "']/";
-
-                    // Parse out the column type
-                    String rawColumnType = XMLUtils.getAttributeValue(columnFamily, "ColumnType");
-                    String columnType = ColumnFamily.getColumnType(rawColumnType);
-                    if (columnType == null)
-                    {
-                        throw new ConfigurationException("ColumnFamily " + cfName + " has invalid type " + rawColumnType);
-                    }
-
-                    if (XMLUtils.getAttributeValue(columnFamily, "ColumnSort") != null)
-                    {
-                        throw new ConfigurationException("ColumnSort is no longer an accepted attribute.  Use CompareWith instead.");
-                    }
-
-                    // Parse out the column comparator
-                    AbstractType comparator = getComparator(XMLUtils.getAttributeValue(columnFamily, "CompareWith"));
-                    AbstractType subcolumnComparator = null;
-                    if (columnType.equals("Super"))
-                    {
-                        subcolumnComparator = getComparator(XMLUtils.getAttributeValue(columnFamily, "CompareSubcolumnsWith"));
-                    }
-                    else if (XMLUtils.getAttributeValue(columnFamily, "CompareSubcolumnsWith") != null)
-                    {
-                        throw new ConfigurationException("CompareSubcolumnsWith is only a valid attribute on super columnfamilies (not regular columnfamily " + cfName + ")");
-                    }
-
-                    double keyCacheSize = CFMetaData.DEFAULT_KEY_CACHE_SIZE;
-                    if ((value = XMLUtils.getAttributeValue(columnFamily, "KeysCachedFraction")) != null)
-                    {
-                        keyCacheSize = Double.valueOf(value);
-                        // TODO: KeysCachedFraction deprecated: remove in 1.0
-                        logger.warn("KeysCachedFraction is deprecated: use KeysCached instead.");
-                    }
-                    if ((value = XMLUtils.getAttributeValue(columnFamily, "KeysCached")) != null)
-                    {
-                        keyCacheSize = FBUtilities.parseDoubleOrPercent(value);
-                    }
-
-                    double rowCacheSize = CFMetaData.DEFAULT_ROW_CACHE_SIZE;
-                    if ((value = XMLUtils.getAttributeValue(columnFamily, "RowsCached")) != null)
-                    {
-                        rowCacheSize = FBUtilities.parseDoubleOrPercent(value);
-                    }
-
-                    double readRepairChance = CFMetaData.DEFAULT_READ_REPAIR_CHANCE;
-                    if ((value = XMLUtils.getAttributeValue(columnFamily, "ReadRepairChance")) != null)
-                    {
-                        readRepairChance = FBUtilities.parseDoubleOrPercent(value);
-                        if (readRepairChance < 0.0 || readRepairChance > 1.0)
-                        {                        
-                            throw new ConfigurationException("ReadRepairChance must be between 0.0 and 1.0");
-                        }
-                    }
-
-                    // Parse out user-specified logical names for the various dimensions
-                    // of a the column family from the config.
-                    String comment = xmlUtils.getNodeValue(xqlCF + "Comment");
-
-                    // insert it into the table dictionary.
-                    cfDefs[j] = new CFMetaData(tableName, cfName, columnType, comparator, subcolumnComparator, comment, rowCacheSize, keyCacheSize, readRepairChance);
+                
+                if (cf.read_repair_chance < 0.0 || cf.read_repair_chance > 1.0)
+                {                        
+                    throw new ConfigurationException("read_repair_chance must be between 0.0 and 1.0");
                 }
-                defs.add(new KSMetaData(ksName, strategyClass, replicationFactor, cfDefs));
+                cfDefs[j++] = new CFMetaData(keyspace.name, cf.name, columnType, comparator, subcolumnComparator, cf.comment, cf.rows_cached, cf.keys_cached, cf.read_repair_chance);
             }
+            defs.add(new KSMetaData(keyspace.name, strategyClass, keyspace.replication_factor, cfDefs));
+            
         }
-        catch (XPathExpressionException e)
-        {
-            ConfigurationException ex = new ConfigurationException(e.getMessage());
-            ex.initCause(e);
-            throw ex;
-        }
-        catch (TransformerException e)
-        {
-            ConfigurationException ex = new ConfigurationException(e.getMessage());
-            ex.initCause(e);
-            throw ex;
-        }
+
         return defs;
     }
 
@@ -782,7 +501,7 @@ public class DatabaseDescriptor
 
     public static boolean isThriftFramed()
     {
-        return thriftFramed;
+        return conf.thrift_framed_transport;
     }
 
     public static AbstractType getComparator(String compareWith) throws ConfigurationException
@@ -843,17 +562,17 @@ public class DatabaseDescriptor
     public static void createAllDirectories() throws IOException
     {
         try {
-            if (dataFileDirectories.length == 0)
+            if (conf.data_file_directories.length == 0)
             {
                 throw new ConfigurationException("At least one DataFileDirectory must be specified");
             }
-            for ( String dataFileDirectory : dataFileDirectories )
+            for ( String dataFileDirectory : conf.data_file_directories )
                 FileUtils.createDirectory(dataFileDirectory);
-            if (logFileDirectory == null)
+            if (conf.commitlog_directory == null)
             {
-                throw new ConfigurationException("CommitLogDirectory must be specified");
+                throw new ConfigurationException("commitlog_directory must be specified");
             }
-            FileUtils.createDirectory(logFileDirectory);
+            FileUtils.createDirectory(conf.commitlog_directory);
         }
         catch (ConfigurationException ex) {
             logger.error("Fatal error: " + ex.getMessage());
@@ -864,7 +583,7 @@ public class DatabaseDescriptor
 
     public static int getGcGraceInSeconds()
     {
-        return gcGraceInSeconds;
+        return conf.gc_grace_seconds;
     }
 
     public static IPartitioner getPartitioner()
@@ -887,37 +606,37 @@ public class DatabaseDescriptor
     
     public static String getJobTrackerAddress()
     {
-        return jobTrackerHost;
+        return conf.job_tracker_host;
     }
     
     public static int getColumnIndexSize()
     {
-    	return columnIndexSizeInKB * 1024;
+    	return conf.column_index_size_in_kb * 1024;
     }
 
     public static int getMemtableLifetimeMS()
     {
-      return memtableLifetimeMs;
+      return conf.memtable_flush_after_mins * 60 * 1000;
     }
 
     public static String getInitialToken()
     {
-      return initialToken;
+      return conf.initial_token;
     }
 
     public static int getMemtableThroughput()
     {
-      return memtableThroughput;
+      return conf.memtable_throughput_in_mb;
     }
 
     public static double getMemtableOperations()
     {
-      return memtableOperations;
+      return conf.memtable_operations_in_millions;
     }
 
     public static String getClusterName()
     {
-        return clusterName;
+        return conf.cluster_name;
     }
 
     public static String getConfigFileName() {
@@ -926,7 +645,7 @@ public class DatabaseDescriptor
 
     public static String getJobJarLocation()
     {
-        return jobJarFileLocation;
+        return conf.job_jar_file_location;
     }
     
     public static Map<String, CFMetaData> getTableMetaData(String tableName)
@@ -976,12 +695,12 @@ public class DatabaseDescriptor
 
     public static int getStoragePort()
     {
-        return storagePort;
+        return conf.storage_port;
     }
 
     public static int getRpcPort()
     {
-        return rpcPort;
+        return conf.rpc_port;
     }
 
     public static int getReplicationFactor(String table)
@@ -996,7 +715,7 @@ public class DatabaseDescriptor
 
     public static long getRpcTimeout()
     {
-        return rpcTimeoutInMillis;
+        return conf.rpc_timeout_in_ms;
     }
 
     public static int getConsistencyThreads()
@@ -1006,22 +725,22 @@ public class DatabaseDescriptor
 
     public static int getConcurrentReaders()
     {
-        return concurrentReaders;
+        return conf.concurrent_reads;
     }
 
     public static int getConcurrentWriters()
     {
-        return concurrentWriters;
+        return conf.concurrent_writes;
     }
 
     public static long getRowWarningThreshold()
     {
-        return rowWarningThreshold;
+        return conf.row_warning_threshold_in_mb * 1024 * 1024;
     }
     
     public static String[] getAllDataFileLocations()
     {
-        return dataFileDirectories;
+        return conf.data_file_directories;
     }
 
     /**
@@ -1033,11 +752,11 @@ public class DatabaseDescriptor
      */
     public static String[] getAllDataFileLocationsForTable(String table)
     {
-        String[] tableLocations = new String[dataFileDirectories.length];
+        String[] tableLocations = new String[conf.data_file_directories.length];
 
-        for (int i = 0; i < dataFileDirectories.length; i++)
+        for (int i = 0; i < conf.data_file_directories.length; i++)
         {
-            tableLocations[i] = dataFileDirectories[i] + File.separator + table;
+            tableLocations[i] = conf.data_file_directories[i] + File.separator + table;
         }
 
         return tableLocations;
@@ -1045,14 +764,14 @@ public class DatabaseDescriptor
 
     public synchronized static String getNextAvailableDataLocation()
     {
-        String dataFileDirectory = dataFileDirectories[currentIndex];
-        currentIndex = (currentIndex + 1) % dataFileDirectories.length;
+        String dataFileDirectory = conf.data_file_directories[currentIndex];
+        currentIndex = (currentIndex + 1) % conf.data_file_directories.length;
         return dataFileDirectory;
     }
 
     public static String getLogFileLocation()
     {
-        return logFileDirectory;
+        return conf.commitlog_directory;
     }
 
     public static Set<InetAddress> getSeeds()
@@ -1183,60 +902,60 @@ public class DatabaseDescriptor
 
     public static double getCommitLogSyncBatchWindow()
     {
-        return commitLogSyncBatchMS;
+        return conf.commitlog_sync_batch_window_in_ms;
     }
 
     public static int getCommitLogSyncPeriod() {
-        return commitLogSyncPeriodMS;
+        return conf.commitlog_sync_period_in_ms;
     }
 
-    public static CommitLogSync getCommitLogSync()
+    public static Config.CommitLogSync getCommitLogSync()
     {
-        return commitLogSync;
+        return conf.commitlog_sync;
     }
 
-    public static DiskAccessMode getDiskAccessMode()
+    public static Config.DiskAccessMode getDiskAccessMode()
     {
-        return diskAccessMode;
+        return conf.disk_access_mode;
     }
 
-    public static DiskAccessMode getIndexAccessMode()
+    public static Config.DiskAccessMode getIndexAccessMode()
     {
         return indexAccessMode;
     }
 
     public static double getFlushDataBufferSizeInMB()
     {
-        return flushDataBufferSizeInMB;
+        return conf.flush_data_buffer_size_in_mb;
     }
 
     public static double getFlushIndexBufferSizeInMB()
     {
-        return flushIndexBufferSizeInMB;
+        return conf.flush_index_buffer_size_in_mb;
     }
 
     public static int getIndexedReadBufferSizeInKB()
     {
-        return columnIndexSizeInKB;
+        return conf.column_index_size_in_kb;
     }
 
     public static int getSlicedReadBufferSizeInKB()
     {
-        return slicedReadBufferSizeInKB;
+        return conf.sliced_buffer_size_in_kb;
     }
 
     public static int getBMTThreshold()
     {
-        return bmtThreshold;
+        return conf.binary_memtable_throughput_in_mb;
     }
 
     public static boolean isSnapshotBeforeCompaction()
     {
-        return snapshotBeforeCompaction;
+        return conf.snapshot_before_compaction;
     }
 
     public static boolean isAutoBootstrap()
     {
-        return autoBootstrap;
+        return conf.auto_bootstrap;
     }
 }

Modified: cassandra/trunk/src/java/org/apache/cassandra/db/Table.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/db/Table.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/db/Table.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/db/Table.java Wed Apr 21 21:05:26 2010
@@ -30,6 +30,7 @@ import com.google.common.base.Function;
 import com.google.common.collect.Iterables;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.db.commitlog.CommitLog;
 import org.apache.cassandra.db.commitlog.CommitLogSegment;
 import org.apache.cassandra.dht.Range;
@@ -238,7 +239,7 @@ public class Table 
     private Table(String table)
     {
         name = table;
-        waitForCommitLog = DatabaseDescriptor.getCommitLogSync() == DatabaseDescriptor.CommitLogSync.batch;
+        waitForCommitLog = DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch;
         // create data directories.
         for (String dataDir : DatabaseDescriptor.getAllDataFileLocations())
         {

Modified: cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLog.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLog.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLog.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLog.java Wed Apr 21 21:05:26 2010
@@ -22,6 +22,7 @@ import com.google.common.collect.HashMul
 import com.google.common.collect.Multimap;
 import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.KSMetaData;
 import org.apache.cassandra.db.ColumnFamily;
 import org.apache.cassandra.db.RowMutation;
@@ -117,7 +118,7 @@ public class CommitLog
         // All we need to do is create a new one.
         segments.add(new CommitLogSegment());
         
-        if (DatabaseDescriptor.getCommitLogSync() == DatabaseDescriptor.CommitLogSync.periodic)
+        if (DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.periodic)
         {
             final Runnable syncer = new WrappedRunnable()
             {

Modified: cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLogExecutorService.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLogExecutorService.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLogExecutorService.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/db/commitlog/CommitLogExecutorService.java Wed Apr 21 21:05:26 2010
@@ -30,6 +30,7 @@ import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.utils.WrappedRunnable;
 
 class CommitLogExecutorService extends AbstractExecutorService implements CommitLogExecutorServiceMBean
@@ -40,7 +41,7 @@ class CommitLogExecutorService extends A
 
     public CommitLogExecutorService()
     {
-        this(DatabaseDescriptor.getCommitLogSync() == DatabaseDescriptor.CommitLogSync.batch
+        this(DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch
              ? DatabaseDescriptor.getConcurrentWriters()
              : 1024 * Runtime.getRuntime().availableProcessors());
     }
@@ -52,7 +53,7 @@ class CommitLogExecutorService extends A
         {
             public void runMayThrow() throws Exception
             {
-                if (DatabaseDescriptor.getCommitLogSync() == DatabaseDescriptor.CommitLogSync.batch)
+                if (DatabaseDescriptor.getCommitLogSync() == Config.CommitLogSync.batch)
                 {
                     while (true)
                     {

Modified: cassandra/trunk/src/java/org/apache/cassandra/io/sstable/IndexSummary.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/io/sstable/IndexSummary.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/io/sstable/IndexSummary.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/io/sstable/IndexSummary.java Wed Apr 21 21:05:26 2010
@@ -27,6 +27,7 @@ import java.util.List;
 import java.util.Map;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.db.DecoratedKey;
 
 public class IndexSummary
@@ -42,7 +43,7 @@ public class IndexSummary
 
     public void maybeAddEntry(DecoratedKey decoratedKey, long dataPosition, long dataSize, long indexPosition, long nextIndexPosition)
     {
-        boolean spannedIndexEntry = DatabaseDescriptor.getIndexAccessMode() == DatabaseDescriptor.DiskAccessMode.mmap
+        boolean spannedIndexEntry = DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.mmap
                                     && RowIndexedReader.bufferIndex(indexPosition) != RowIndexedReader.bufferIndex(nextIndexPosition);
         if (keysWritten++ % INDEX_INTERVAL == 0 || spannedIndexEntry)
         {

Modified: cassandra/trunk/src/java/org/apache/cassandra/io/sstable/RowIndexedReader.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/io/sstable/RowIndexedReader.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/io/sstable/RowIndexedReader.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/io/sstable/RowIndexedReader.java Wed Apr 21 21:05:26 2010
@@ -37,6 +37,7 @@ import org.apache.cassandra.utils.FBUtil
 import org.apache.cassandra.utils.Pair;
 import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.config.DatabaseDescriptor;
+import org.apache.cassandra.config.Config;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.filter.QueryFilter;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -74,7 +75,7 @@ class RowIndexedReader extends SSTableRe
     {
         super(desc, partitioner);
 
-        if (DatabaseDescriptor.getIndexAccessMode() == DatabaseDescriptor.DiskAccessMode.mmap)
+        if (DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.mmap)
         {
             long indexLength = new File(indexFilename()).length();
             int bufferCount = 1 + (int) (indexLength / BUFFER_SIZE);
@@ -88,11 +89,11 @@ class RowIndexedReader extends SSTableRe
         }
         else
         {
-            assert DatabaseDescriptor.getIndexAccessMode() == DatabaseDescriptor.DiskAccessMode.standard;
+            assert DatabaseDescriptor.getIndexAccessMode() == Config.DiskAccessMode.standard;
             indexBuffers = null;
         }
 
-        if (DatabaseDescriptor.getDiskAccessMode() == DatabaseDescriptor.DiskAccessMode.mmap)
+        if (DatabaseDescriptor.getDiskAccessMode() == Config.DiskAccessMode.mmap)
         {
             int bufferCount = 1 + (int) (new File(getFilename()).length() / BUFFER_SIZE);
             buffers = new MappedByteBuffer[bufferCount];
@@ -105,7 +106,7 @@ class RowIndexedReader extends SSTableRe
         }
         else
         {
-            assert DatabaseDescriptor.getDiskAccessMode() == DatabaseDescriptor.DiskAccessMode.standard;
+            assert DatabaseDescriptor.getDiskAccessMode() == Config.DiskAccessMode.standard;
             buffers = null;
         }
 

Modified: cassandra/trunk/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/service/EmbeddedCassandraService.java Wed Apr 21 21:05:26 2010
@@ -36,7 +36,7 @@ import org.slf4j.LoggerFactory;
 
 /**
  * An embedded, in-memory cassandra storage service that listens
- * on the thrift interface as configured in storage-conf.xml
+ * on the thrift interface as configured in cassandra.yaml.
  * This kind of service is useful when running unit tests of
  * services using cassandra for example.
  *
@@ -48,8 +48,6 @@ import org.slf4j.LoggerFactory;
  * In the client code create a new thread and spawn it with its {@link Thread#start()} method.
  * Example:
  * <pre>
- *      // Tell cassandra where the configuration files are.
-        System.setProperty("storage-config", "conf");
 
         cassandra = new EmbeddedCassandraService();
         cassandra.init();

Modified: cassandra/trunk/src/java/org/apache/cassandra/service/StorageService.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/service/StorageService.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/service/StorageService.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/service/StorageService.java Wed Apr 21 21:05:26 2010
@@ -1576,16 +1576,16 @@ public class StorageService implements I
     }
 
     /**
-     * load schema from xml. This can only be done on a fresh system.
+     * load schema from yaml. This can only be done on a fresh system.
      * @throws ConfigurationException
      * @throws IOException
      */
-    public void loadSchemaFromXML() throws ConfigurationException, IOException
+    public void loadSchemaFromYAML() throws ConfigurationException, IOException
     { 
         // blow up if there is a schema saved.
         if (DatabaseDescriptor.getDefsVersion().timestamp() > 0 || Migration.getLastMigrationId() != null)
             throw new ConfigurationException("Cannot load from XML on top of pre-existing schemas.");
-        for (KSMetaData table : DatabaseDescriptor.readTablesFromXml())
+        for (KSMetaData table : DatabaseDescriptor.readTablesFromYaml())
             new AddKeyspace(table).apply();
         
         assert DatabaseDescriptor.getDefsVersion().timestamp() > 0;

Modified: cassandra/trunk/src/java/org/apache/cassandra/service/StorageServiceMBean.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/service/StorageServiceMBean.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/service/StorageServiceMBean.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/service/StorageServiceMBean.java Wed Apr 21 21:05:26 2010
@@ -172,8 +172,8 @@ public interface StorageServiceMBean
     public void drain() throws IOException, InterruptedException, ExecutionException;
 
     /**
-     * Introduced in 0.7 to allow nodes to load their existing xml defined schemas.
+     * Introduced in 0.7 to allow nodes to load their existing yaml defined schemas.
      * @todo: deprecate in 0.7+1, remove in 0.7+2.
      */ 
-    public void loadSchemaFromXML() throws ConfigurationException, IOException;
+    public void loadSchemaFromYAML() throws ConfigurationException, IOException;
 }

Modified: cassandra/trunk/src/java/org/apache/cassandra/utils/XMLUtils.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/src/java/org/apache/cassandra/utils/XMLUtils.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/src/java/org/apache/cassandra/utils/XMLUtils.java (original)
+++ cassandra/trunk/src/java/org/apache/cassandra/utils/XMLUtils.java Wed Apr 21 21:05:26 2010
@@ -85,11 +85,4 @@ public class XMLUtils
 		}
 		return value;
 	}
-
-    public static void main(String[] args) throws Throwable
-    {
-        XMLUtils xmlUtils = new XMLUtils("C:\\Engagements\\Cassandra-Golden\\storage-conf.xml");
-        String[] value = xmlUtils.getNodeValues("/Storage/Seeds/Seed");
-        System.out.println(value);
-    }
 }

Modified: cassandra/trunk/test/conf/cassandra.xml
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/conf/cassandra.xml?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/test/conf/cassandra.xml (original)
+++ cassandra/trunk/test/conf/cassandra.xml Wed Apr 21 21:05:26 2010
@@ -1,80 +0,0 @@
-<!--
- ~ Licensed to the Apache Software Foundation (ASF) under one
- ~ or more contributor license agreements.  See the NOTICE file
- ~ distributed with this work for additional information
- ~ regarding copyright ownership.  The ASF licenses this file
- ~ to you under the Apache License, Version 2.0 (the
- ~ "License"); you may not use this file except in compliance
- ~ with the License.  You may obtain a copy of the License at
- ~
- ~    http://www.apache.org/licenses/LICENSE-2.0
- ~
- ~ Unless required by applicable law or agreed to in writing,
- ~ software distributed under the License is distributed on an
- ~ "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- ~ KIND, either express or implied.  See the License for the
- ~ specific language governing permissions and limitations
- ~ under the License.
- -->
-<Storage>
-   <ClusterName>Test Cluster</ClusterName>
-   <FlushDataBufferSizeInMB>1</FlushDataBufferSizeInMB>
-   <FlushIndexBufferSizeInMB>0.1</FlushIndexBufferSizeInMB>
-   <CommitLogSync>batch</CommitLogSync>
-   <CommitLogSyncBatchWindowInMS>1.0</CommitLogSyncBatchWindowInMS>
-   <Partitioner>org.apache.cassandra.dht.CollatingOrderPreservingPartitioner</Partitioner>
-   <RpcTimeoutInMillis>5000</RpcTimeoutInMillis>
-   <ListenAddress>127.0.0.1</ListenAddress>
-   <StoragePort>7010</StoragePort>
-   <RPCPort>9170</RPCPort>
-   <ColumnIndexSizeInKB>4</ColumnIndexSizeInKB>
-   <CommitLogDirectory>build/test/cassandra/commitlog</CommitLogDirectory>
-   <CommitLogRotationThresholdInMB>128</CommitLogRotationThresholdInMB>
-   <DataFileDirectories>
-     <DataFileDirectory>build/test/cassandra/data</DataFileDirectory>
-   </DataFileDirectories>
-   <BootstrapFileDirectory>build/test/cassandra/bootstrap</BootstrapFileDirectory>
-   <DiskAccessMode>mmap</DiskAccessMode>
-   <MemtableThroughputInMB>1</MemtableThroughputInMB>
-   <MemtableOperationsInMillions>0.00002</MemtableOperationsInMillions> <!-- 20 -->
-   <EndpointSnitch>org.apache.cassandra.locator.SimpleSnitch</EndpointSnitch>
-   <Keyspaces>
-     <Keyspace Name = "Keyspace1">
-       <ColumnFamily Name="Standard1" RowsCached="10%" KeysCached="0"/>
-       <ColumnFamily Name="Standard2"/>
-       <ColumnFamily CompareWith="LongType" Name="StandardLong1"/>
-       <ColumnFamily CompareWith="LongType" Name="StandardLong2"/>
-       <ColumnFamily ColumnType="Super" CompareSubcolumnsWith="LongType" Name="Super1" RowsCached="1000" KeysCached="0"/>
-       <ColumnFamily ColumnType="Super" CompareSubcolumnsWith="LongType" Name="Super2"/>
-       <ColumnFamily ColumnType="Super" CompareSubcolumnsWith="LongType" Name="Super3"/>
-       <ColumnFamily ColumnType="Super" CompareSubcolumnsWith="UTF8Type" Name="Super4"/>
-       <ReplicaPlacementStrategy>org.apache.cassandra.locator.RackUnawareStrategy</ReplicaPlacementStrategy>
-       <ReplicationFactor>1</ReplicationFactor>
-     </Keyspace>
-     <Keyspace Name = "Keyspace2">
-       <ColumnFamily Name="Standard1"/>
-       <ColumnFamily Name="Standard3"/>
-       <ColumnFamily ColumnType="Super" Name="Super3"/>
-       <ColumnFamily ColumnType="Super" CompareSubcolumnsWith="TimeUUIDType" Name="Super4"/>
-       <ReplicaPlacementStrategy>org.apache.cassandra.locator.RackUnawareStrategy</ReplicaPlacementStrategy>
-       <ReplicationFactor>1</ReplicationFactor>
-     </Keyspace>
-     <Keyspace Name = "Keyspace3">
-       <ColumnFamily Name="Standard1"/>
-       <ReplicaPlacementStrategy>org.apache.cassandra.locator.RackUnawareStrategy</ReplicaPlacementStrategy>
-       <ReplicationFactor>5</ReplicationFactor>
-     </Keyspace>
-     <Keyspace Name = "Keyspace4">
-       <ColumnFamily Name="Standard1"/>
-       <ColumnFamily Name="Standard3"/>
-       <ColumnFamily ColumnType="Super" Name="Super3"/>
-       <ColumnFamily ColumnType="Super" CompareSubcolumnsWith="TimeUUIDType" Name="Super4"/>
-       <ReplicaPlacementStrategy>org.apache.cassandra.locator.RackUnawareStrategy</ReplicaPlacementStrategy>
-       <ReplicationFactor>3</ReplicationFactor>
-     </Keyspace>
-   </Keyspaces>
-   <Seeds>
-     <!-- we don't want this node to think it is a seed. -->
-     <Seed>127.0.0.2</Seed>
-   </Seeds>
-</Storage>

Modified: cassandra/trunk/test/system/__init__.py
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/system/__init__.py?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/test/system/__init__.py (original)
+++ cassandra/trunk/test/system/__init__.py Wed Apr 21 21:05:26 2010
@@ -80,7 +80,7 @@ class BaseTester(object):
 
             # clean out old stuff
             import shutil
-            # todo get directories from conf/storage-conf.xml
+            # todo get directories from conf/cassandra.yaml
             for dirname in ['system', 'data', 'commitlog']:
                 try:
                     shutil.rmtree('build/test/cassandra/' + dirname)

Modified: cassandra/trunk/test/unit/org/apache/cassandra/SchemaLoader.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/SchemaLoader.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/test/unit/org/apache/cassandra/SchemaLoader.java (original)
+++ cassandra/trunk/test/unit/org/apache/cassandra/SchemaLoader.java Wed Apr 21 21:05:26 2010
@@ -28,11 +28,11 @@ public class SchemaLoader
     // todo: when xml is fully deprecated, this method should be changed to manually load a few table definitions into
     // the definitions keyspace.
     @BeforeClass
-    public static void loadSchemaFromXml()
+    public static void loadSchemaFromYaml()
     {
         try
         {
-            for (KSMetaData ksm : DatabaseDescriptor.readTablesFromXml())
+            for (KSMetaData ksm : DatabaseDescriptor.readTablesFromYaml())
                 DatabaseDescriptor.setTableDefinition(ksm, DatabaseDescriptor.getDefsVersion());
         }
         catch (ConfigurationException e)

Modified: cassandra/trunk/test/unit/org/apache/cassandra/client/TestRingCache.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/client/TestRingCache.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/test/unit/org/apache/cassandra/client/TestRingCache.java (original)
+++ cassandra/trunk/test/unit/org/apache/cassandra/client/TestRingCache.java Wed Apr 21 21:05:26 2010
@@ -59,7 +59,7 @@ public class TestRingCache
     }
 
     /**
-     * usage: java -Dstorage-config="confpath" org.apache.cassandra.client.TestRingCache [keyspace row-id-prefix row-id-int]
+     * usage: java -cp <configpath> org.apache.cassandra.client.TestRingCache [keyspace row-id-prefix row-id-int]
      * to test a single keyspace/row, use the parameters. row-id-prefix and row-id-int are appended together to form a
     * single row id.  If you supply no parameters, 'Keyspace1' is assumed and will check 9 rows ('row1' through 'row9').
      * @param args

Modified: cassandra/trunk/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java
URL: http://svn.apache.org/viewvc/cassandra/trunk/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java?rev=936510&r1=936509&r2=936510&view=diff
==============================================================================
--- cassandra/trunk/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java (original)
+++ cassandra/trunk/test/unit/org/apache/cassandra/service/EmbeddedCassandraServiceTest.java Wed Apr 21 21:05:26 2010
@@ -72,10 +72,8 @@ public class EmbeddedCassandraServiceTes
     public static void setup() throws TTransportException, IOException, InterruptedException, ConfigurationException
     {
 
-        // Tell cassandra where the configuration files are.
-        // Use the test configuration file.
-        System.setProperty("storage-config", "test/conf");
-        for (KSMetaData table : DatabaseDescriptor.readTablesFromXml())
+        // Manually load tables from the test configuration file.
+        for (KSMetaData table : DatabaseDescriptor.readTablesFromYaml())
             DatabaseDescriptor.setTableDefinition(table, DatabaseDescriptor.getDefsVersion());
 
         cassandra = new EmbeddedCassandraService();



Mime
View raw message