From: vozerov@apache.org
To: commits@ignite.apache.org
Date: Fri, 17 Jun 2016 08:54:32 -0000
Message-Id: <363ca294cc524e9283861d704dc8d731@git.apache.org>
Subject: [23/41] ignite git commit: IGNITE-1371 Implemented Cassandra cache store.

http://git-wip-us.apache.org/repos/asf/ignite/blob/83c26a91/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
----------------------------------------------------------------------
diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
new file mode 100644
index 0000000..298c1b4
--- /dev/null
+++ b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-bootstrap.sh
@@ -0,0 +1,373 @@
+#!/bin/bash
+
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.
+# The ASF licenses this file to You under the Apache License, Version 2.0
+# (the "License"); you may not use this file except in compliance with
+# the License. You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# + +AWS_CLI_DOWNLOAD_URL=https://s3.amazonaws.com/aws-cli/awscli-bundle.zip + +S3_ROOT=s3://bucket/folder +S3_DOWNLOADS=$S3_ROOT/test +S3_SYSTEM=$S3_ROOT/test1 + +CASSANDRA_DOWNLOAD_URL=http://www-eu.apache.org/dist/cassandra/3.5/apache-cassandra-3.5-bin.tar.gz +CASSANDRA_TARBALL=apache-cassandra-3.5-bin.tar.gz +CASSANDRA_UNTAR_DIR=apache-cassandra-3.5 + +TESTS_PACKAGE_DONLOAD_URL=$S3_DOWNLOADS/ignite-cassandra-tests-1.6.0-SNAPSHOT.zip +TESTS_PACKAGE_ZIP=ignite-cassandra-tests-1.6.0-SNAPSHOT.zip +TESTS_PACKAGE_UNZIP_DIR=ignite-cassandra-tests + +S3_LOGS_URL=$S3_SYSTEM/logs/c-logs +S3_LOGS_TRIGGER_URL=$S3_SYSTEM/logs-trigger +S3_BOOTSTRAP_SUCCESS_URL=$S3_SYSTEM/c-success +S3_BOOTSTRAP_FAILURE_URL=$S3_SYSTEM/c-failure +S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_SYSTEM/c-discovery +S3_CASSANDRA_FIRST_NODE_LOCK_URL=$S3_SYSTEM/c-first-node-lock +S3_CASSANDRA_NODES_JOIN_LOCK_URL=$S3_SYSTEM/c-join-lock + +INSTANCE_REGION=us-west-2 +INSTANCE_NAME_TAG=CASSANDRA-SERVER +INSTANCE_OWNER_TAG=ignite@apache.org +INSTANCE_PROJECT_TAG=ignite + +terminate() +{ + if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then + S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/ + fi + + if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then + S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/ + fi + + host_name=$(hostname -f | tr '[:upper:]' '[:lower:]') + msg=$host_name + + if [ -n "$1" ]; then + echo "[ERROR] $1" + echo "[ERROR]-----------------------------------------------------" + echo "[ERROR] Cassandra node bootstrap failed" + echo "[ERROR]-----------------------------------------------------" + msg=$1 + reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${host_name} + reportFile=$reportFolder/__error__ + else + echo "[INFO]-----------------------------------------------------" + echo "[INFO] Cassandra node bootstrap successfully completed" + echo "[INFO]-----------------------------------------------------" + reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${host_name} + reportFile=$reportFolder/__success__ + fi + + echo $msg > /opt/bootstrap-result + + aws s3 rm --recursive $reportFolder + if [ $? -ne 0 ]; then + echo "[ERROR] Failed drop report folder: $reportFolder" + fi + + aws s3 cp --sse AES256 /opt/bootstrap-result $reportFile + if [ $? -ne 0 ]; then + echo "[ERROR] Failed to report bootstrap result to: $reportFile" + fi + + rm -f /opt/bootstrap-result + + if [ -n "$1" ]; then + exit 1 + fi + + exit 0 +} + +tagInstance() +{ + export EC2_HOME=/opt/aws/apitools/ec2 + export JAVA_HOME=/opt/jdk1.8.0_77 + export PATH=$JAVA_HOME/bin:$EC2_HOME/bin:$PATH + + INSTANCE_ID=$(curl http://169.254.169.254/latest/meta-data/instance-id) + if [ $? 
-ne 0 ]; then + terminate "Failed to get instance metadata to tag it" + fi + + if [ -n "$INSTANCE_NAME_TAG" ]; then + ec2-create-tags $INSTANCE_ID --tag Name=${INSTANCE_NAME_TAG} --region $INSTANCE_REGION + if [ $code -ne 0 ]; then + terminate "Failed to tag EC2 instance with: Name=${INSTANCE_NAME_TAG}" + fi + fi + + if [ -n "$INSTANCE_OWNER_TAG" ]; then + ec2-create-tags $INSTANCE_ID --tag owner=${INSTANCE_OWNER_TAG} --region $INSTANCE_REGION + if [ $code -ne 0 ]; then + terminate "Failed to tag EC2 instance with: owner=${INSTANCE_OWNER_TAG}" + fi + fi + + if [ -n "$INSTANCE_PROJECT_TAG" ]; then + ec2-create-tags $INSTANCE_ID --tag project=${INSTANCE_PROJECT_TAG} --region $INSTANCE_REGION + if [ $code -ne 0 ]; then + terminate "Failed to tag EC2 instance with: project=${INSTANCE_PROJECT_TAG}" + fi + fi +} + +downloadPackage() +{ + echo "[INFO] Downloading $3 package from $1 into $2" + + if [[ "$1" == s3* ]]; then + aws s3 cp $1 $2 + + if [ $? -ne 0 ]; then + echo "[WARN] Failed to download $3 package from first attempt" + rm -Rf $2 + sleep 10s + + echo "[INFO] Trying second attempt to download $3 package" + aws s3 cp $1 $2 + + if [ $? -ne 0 ]; then + echo "[WARN] Failed to download $3 package from second attempt" + rm -Rf $2 + sleep 10s + + echo "[INFO] Trying third attempt to download $3 package" + aws s3 cp $1 $2 + + if [ $? -ne 0 ]; then + terminate "All three attempts to download $3 package from $1 are failed" + fi + fi + fi + else + curl "$1" -o "$2" + + if [ $? -ne 0 ] && [ $? -ne 6 ]; then + echo "[WARN] Failed to download $3 package from first attempt" + rm -Rf $2 + sleep 10s + + echo "[INFO] Trying second attempt to download $3 package" + curl "$1" -o "$2" + + if [ $? -ne 0 ] && [ $? -ne 6 ]; then + echo "[WARN] Failed to download $3 package from second attempt" + rm -Rf $2 + sleep 10s + + echo "[INFO] Trying third attempt to download $3 package" + curl "$1" -o "$2" + + if [ $? -ne 0 ] && [ $? -ne 6 ]; then + terminate "All three attempts to download $3 package from $1 are failed" + fi + fi + fi + fi + + echo "[INFO] $3 package successfully downloaded from $1 into $2" +} + +if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then + S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/ +fi + +echo "[INFO]-----------------------------------------------------------------" +echo "[INFO] Bootstrapping Cassandra node" +echo "[INFO]-----------------------------------------------------------------" +echo "[INFO] Cassandra download URL: $CASSANDRA_DOWNLOAD_URL" +echo "[INFO] Tests package download URL: $TESTS_PACKAGE_DONLOAD_URL" +echo "[INFO] Logs URL: $S3_LOGS_URL" +echo "[INFO] Logs trigger URL: $S3_LOGS_TRIGGER_URL" +echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL" +echo "[INFO] Cassandra first node lock URL: $S3_CASSANDRA_FIRST_NODE_LOCK_URL" +echo "[INFO] Cassandra nodes join lock URL: $S3_CASSANDRA_NODES_JOIN_LOCK_URL" +echo "[INFO] Bootsrap success URL: $S3_BOOTSTRAP_SUCCESS_URL" +echo "[INFO] Bootsrap failure URL: $S3_BOOTSTRAP_FAILURE_URL" +echo "[INFO]-----------------------------------------------------------------" + +echo "[INFO] Installing 'wget' package" +yum -y install wget +if [ $? -ne 0 ]; then + terminate "Failed to install 'wget' package" +fi + +echo "[INFO] Installing 'net-tools' package" +yum -y install net-tools +if [ $? -ne 0 ]; then + terminate "Failed to install 'net-tools' package" +fi + +echo "[INFO] Installing 'python' package" +yum -y install python +if [ $? 
-ne 0 ]; then + terminate "Failed to install 'python' package" +fi + +echo "[INFO] Installing 'unzip' package" +yum -y install unzip +if [ $? -ne 0 ]; then + terminate "Failed to install 'unzip' package" +fi + +rm -Rf /opt/jdk1.8.0_77 /opt/jdk-8u77-linux-x64.tar.gz + +echo "[INFO] Downloading 'jdk-8u77'" +wget --no-cookies --no-check-certificate --header "Cookie: gpw_e24=http%3A%2F%2Fwww.oracle.com%2F; oraclelicense=accept-securebackup-cookie" "http://download.oracle.com/otn-pub/java/jdk/8u77-b03/jdk-8u77-linux-x64.tar.gz" -O /opt/jdk-8u77-linux-x64.tar.gz +if [ $? -ne 0 ]; then + terminate "Failed to download 'jdk-8u77'" +fi + +echo "[INFO] Unzipping 'jdk-8u77'" +tar -xvzf /opt/jdk-8u77-linux-x64.tar.gz -C /opt +if [ $? -ne 0 ]; then + terminate "Failed to untar 'jdk-8u77'" +fi + +rm -Rf /opt/jdk-8u77-linux-x64.tar.gz + +downloadPackage "https://bootstrap.pypa.io/get-pip.py" "/opt/get-pip.py" "get-pip.py" + +echo "[INFO] Installing 'pip'" +python /opt/get-pip.py +if [ $? -ne 0 ]; then + terminate "Failed to install 'pip'" +fi + +echo "[INFO] Installing 'awscli'" +pip install --upgrade awscli +if [ $? -ne 0 ]; then + echo "[ERROR] Failed to install 'awscli' using pip" + echo "[INFO] Trying to install awscli using zip archive" + echo "[INFO] Downloading awscli zip" + + downloadPackage "$AWS_CLI_DOWNLOAD_URL" "/opt/awscli-bundle.zip" "awscli" + + echo "[INFO] Unzipping awscli zip" + unzip /opt/awscli-bundle.zip -d /opt + if [ $? -ne 0 ]; then + terminate "Failed to unzip awscli zip" + fi + + rm -Rf /opt/awscli-bundle.zip + + echo "[INFO] Installing awscli" + /opt/awscli-bundle/install -i /usr/local/aws -b /usr/local/bin/aws + if [ $? -ne 0 ]; then + terminate "Failed to install awscli" + fi + + echo "[INFO] Successfully installed awscli from zip archive" +fi + +tagInstance + +echo "[INFO] Creating 'cassandra' group" +exists=$(cat /etc/group | grep cassandra) +if [ -z "$exists" ]; then + groupadd cassandra + if [ $? -ne 0 ]; then + terminate "Failed to create 'cassandra' group" + fi +fi + +echo "[INFO] Creating 'cassandra' user" +exists=$(cat /etc/passwd | grep cassandra) +if [ -z "$exists" ]; then + useradd -g cassandra cassandra + if [ $? -ne 0 ]; then + terminate "Failed to create 'cassandra' user" + fi +fi + +rm -Rf /storage/cassandra /opt/cassandra /opt/$CASSANDRA_TARBALL + +echo "[INFO] Creating '/storage/cassandra' storage" +mkdir -p /storage/cassandra +chown -R cassandra:cassandra /storage/cassandra +if [ $? -ne 0 ]; then + terminate "Failed to setup Cassandra storage dir: /storage/cassandra" +fi + +downloadPackage "$CASSANDRA_DOWNLOAD_URL" "/opt/$CASSANDRA_TARBALL" "Cassandra" + +echo "[INFO] Unzipping Cassandra package" +tar -xvzf /opt/$CASSANDRA_TARBALL -C /opt +if [ $? -ne 0 ]; then + terminate "Failed to untar Cassandra package" +fi + +rm -f /opt/$CASSANDRA_TARBALL /opt/cassandra +mv /opt/$CASSANDRA_UNTAR_DIR /opt/cassandra +chown -R cassandra:cassandra /opt/cassandra + +downloadPackage "$TESTS_PACKAGE_DONLOAD_URL" "/opt/$TESTS_PACKAGE_ZIP" "Tests" + +unzip /opt/$TESTS_PACKAGE_ZIP -d /opt +if [ $? -ne 0 ]; then + terminate "Failed to unzip tests package: $TESTS_PACKAGE_DONLOAD_URL" +fi + +chown -R cassandra:cassandra /opt/$TESTS_PACKAGE_UNZIP_DIR +find /opt/$TESTS_PACKAGE_UNZIP_DIR -type f -name "*.sh" -exec chmod ug+x {} \; + +if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-env.sh" ]; then + terminate "There are no cassandra-env.sh in tests package" +fi + +if [ ! 
-f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-start.sh" ]; then + terminate "There are no cassandra-start.sh in tests package" +fi + +if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-template.yaml" ]; then + terminate "There are no cassandra-start.sh in tests package" +fi + +if [ ! -f "/opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh" ]; then + terminate "There are no logs-collector.sh in tests package" +fi + +mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-start.sh /opt +mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-env.sh /opt/cassandra/conf +mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/cassandra/cassandra-template.yaml /opt/cassandra/conf +mv -f /opt/$TESTS_PACKAGE_UNZIP_DIR/bootstrap/aws/logs-collector.sh /opt +rm -Rf /opt/$TESTS_PACKAGE_UNZIP_DIR +chown -R cassandra:cassandra /opt/cassandra /opt/cassandra-start.sh /opt/logs-collector.sh + +#profile=/home/cassandra/.bash_profile +profile=/root/.bash_profile + +echo "export JAVA_HOME=/opt/jdk1.8.0_77" >> $profile +echo "export CASSANDRA_HOME=/opt/cassandra" >> $profile +echo "export PATH=\$JAVA_HOME/bin:\$CASSANDRA_HOME/bin:\$PATH" >> $profile +echo "export S3_BOOTSTRAP_SUCCESS_URL=$S3_BOOTSTRAP_SUCCESS_URL" >> $profile +echo "export S3_BOOTSTRAP_FAILURE_URL=$S3_BOOTSTRAP_FAILURE_URL" >> $profile +echo "export S3_CASSANDRA_NODES_DISCOVERY_URL=$S3_CASSANDRA_NODES_DISCOVERY_URL" >> $profile +echo "export S3_CASSANDRA_NODES_JOIN_LOCK_URL=$S3_CASSANDRA_NODES_JOIN_LOCK_URL" >> $profile +echo "export S3_CASSANDRA_FIRST_NODE_LOCK_URL=$S3_CASSANDRA_FIRST_NODE_LOCK_URL" >> $profile + +HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]') + +/opt/logs-collector.sh "/opt/cassandra/logs" "$S3_LOGS_URL/$HOST_NAME" "$S3_LOGS_TRIGGER_URL" > /opt/cassandra/logs-collector.log & + +cmd="/opt/cassandra-start.sh" + +#sudo -u cassandra -g cassandra sh -c "$cmd | tee /opt/cassandra/start.log" + +$cmd | tee /opt/cassandra/start.log \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ignite/blob/83c26a91/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh ---------------------------------------------------------------------- diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh new file mode 100644 index 0000000..11dfc50 --- /dev/null +++ b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-env.sh @@ -0,0 +1,283 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+# + +calculate_heap_sizes() +{ + case "`uname`" in + Linux) + system_memory_in_mb=`free -m | awk '/:/ {print $2;exit}'` + system_cpu_cores=`egrep -c 'processor([[:space:]]+):.*' /proc/cpuinfo` + ;; + FreeBSD) + system_memory_in_bytes=`sysctl hw.physmem | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + SunOS) + system_memory_in_mb=`prtconf | awk '/Memory size:/ {print $3}'` + system_cpu_cores=`psrinfo | wc -l` + ;; + Darwin) + system_memory_in_bytes=`sysctl hw.memsize | awk '{print $2}'` + system_memory_in_mb=`expr $system_memory_in_bytes / 1024 / 1024` + system_cpu_cores=`sysctl hw.ncpu | awk '{print $2}'` + ;; + *) + # assume reasonable defaults for e.g. a modern desktop or + # cheap server + system_memory_in_mb="2048" + system_cpu_cores="2" + ;; + esac + + # some systems like the raspberry pi don't report cores, use at least 1 + if [ "$system_cpu_cores" -lt "1" ] + then + system_cpu_cores="1" + fi + + # set max heap size based on the following + # max(min(1/2 ram, 1024MB), min(1/4 ram, 8GB)) + # calculate 1/2 ram and cap to 1024MB + # calculate 1/4 ram and cap to 8192MB + # pick the max + half_system_memory_in_mb=`expr $system_memory_in_mb / 2` + quarter_system_memory_in_mb=`expr $half_system_memory_in_mb / 2` + if [ "$half_system_memory_in_mb" -gt "1024" ] + then + half_system_memory_in_mb="1024" + fi + if [ "$quarter_system_memory_in_mb" -gt "8192" ] + then + quarter_system_memory_in_mb="8192" + fi + if [ "$half_system_memory_in_mb" -gt "$quarter_system_memory_in_mb" ] + then + max_heap_size_in_mb="$half_system_memory_in_mb" + else + max_heap_size_in_mb="$quarter_system_memory_in_mb" + fi + MAX_HEAP_SIZE="${max_heap_size_in_mb}M" + + # Young gen: min(max_sensible_per_modern_cpu_core * num_cores, 1/4 * heap size) + max_sensible_yg_per_core_in_mb="100" + max_sensible_yg_in_mb=`expr $max_sensible_yg_per_core_in_mb "*" $system_cpu_cores` + + desired_yg_in_mb=`expr $max_heap_size_in_mb / 4` + + if [ "$desired_yg_in_mb" -gt "$max_sensible_yg_in_mb" ] + then + HEAP_NEWSIZE="${max_sensible_yg_in_mb}M" + else + HEAP_NEWSIZE="${desired_yg_in_mb}M" + fi +} + +# Determine the sort of JVM we'll be running on. +java_ver_output=`"${JAVA:-java}" -version 2>&1` +jvmver=`echo "$java_ver_output" | grep '[openjdk|java] version' | awk -F'"' 'NR==1 {print $2}'` +JVM_VERSION=${jvmver%_*} +JVM_PATCH_VERSION=${jvmver#*_} + +if [ "$JVM_VERSION" \< "1.8" ] ; then + echo "Cassandra 3.0 and later require Java 8u40 or later." + exit 1; +fi + +if [ "$JVM_VERSION" \< "1.8" ] && [ "$JVM_PATCH_VERSION" \< "40" ] ; then + echo "Cassandra 3.0 and later require Java 8u40 or later." + exit 1; +fi + +jvm=`echo "$java_ver_output" | grep -A 1 'java version' | awk 'NR==2 {print $1}'` +case "$jvm" in + OpenJDK) + JVM_VENDOR=OpenJDK + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $2}'` + ;; + "Java(TM)") + JVM_VENDOR=Oracle + # this will be "64-Bit" or "32-Bit" + JVM_ARCH=`echo "$java_ver_output" | awk 'NR==3 {print $3}'` + ;; + *) + # Help fill in other JVM values + JVM_VENDOR=other + JVM_ARCH=unknown + ;; +esac + +# Override these to set the amount of memory to allocate to the JVM at +# start-up. For production use you may wish to adjust this for your +# environment. MAX_HEAP_SIZE is the total amount of memory dedicated +# to the Java heap. HEAP_NEWSIZE refers to the size of the young +# generation. 
Both MAX_HEAP_SIZE and HEAP_NEWSIZE should be either set +# or not (if you set one, set the other). +# +# The main trade-off for the young generation is that the larger it +# is, the longer GC pause times will be. The shorter it is, the more +# expensive GC will be (usually). +# +# The example HEAP_NEWSIZE assumes a modern 8-core+ machine for decent pause +# times. If in doubt, and if you do not particularly want to tweak, go with +# 100 MB per physical CPU core. + +#MAX_HEAP_SIZE="4G" +#HEAP_NEWSIZE="800M" + +# Set this to control the amount of arenas per-thread in glibc +#export MALLOC_ARENA_MAX=4 + +# only calculate the size if it's not set manually +if [ "x$MAX_HEAP_SIZE" = "x" ] && [ "x$HEAP_NEWSIZE" = "x" ]; then + calculate_heap_sizes +else + if [ "x$MAX_HEAP_SIZE" = "x" ] || [ "x$HEAP_NEWSIZE" = "x" ]; then + echo "please set or unset MAX_HEAP_SIZE and HEAP_NEWSIZE in pairs (see cassandra-env.sh)" + exit 1 + fi +fi + +if [ "x$MALLOC_ARENA_MAX" = "x" ] ; then + export MALLOC_ARENA_MAX=4 +fi + +#GC log path has to be defined here because it needs to access CASSANDRA_HOME +JVM_OPTS="$JVM_OPTS -Xloggc:${CASSANDRA_HOME}/logs/gc.log" + +# Here we create the arguments that will get passed to the jvm when +# starting cassandra. + +# Read user-defined JVM options from jvm.options file +JVM_OPTS_FILE=$CASSANDRA_CONF/jvm.options +for opt in `grep "^-" $JVM_OPTS_FILE` +do + JVM_OPTS="$JVM_OPTS $opt" +done + +# Check what parameters were defined on jvm.options file to avoid conflicts +echo $JVM_OPTS | grep -q Xmn +DEFINED_XMN=$? +echo $JVM_OPTS | grep -q Xmx +DEFINED_XMX=$? +echo $JVM_OPTS | grep -q Xms +DEFINED_XMS=$? +echo $JVM_OPTS | grep -q UseConcMarkSweepGC +USING_CMS=$? + +# We only set -Xms and -Xmx if they were not defined on jvm.options file +# If defined, both Xmx and Xms should be defined together. +if [ $DEFINED_XMX -ne 0 ] && [ $DEFINED_XMS -ne 0 ]; then + JVM_OPTS="$JVM_OPTS -Xms${MAX_HEAP_SIZE}" + JVM_OPTS="$JVM_OPTS -Xmx${MAX_HEAP_SIZE}" +elif [ $DEFINED_XMX -ne 0 ] || [ $DEFINED_XMS -ne 0 ]; then + echo "Please set or unset -Xmx and -Xms flags in pairs on jvm.options file." + exit 1 +fi + +# We only set -Xmn flag if it was not defined in jvm.options file +# and if the CMS GC is being used +# If defined, both Xmn and Xmx should be defined together. +if [ $DEFINED_XMN -eq 0 ] && [ $DEFINED_XMX -ne 0 ]; then + echo "Please set or unset -Xmx and -Xmn flags in pairs on jvm.options file." + exit 1 +elif [ $DEFINED_XMN -ne 0 ] && [ $USING_CMS -eq 0 ]; then + JVM_OPTS="$JVM_OPTS -Xmn${HEAP_NEWSIZE}" +fi + +if [ "$JVM_ARCH" = "64-Bit" ] && [ $USING_CMS -eq 0 ]; then + JVM_OPTS="$JVM_OPTS -XX:+UseCondCardMark" +fi + +# provides hints to the JIT compiler +JVM_OPTS="$JVM_OPTS -XX:CompileCommandFile=$CASSANDRA_CONF/hotspot_compiler" + +# add the jamm javaagent +JVM_OPTS="$JVM_OPTS -javaagent:$CASSANDRA_HOME/lib/jamm-0.3.0.jar" + +# set jvm HeapDumpPath with CASSANDRA_HEAPDUMP_DIR +if [ "x$CASSANDRA_HEAPDUMP_DIR" != "x" ]; then + JVM_OPTS="$JVM_OPTS -XX:HeapDumpPath=$CASSANDRA_HEAPDUMP_DIR/cassandra-`date +%s`-pid$$.hprof" +fi + +# jmx: metrics and administration interface +# +# add this if you're having trouble connecting: +# JVM_OPTS="$JVM_OPTS -Djava.rmi.server.hostname=" +# +# see +# https://blogs.oracle.com/jmxetc/entry/troubleshooting_connection_problems_in_jconsole +# for more on configuring JMX through firewalls, etc. (Short version: +# get it working with no firewall first.) +# +# Cassandra ships with JMX accessible *only* from localhost. 
+# To enable remote JMX connections, uncomment lines below +# with authentication and/or ssl enabled. See https://wiki.apache.org/cassandra/JmxSecurity +# +if [ "x$LOCAL_JMX" = "x" ]; then + LOCAL_JMX=yes +fi + +# Specifies the default port over which Cassandra will be available for +# JMX connections. +# For security reasons, you should not expose this port to the internet. Firewall it if needed. +JMX_PORT="7199" + +if [ "$LOCAL_JMX" = "yes" ]; then +# JVM_OPTS="$JVM_OPTS -Dcassandra.jmx.local.port=$JMX_PORT -XX:+DisableExplicitGC" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.local.only=false" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=false" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" + JVM_OPTS="$JVM_OPTS -XX:+UnlockCommercialFeatures" + JVM_OPTS="$JVM_OPTS -XX:+FlightRecorder" + JVM_OPTS="$JVM_OPTS -XX:FlightRecorderOptions=defaultrecording=true" +else + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.port=$JMX_PORT" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.rmi.port=$JMX_PORT" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl=false" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.authenticate=true" + JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.password.file=/etc/cassandra/jmxremote.password" +# JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStore=/path/to/keystore" +# JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.keyStorePassword=" +# JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStore=/path/to/truststore" +# JVM_OPTS="$JVM_OPTS -Djavax.net.ssl.trustStorePassword=" +# JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.need.client.auth=true" +# JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.registry.ssl=true" +# JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.protocols=" +# JVM_OPTS="$JVM_OPTS -Dcom.sun.management.jmxremote.ssl.enabled.cipher.suites=" +fi + +# To use mx4j, an HTML interface for JMX, add mx4j-tools.jar to the lib/ +# directory. +# See http://wiki.apache.org/cassandra/Operations#Monitoring_with_MX4J +# By default mx4j listens on 0.0.0.0:8081. Uncomment the following lines +# to control its listen address and port. +#MX4J_ADDRESS="-Dmx4jaddress=127.0.0.1" +#MX4J_PORT="-Dmx4jport=8081" + +# Cassandra uses SIGAR to capture OS metrics CASSANDRA-7838 +# for SIGAR we have to set the java.library.path +# to the location of the native libraries. +JVM_OPTS="$JVM_OPTS -Djava.library.path=$CASSANDRA_HOME/lib/sigar-bin" + +JVM_OPTS="$JVM_OPTS $MX4J_ADDRESS" +JVM_OPTS="$JVM_OPTS $MX4J_PORT" +JVM_OPTS="$JVM_OPTS $JVM_EXTRA_OPTS" http://git-wip-us.apache.org/repos/asf/ignite/blob/83c26a91/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh ---------------------------------------------------------------------- diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh new file mode 100644 index 0000000..c73c509 --- /dev/null +++ b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-start.sh @@ -0,0 +1,550 @@ +#!/bin/bash + +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. 
+# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +#profile=/home/cassandra/.bash_profile +profile=/root/.bash_profile + +. $profile + +terminate() +{ + if [[ "$S3_BOOTSTRAP_SUCCESS_URL" != */ ]]; then + S3_BOOTSTRAP_SUCCESS_URL=${S3_BOOTSTRAP_SUCCESS_URL}/ + fi + + if [[ "$S3_BOOTSTRAP_FAILURE_URL" != */ ]]; then + S3_BOOTSTRAP_FAILURE_URL=${S3_BOOTSTRAP_FAILURE_URL}/ + fi + + msg=$HOST_NAME + + if [ -n "$1" ]; then + echo "[ERROR] $1" + echo "[ERROR]-----------------------------------------------------" + echo "[ERROR] Failed to start Cassandra node" + echo "[ERROR]-----------------------------------------------------" + msg=$1 + reportFolder=${S3_BOOTSTRAP_FAILURE_URL}${HOST_NAME} + reportFile=$reportFolder/__error__ + else + echo "[INFO]-----------------------------------------------------" + echo "[INFO] Cassandra node successfully started" + echo "[INFO]-----------------------------------------------------" + reportFolder=${S3_BOOTSTRAP_SUCCESS_URL}${HOST_NAME} + reportFile=$reportFolder/__success__ + fi + + echo $msg > /opt/cassandra/start_result + + aws s3 rm --recursive $reportFolder + if [ $? -ne 0 ]; then + echo "[ERROR] Failed drop report folder: $reportFolder" + fi + + if [ -d "/opt/cassandra/logs" ]; then + aws s3 sync --sse AES256 /opt/cassandra/logs $reportFolder + if [ $? -ne 0 ]; then + echo "[ERROR] Failed to export Cassandra logs to: $reportFolder" + fi + fi + + aws s3 cp --sse AES256 /opt/cassandra/start_result $reportFile + if [ $? -ne 0 ]; then + echo "[ERROR] Failed to export node start result to: $reportFile" + fi + + rm -f /opt/cassandra/start_result /opt/cassandra/join-lock /opt/cassandra/remote-join-lock + + if [ -n "$1" ]; then + exit 1 + fi + + exit 0 +} + +registerNode() +{ + echo "[INFO] Registering Cassandra node seed: ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME" + + aws s3 cp --sse AES256 /opt/cassandra/join-lock ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME + if [ $? -ne 0 ]; then + terminate "Failed to register Cassandra seed info in: ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME" + fi + + echo "[INFO] Cassandra node seed successfully registered" +} + +unregisterNode() +{ + echo "[INFO] Removing Cassandra node registration from: ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME" + aws s3 rm ${S3_CASSANDRA_NODES_DISCOVERY_URL}$HOST_NAME + echo "[INFO] Cassandra node registration removed" +} + +cleanupMetadata() +{ + echo "[INFO] Running cleanup" + aws s3 rm $S3_CASSANDRA_NODES_JOIN_LOCK_URL + aws s3 rm --recursive $S3_CASSANDRA_NODES_DISCOVERY_URL + aws s3 rm --recursive $S3_BOOTSTRAP_SUCCESS_URL + aws s3 rm --recursive $S3_BOOTSTRAP_FAILURE_URL + echo "[INFO] Cleanup completed" +} + +setupCassandraSeeds() +{ + echo "[INFO] Setting up Cassandra seeds" + + if [ "$FIRST_NODE" == "true" ]; then + CASSANDRA_SEEDS=$(hostname -f | tr '[:upper:]' '[:lower:]') + echo "[INFO] Using host address as a seed for the first Cassandra node: $CASSANDRA_SEEDS" + aws s3 rm --recursive ${S3_CASSANDRA_NODES_DISCOVERY_URL::-1} + if [ $? 
-ne 0 ]; then + terminate "Failed to clean Cassandra node discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL" + fi + + cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml + + return 0 + fi + + echo "[INFO] Looking for Cassandra seeds in: $S3_CASSANDRA_NODES_DISCOVERY_URL" + + startTime=$(date +%s) + + while true; do + seeds=$(aws s3 ls $S3_CASSANDRA_NODES_DISCOVERY_URL | grep -v PRE | sed -r "s/^.* //g") + if [ -n "$seeds" ]; then + seeds=($seeds) + length=${#seeds[@]} + + if [ $length -lt 4 ]; then + seed1=${seeds[0]} + seed2=${seeds[1]} + seed3=${seeds[2]} + else + pos1=$(($RANDOM%$length)) + pos2=$(($RANDOM%$length)) + pos3=$(($RANDOM%$length)) + seed1=${seeds[${pos1}]} + seed2=${seeds[${pos2}]} + seed3=${seeds[${pos3}]} + fi + + CASSANDRA_SEEDS=$seed1 + CASSANDRA_SEED=$seed1 + + if [ "$seed2" != "$seed1" ] && [ -n "$seed2" ]; then + CASSANDRA_SEEDS="$CASSANDRA_SEEDS,$seed2" + fi + + if [ "$seed3" != "$seed2" ] && [ "$seed3" != "$seed1" ] && [ -n "$seed3" ]; then + CASSANDRA_SEEDS="$CASSANDRA_SEEDS,$seed3" + fi + + echo "[INFO] Using Cassandra seeds: $CASSANDRA_SEEDS" + + cat /opt/cassandra/conf/cassandra-template.yaml | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml + + return 0 + fi + + currentTime=$(date +%s) + duration=$(( $currentTime-$startTime )) + duration=$(( $duration/60 )) + + if [ $duration -gt $NODE_STARTUP_TIME ]; then + terminate "${NODE_STARTUP_TIME}min timeout expired, but first Cassandra node is still not up and running" + fi + + echo "[INFO] Waiting for the first Cassandra node to start and publish its seed, time passed ${duration}min" + + sleep 1m + done +} + +tryToGetFirstNodeLock() +{ + echo "[INFO] Trying to get first node lock" + + checkFirstNodeLockExist + if [ $? -ne 0 ]; then + return 1 + fi + + createFirstNodeLock + + sleep 5s + + rm -Rf /opt/cassandra/first-node-lock + + aws s3 cp $S3_CASSANDRA_FIRST_NODE_LOCK_URL /opt/cassandra/first-node-lock + if [ $? -ne 0 ]; then + echo "[WARN] Failed to check just created first node lock" + return 1 + fi + + first_host=$(cat /opt/cassandra/first-node-lock) + + rm -f /opt/cassandra/first-node-lock + + if [ "$first_host" != "$HOST_NAME" ]; then + echo "[INFO] Node $first_host has discarded previously created first node lock" + return 1 + fi + + echo "[INFO] Congratulations, got first node lock" + + return 0 +} + +checkFirstNodeLockExist() +{ + echo "[INFO] Checking for the first node lock" + + lockExists=$(aws s3 ls $S3_CASSANDRA_FIRST_NODE_LOCK_URL) + if [ -n "$lockExists" ]; then + echo "[INFO] First node lock already exists" + return 1 + fi + + echo "[INFO] First node lock doesn't exist" + + return 0 +} + +createFirstNodeLock() +{ + aws s3 cp --sse AES256 /opt/cassandra/join-lock $S3_CASSANDRA_FIRST_NODE_LOCK_URL + if [ $? -ne 0 ]; then + terminate "Failed to create first node lock" + fi + echo "[INFO] Created first node lock" +} + +removeFirstNodeLock() +{ + aws s3 rm $S3_CASSANDRA_FIRST_NODE_LOCK_URL + if [ $? -ne 0 ]; then + terminate "Failed to remove first node lock" + fi + echo "[INFO] Removed first node lock" +} + +tryToGetClusterJoinLock() +{ + echo "[INFO] Trying to get cluster join lock" + + checkClusterJoinLockExist + if [ $? -ne 0 ]; then + return 1 + fi + + createClusterJoinLock + + sleep 5s + + rm -Rf /opt/cassandra/remote-join-lock + + aws s3 cp $S3_CASSANDRA_NODES_JOIN_LOCK_URL /opt/cassandra/remote-join-lock + if [ $? 
-ne 0 ]; then + echo "[WARN] Failed to check just created cluster join lock" + return 1 + fi + + join_host=$(cat /opt/cassandra/remote-join-lock) + + if [ "$join_host" != "$HOST_NAME" ]; then + echo "[INFO] Node $first_host has discarded previously created cluster join lock" + return 1 + fi + + echo "[INFO] Congratulations, got cluster join lock" + + return 0 +} + +checkClusterJoinLockExist() +{ + echo "[INFO] Checking for the cluster join lock" + + lockExists=$(aws s3 ls $S3_CASSANDRA_NODES_JOIN_LOCK_URL) + if [ -n "$lockExists" ]; then + echo "[INFO] Cluster join lock already exists" + return 1 + fi + + status=$(/opt/cassandra/bin/nodetool -h $CASSANDRA_SEED status) + leaving=$(echo $status | grep UL) + moving=$(echo $status | grep UM) + joining=$(echo $status | grep UJ) + + if [ -n "$leaving" ] || [ -n "$moving" ] || [ -n "$joining" ]; then + echo "[INFO] Cluster join lock doesn't exist in S3, but some node still trying to join Cassandra cluster" + return 1 + fi + + echo "[INFO] Cluster join lock doesn't exist" + + return 0 +} + +createClusterJoinLock() +{ + aws s3 cp --sse AES256 /opt/cassandra/join-lock $S3_CASSANDRA_NODES_JOIN_LOCK_URL + if [ $? -ne 0 ]; then + terminate "Failed to create cluster join lock" + fi + echo "[INFO] Created cluster join lock" +} + +removeClusterJoinLock() +{ + aws s3 rm $S3_CASSANDRA_NODES_JOIN_LOCK_URL + if [ $? -ne 0 ]; then + terminate "Failed to remove cluster join lock" + fi + echo "[INFO] Removed cluster join lock" +} + +waitToJoinCassandraCluster() +{ + echo "[INFO] Waiting to join Cassandra cluster" + + while true; do + tryToGetClusterJoinLock + + if [ $? -ne 0 ]; then + echo "[INFO] Another node is trying to join cluster. Waiting for extra 1min." + sleep 1m + else + echo "[INFO]-------------------------------------------------------------" + echo "[INFO] Congratulations, got lock to join Cassandra cluster" + echo "[INFO]-------------------------------------------------------------" + break + fi + done +} + +waitFirstCassandraNodeRegistered() +{ + echo "[INFO] Waiting for the first Cassandra node to register" + + startTime=$(date +%s) + + while true; do + first_host= + + exists=$(aws s3 ls $S3_CASSANDRA_FIRST_NODE_LOCK_URL) + if [ -n "$exists" ]; then + rm -Rf /opt/cassandra/first-node-lock + + aws s3 cp $S3_CASSANDRA_FIRST_NODE_LOCK_URL /opt/cassandra/first-node-lock + if [ $? 
-ne 0 ]; then + terminate "Failed to check existing first node lock" + fi + + first_host=$(cat /opt/cassandra/first-node-lock) + + rm -Rf /opt/cassandra/first-node-lock + fi + + if [ -n "$first_host" ]; then + exists=$(aws s3 ls ${S3_CASSANDRA_NODES_DISCOVERY_URL}${first_host}) + if [ -n "$exists" ]; then + break + fi + fi + + currentTime=$(date +%s) + duration=$(( $currentTime-$startTime )) + duration=$(( $duration/60 )) + + if [ $duration -gt $NODE_STARTUP_TIME ]; then + terminate "${NODE_STARTUP_TIME}min timeout expired, but first Cassandra node is still not up and running" + fi + + echo "[INFO] Waiting extra 1min" + + sleep 1m + done + + echo "[INFO] First Cassandra node registered" +} + +startCassandra() +{ + echo "[INFO]-------------------------------------------------------------" + echo "[INFO] Trying attempt $START_ATTEMPT to start Cassandra daemon" + echo "[INFO]-------------------------------------------------------------" + echo "" + + setupCassandraSeeds + + if [ "$FIRST_NODE" != "true" ]; then + waitToJoinCassandraCluster + fi + + proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon") + proc=($proc) + + if [ -n "${proc[1]}" ]; then + echo "[INFO] Terminating existing Cassandra process ${proc[1]}" + kill -9 ${proc[1]} + fi + + echo "[INFO] Starting Cassandra" + rm -Rf /opt/cassandra/logs/* /storage/cassandra/* + /opt/cassandra/bin/cassandra -R & + + echo "[INFO] Cassandra job id: $!" + + sleep 1m + + START_ATTEMPT=$(( $START_ATTEMPT+1 )) +} + +# Time (in minutes) to wait for the Cassandra node up and running and register it in S3 +NODE_STARTUP_TIME=10 + +# Number of attempts to start (not first) Cassandra daemon +NODE_START_ATTEMPTS=3 + +HOST_NAME=$(hostname -f | tr '[:upper:]' '[:lower:]') +echo $HOST_NAME > /opt/cassandra/join-lock + +START_ATTEMPT=0 + +FIRST_NODE="false" + +unregisterNode + +tryToGetFirstNodeLock + +if [ $? -eq 0 ]; then + FIRST_NODE="true" +fi + +echo "[INFO]-----------------------------------------------------------------" + +if [ "$FIRST_NODE" == "true" ]; then + echo "[INFO] Starting first Cassandra node" +else + echo "[INFO] Starting Cassandra node" +fi + +echo "[INFO]-----------------------------------------------------------------" +echo "[INFO] Cassandra nodes discovery URL: $S3_CASSANDRA_NODES_DISCOVERY_URL" +echo "[INFO] Cassandra first node lock URL: $S3_CASSANDRA_FIRST_NODE_LOCK_URL" +echo "[INFO] Cassandra nodes join lock URL: $S3_CASSANDRA_NODES_JOIN_LOCK_URL" +echo "[INFO] Start success URL: $S3_BOOTSTRAP_SUCCESS_URL" +echo "[INFO] Start failure URL: $S3_BOOTSTRAP_FAILURE_URL" +echo "[INFO] CASSANDRA_HOME: $CASSANDRA_HOME" +echo "[INFO] JAVA_HOME: $JAVA_HOME" +echo "[INFO] PATH: $PATH" +echo "[INFO]-----------------------------------------------------------------" + +if [ -z "$S3_CASSANDRA_NODES_DISCOVERY_URL" ]; then + terminate "S3 discovery URL doesn't specified" +fi + +if [[ "$S3_CASSANDRA_NODES_DISCOVERY_URL" != */ ]]; then + S3_CASSANDRA_NODES_DISCOVERY_URL=${S3_CASSANDRA_NODES_DISCOVERY_URL}/ +fi + +if [ "$FIRST_NODE" != "true" ]; then + waitFirstCassandraNodeRegistered +else + cleanupMetadata +fi + +startCassandra + +startTime=$(date +%s) + +while true; do + proc=$(ps -ef | grep java | grep "org.apache.cassandra.service.CassandraDaemon") + + /opt/cassandra/bin/nodetool status &> /dev/null + + if [ $? 
-eq 0 ]; then + echo "[INFO]-----------------------------------------------------" + echo "[INFO] Cassandra daemon successfully started" + echo "[INFO]-----------------------------------------------------" + echo $proc + echo "[INFO]-----------------------------------------------------" + + if [ "$FIRST_NODE" != "true" ]; then + removeClusterJoinLock + fi + + break + fi + + currentTime=$(date +%s) + duration=$(( $currentTime-$startTime )) + duration=$(( $duration/60 )) + + if [ $duration -gt $NODE_STARTUP_TIME ]; then + if [ "$FIRST_NODE" == "true" ]; then + removeFirstNodeLock + terminate "${NODE_STARTUP_TIME}min timeout expired, but first Cassandra daemon is still not up and running" + else + removeClusterJoinLock + + if [ $START_ATTEMPT -gt $NODE_START_ATTEMPTS ]; then + terminate "${NODE_START_ATTEMPTS} attempts exceed, but Cassandra daemon is still not up and running" + fi + + startCassandra + fi + + continue + fi + + concurrencyError=$(cat /opt/cassandra/logs/system.log | grep "java.lang.UnsupportedOperationException: Other bootstrapping/leaving/moving nodes detected, cannot bootstrap while cassandra.consistent.rangemovement is true") + + if [ -n "$concurrencyError" ] && [ "$FIRST_NODE" != "true" ]; then + removeClusterJoinLock + echo "[WARN] Failed to concurrently start Cassandra daemon. Sleeping for extra 1min" + sleep 1m + startCassandra + continue + fi + + if [ -z "$proc" ]; then + if [ "$FIRST_NODE" == "true" ]; then + removeFirstNodeLock + terminate "Failed to start Cassandra daemon" + fi + + removeClusterJoinLock + echo "[WARN] Failed to start Cassandra daemon. Sleeping for extra 1min" + sleep 1m + startCassandra + continue + fi + + echo "[INFO] Waiting for Cassandra daemon to start, time passed ${duration}min" + sleep 30s +done + +registerNode + +terminate \ No newline at end of file http://git-wip-us.apache.org/repos/asf/ignite/blob/83c26a91/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml ---------------------------------------------------------------------- diff --git a/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml new file mode 100644 index 0000000..965e34e --- /dev/null +++ b/modules/cassandra/src/test/bootstrap/aws/cassandra/cassandra-template.yaml @@ -0,0 +1,889 @@ +# +# Licensed to the Apache Software Foundation (ASF) under one or more +# contributor license agreements. See the NOTICE file distributed with +# this work for additional information regarding copyright ownership. +# The ASF licenses this file to You under the Apache License, Version 2.0 +# (the "License"); you may not use this file except in compliance with +# the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +# + +# Cassandra storage config YAML + +# NOTE: +# See http://wiki.apache.org/cassandra/StorageConfiguration for +# full explanations of configuration directives +# /NOTE + +# The name of the cluster. This is mainly used to prevent machines in +# one logical cluster from joining another. 
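+#
+# Note: this file is a template rather than the final cassandra.yaml. At node startup,
+# setupCassandraSeeds() in cassandra-start.sh substitutes the ${CASSANDRA_SEEDS} placeholder
+# (used in the seed_provider section below) with the seed list discovered via S3, roughly as:
+#
+#   cat /opt/cassandra/conf/cassandra-template.yaml \
+#     | sed -r "s/\\\$\{CASSANDRA_SEEDS\}/$CASSANDRA_SEEDS/g" > /opt/cassandra/conf/cassandra.yaml
+#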
+cluster_name: 'CassandraIgnite' + +# It makes new (non-seed) nodes automatically migrate the right data to themselves. +# When initializing a fresh cluster with no data, add auto_bootstrap: false +auto_bootstrap: false + +# This defines the number of tokens randomly assigned to this node on the ring +# The more tokens, relative to other nodes, the larger the proportion of data +# that this node will store. You probably want all nodes to have the same number +# of tokens assuming they have equal hardware capability. +# +# If you leave this unspecified, Cassandra will use the default of 1 token for legacy compatibility, +# and will use the initial_token as described below. +# +# Specifying initial_token will override this setting on the node's initial start, +# on subsequent starts, this setting will apply even if initial token is set. +# +# If you already have a cluster with 1 token per node, and wish to migrate to +# multiple tokens per node, see http://wiki.apache.org/cassandra/Operations +num_tokens: 256 + +# initial_token allows you to specify tokens manually. While you can use # it with +# vnodes (num_tokens > 1, above) -- in which case you should provide a +# comma-separated list -- it's primarily used when adding nodes # to legacy clusters +# that do not have vnodes enabled. +# initial_token: + +# See http://wiki.apache.org/cassandra/HintedHandoff +# May either be "true" or "false" to enable globally, or contain a list +# of data centers to enable per-datacenter. +# hinted_handoff_enabled: DC1,DC2 +hinted_handoff_enabled: true +# this defines the maximum amount of time a dead host will have hints +# generated. After it has been dead this long, new hints for it will not be +# created until it has been seen alive and gone down again. +max_hint_window_in_ms: 10800000 # 3 hours +# Maximum throttle in KBs per second, per delivery thread. This will be +# reduced proportionally to the number of nodes in the cluster. (If there +# are two nodes in the cluster, each delivery thread will use the maximum +# rate; if there are three, each will throttle to half of the maximum, +# since we expect two nodes to be delivering hints simultaneously.) +hinted_handoff_throttle_in_kb: 1024 +# Number of threads with which to deliver hints; +# Consider increasing this number when you have multi-dc deployments, since +# cross-dc handoff tends to be slower +max_hints_delivery_threads: 2 + +# Maximum throttle in KBs per second, total. This will be +# reduced proportionally to the number of nodes in the cluster. +batchlog_replay_throttle_in_kb: 1024 + +# Authentication backend, implementing IAuthenticator; used to identify users +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthenticator, +# PasswordAuthenticator}. +# +# - AllowAllAuthenticator performs no checks - set it to disable authentication. +# - PasswordAuthenticator relies on username/password pairs to authenticate +# users. It keeps usernames and hashed passwords in system_auth.credentials table. +# Please increase system_auth keyspace replication factor if you use this authenticator. +# If using PasswordAuthenticator, CassandraRoleManager must also be used (see below) +#authenticator: PasswordAuthenticator +authenticator: AllowAllAuthenticator + +# Authorization backend, implementing IAuthorizer; used to limit access/provide permissions +# Out of the box, Cassandra provides org.apache.cassandra.auth.{AllowAllAuthorizer, +# CassandraAuthorizer}. 
+# +# - AllowAllAuthorizer allows any action to any user - set it to disable authorization. +# - CassandraAuthorizer stores permissions in system_auth.permissions table. Please +# increase system_auth keyspace replication factor if you use this authorizer. +#authorizer: CassandraAuthorizer +authorizer: AllowAllAuthorizer + +# Part of the Authentication & Authorization backend, implementing IRoleManager; used +# to maintain grants and memberships between roles. +# Out of the box, Cassandra provides org.apache.cassandra.auth.CassandraRoleManager, +# which stores role information in the system_auth keyspace. Most functions of the +# IRoleManager require an authenticated login, so unless the configured IAuthenticator +# actually implements authentication, most of this functionality will be unavailable. +# +# - CassandraRoleManager stores role data in the system_auth keyspace. Please +# increase system_auth keyspace replication factor if you use this role manager. +role_manager: CassandraRoleManager + +# Validity period for roles cache (fetching permissions can be an +# expensive operation depending on the authorizer). Granted roles are cached for +# authenticated sessions in AuthenticatedUser and after the period specified +# here, become eligible for (async) reload. +# Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthenticator. +roles_validity_in_ms: 2000 + +# Refresh interval for roles cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If roles_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as roles_validity_in_ms. +# roles_update_interval_in_ms: 1000 + +# Validity period for permissions cache (fetching permissions can be an +# expensive operation depending on the authorizer, CassandraAuthorizer is +# one example). Defaults to 2000, set to 0 to disable. +# Will be disabled automatically for AllowAllAuthorizer. +permissions_validity_in_ms: 2000 + +# Refresh interval for permissions cache (if enabled). +# After this interval, cache entries become eligible for refresh. Upon next +# access, an async reload is scheduled and the old value returned until it +# completes. If permissions_validity_in_ms is non-zero, then this must be +# also. +# Defaults to the same value as permissions_validity_in_ms. +# permissions_update_interval_in_ms: 1000 + +# The partitioner is responsible for distributing groups of rows (by +# partition key) across nodes in the cluster. You should leave this +# alone for new clusters. The partitioner can NOT be changed without +# reloading all data, so when upgrading you should set this to the +# same partitioner you were already using. +# +# Besides Murmur3Partitioner, partitioners included for backwards +# compatibility include RandomPartitioner, ByteOrderedPartitioner, and +# OrderPreservingPartitioner. +# +partitioner: org.apache.cassandra.dht.Murmur3Partitioner + +# Directories where Cassandra should store data on disk. Cassandra +# will spread data evenly across them, subject to the granularity of +# the configured compaction strategy. +# If not set, the default directory is $CASSANDRA_HOME/data/data. +data_file_directories: + - /storage/cassandra/data + +# commit log. when running on magnetic HDD, this should be a +# separate spindle than the data directories. +# If not set, the default directory is $CASSANDRA_HOME/data/commitlog. 
+commitlog_directory: /storage/cassandra/commitlog + +# policy for data disk failures: +# die: shut down gossip and client transports and kill the JVM for any fs errors or +# single-sstable errors, so the node can be replaced. +# stop_paranoid: shut down gossip and client transports even for single-sstable errors, +# kill the JVM for errors during startup. +# stop: shut down gossip and client transports, leaving the node effectively dead, but +# can still be inspected via JMX, kill the JVM for errors during startup. +# best_effort: stop using the failed disk and respond to requests based on +# remaining available sstables. This means you WILL see obsolete +# data at CL.ONE! +# ignore: ignore fatal errors and let requests fail, as in pre-1.2 Cassandra +disk_failure_policy: stop + +# policy for commit disk failures: +# die: shut down gossip and Thrift and kill the JVM, so the node can be replaced. +# stop: shut down gossip and Thrift, leaving the node effectively dead, but +# can still be inspected via JMX. +# stop_commit: shutdown the commit log, letting writes collect but +# continuing to service reads, as in pre-2.0.5 Cassandra +# ignore: ignore fatal errors and let the batches fail +commit_failure_policy: stop + +# Maximum size of the key cache in memory. +# +# Each key cache hit saves 1 seek and each row cache hit saves 2 seeks at the +# minimum, sometimes more. The key cache is fairly tiny for the amount of +# time it saves, so it's worthwhile to use it at large numbers. +# The row cache saves even more time, but must contain the entire row, +# so it is extremely space-intensive. It's best to only use the +# row cache if you have hot rows or static rows. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(5% of Heap (in MB), 100MB)). Set to 0 to disable key cache. +key_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the key cache. Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 14400 or 4 hours. +key_cache_save_period: 14400 + +# Number of keys from the key cache to save +# Disabled by default, meaning all keys are going to be saved +# key_cache_keys_to_save: 100 + +# Row cache implementation class name. +# Available implementations: +# org.apache.cassandra.cache.OHCProvider Fully off-heap row cache implementation (default). +# org.apache.cassandra.cache.SerializingCacheProvider This is the row cache implementation availabile +# in previous releases of Cassandra. +# row_cache_class_name: org.apache.cassandra.cache.OHCProvider + +# Maximum size of the row cache in memory. +# Please note that OHC cache implementation requires some additional off-heap memory to manage +# the map structures and some in-flight memory during operations before/after cache entries can be +# accounted against the cache capacity. This overhead is usually small compared to the whole capacity. +# Do not specify more memory that the system can afford in the worst usual situation and leave some +# headroom for OS block level cache. Do never allow your system to swap. +# +# Default value is 0, to disable row caching. +row_cache_size_in_mb: 0 + +# Duration in seconds after which Cassandra should save the row cache. 
+# Caches are saved to saved_caches_directory as specified in this configuration file. +# +# Saved caches greatly improve cold-start speeds, and is relatively cheap in +# terms of I/O for the key cache. Row cache saving is much more expensive and +# has limited use. +# +# Default is 0 to disable saving the row cache. +row_cache_save_period: 0 + +# Number of keys from the row cache to save. +# Specify 0 (which is the default), meaning all keys are going to be saved +# row_cache_keys_to_save: 100 + +# Maximum size of the counter cache in memory. +# +# Counter cache helps to reduce counter locks' contention for hot counter cells. +# In case of RF = 1 a counter cache hit will cause Cassandra to skip the read before +# write entirely. With RF > 1 a counter cache hit will still help to reduce the duration +# of the lock hold, helping with hot counter cell updates, but will not allow skipping +# the read entirely. Only the local (clock, count) tuple of a counter cell is kept +# in memory, not the whole counter, so it's relatively cheap. +# +# NOTE: if you reduce the size, you may not get you hottest keys loaded on startup. +# +# Default value is empty to make it "auto" (min(2.5% of Heap (in MB), 50MB)). Set to 0 to disable counter cache. +# NOTE: if you perform counter deletes and rely on low gcgs, you should disable the counter cache. +counter_cache_size_in_mb: + +# Duration in seconds after which Cassandra should +# save the counter cache (keys only). Caches are saved to saved_caches_directory as +# specified in this configuration file. +# +# Default is 7200 or 2 hours. +counter_cache_save_period: 7200 + +# Number of keys from the counter cache to save +# Disabled by default, meaning all keys are going to be saved +# counter_cache_keys_to_save: 100 + +# The off-heap memory allocator. Affects storage engine metadata as +# well as caches. Experiments show that JEMAlloc saves some memory +# than the native GCC allocator (i.e., JEMalloc is more +# fragmentation-resistant). +# +# Supported values are: NativeAllocator, JEMallocAllocator +# +# If you intend to use JEMallocAllocator you have to install JEMalloc as library and +# modify cassandra-env.sh as directed in the file. +# +# Defaults to NativeAllocator +# memory_allocator: NativeAllocator + +# saved caches +# If not set, the default directory is $CASSANDRA_HOME/data/saved_caches. +saved_caches_directory: /storage/cassandra/saved_caches + +# commitlog_sync may be either "periodic" or "batch." +# +# When in batch mode, Cassandra won't ack writes until the commit log +# has been fsynced to disk. It will wait +# commitlog_sync_batch_window_in_ms milliseconds between fsyncs. +# This window should be kept short because the writer threads will +# be unable to do extra work while waiting. (You may need to increase +# concurrent_writes for the same reason.) +# +# commitlog_sync: batch +# commitlog_sync_batch_window_in_ms: 2 +# +# the other option is "periodic" where writes may be acked immediately +# and the CommitLog is simply synced every commitlog_sync_period_in_ms +# milliseconds. +commitlog_sync: periodic +commitlog_sync_period_in_ms: 10000 + +# The size of the individual commitlog file segments. A commitlog +# segment may be archived, deleted, or recycled once all the data +# in it (potentially from each columnfamily in the system) has been +# flushed to sstables. 
+# +# The default size is 32, which is almost always fine, but if you are +# archiving commitlog segments (see commitlog_archiving.properties), +# then you probably want a finer granularity of archiving; 8 or 16 MB +# is reasonable. +commitlog_segment_size_in_mb: 32 + +# Compression to apply to the commit log. If omitted, the commit log +# will be written uncompressed. LZ4, Snappy, and Deflate compressors +# are supported. +#commitlog_compression: +# - class_name: LZ4Compressor +# parameters: +# - + +# any class that implements the SeedProvider interface and has a +# constructor that takes a Map of parameters will do. +seed_provider: + # Addresses of hosts that are deemed contact points. + # Cassandra nodes use this list of hosts to find each other and learn + # the topology of the ring. You must change this if you are running + # multiple nodes! + - class_name: org.apache.cassandra.locator.SimpleSeedProvider + parameters: + # seeds is actually a comma-delimited list of addresses. + # Ex: ",," + - seeds: "${CASSANDRA_SEEDS}" + +# For workloads with more data than can fit in memory, Cassandra's +# bottleneck will be reads that need to fetch data from +# disk. "concurrent_reads" should be set to (16 * number_of_drives) in +# order to allow the operations to enqueue low enough in the stack +# that the OS and drives can reorder them. Same applies to +# "concurrent_counter_writes", since counter writes read the current +# values before incrementing and writing them back. +# +# On the other hand, since writes are almost never IO bound, the ideal +# number of "concurrent_writes" is dependent on the number of cores in +# your system; (8 * number_of_cores) is a good rule of thumb. +concurrent_reads: 32 +concurrent_writes: 32 +concurrent_counter_writes: 32 + +# Total memory to use for sstable-reading buffers. Defaults to +# the smaller of 1/4 of heap or 512MB. +# file_cache_size_in_mb: 512 + +# Total permitted memory to use for memtables. Cassandra will stop +# accepting writes when the limit is exceeded until a flush completes, +# and will trigger a flush based on memtable_cleanup_threshold +# If omitted, Cassandra will set both to 1/4 the size of the heap. +# memtable_heap_space_in_mb: 2048 +# memtable_offheap_space_in_mb: 2048 + +# Ratio of occupied non-flushing memtable size to total permitted size +# that will trigger a flush of the largest memtable. Lager mct will +# mean larger flushes and hence less compaction, but also less concurrent +# flush activity which can make it difficult to keep your disks fed +# under heavy write load. +# +# memtable_cleanup_threshold defaults to 1 / (memtable_flush_writers + 1) +# memtable_cleanup_threshold: 0.11 + +# Specify the way Cassandra allocates and manages memtable memory. +# Options are: +# heap_buffers: on heap nio buffers +# offheap_buffers: off heap (direct) nio buffers +# offheap_objects: native memory, eliminating nio buffer heap overhead +memtable_allocation_type: heap_buffers + +# Total space to use for commit logs on disk. +# +# If space gets above this value, Cassandra will flush every dirty CF +# in the oldest segment and remove it. So a small total commitlog space +# will tend to cause more flush activity on less-active columnfamilies. +# +# The default value is 8192. +# commitlog_total_space_in_mb: 8192 + +# This sets the amount of memtable flush writer threads. These will +# be blocked by disk io, and each one will hold a memtable in memory +# while blocked. 
+
+# This sets the number of memtable flush writer threads. These will
+# be blocked by disk io, and each one will hold a memtable in memory
+# while blocked.
+#
+# memtable_flush_writers defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#memtable_flush_writers: 8
+
+# A fixed memory pool size in MB for SSTable index summaries. If left
+# empty, this will default to 5% of the heap size. If the memory usage of
+# all index summaries exceeds this limit, SSTables with low read rates will
+# shrink their index summaries in order to meet this limit. However, this
+# is a best-effort process. In extreme conditions Cassandra may need to use
+# more than this amount of memory.
+index_summary_capacity_in_mb:
+
+# How frequently index summaries should be resampled. This is done
+# periodically to redistribute memory from the fixed-size pool to sstables
+# proportional to their recent read rates. Setting to -1 will disable this
+# process, leaving existing index summaries at their current sampling level.
+index_summary_resize_interval_in_minutes: 60
+
+# Whether to, when doing sequential writing, fsync() at intervals in
+# order to force the operating system to flush the dirty
+# buffers. Enable this to avoid sudden dirty buffer flushing from
+# impacting read latencies. Almost always a good idea on SSDs; not
+# necessarily on platters.
+trickle_fsync: false
+trickle_fsync_interval_in_kb: 10240
+
+# TCP port, for commands and data
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+storage_port: 7000
+
+# SSL port, for encrypted communication. Unused unless enabled in
+# encryption_options
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+ssl_storage_port: 7001
+
+# Address or interface to bind to and tell other Cassandra nodes to connect to.
+# You _must_ change this if you want multiple nodes to be able to communicate!
+#
+# Set listen_address OR listen_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving it blank leaves it up to InetAddress.getLocalHost(). This
+# will always do the Right Thing _if_ the node is properly configured
+# (hostname, name resolution, etc), and the Right Thing is to use the
+# address associated with the hostname (it might not be).
+#
+# Setting listen_address to 0.0.0.0 is always wrong.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using listen_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+listen_address:
+# listen_interface: eth0
+# listen_interface_prefer_ipv6: false
+
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
+# Internode authentication backend, implementing IInternodeAuthenticator;
+# used to allow/disallow connections from peer nodes.
+# internode_authenticator: org.apache.cassandra.auth.AllowAllInternodeAuthenticator
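As an illustration of the interface-based alternative to listen_address described above: eth0 is just the name used in the commented example, a real node would use whichever interface carries cluster traffic, and listen_address would then be left unset, since the two options are mutually exclusive.

listen_interface: eth0
listen_interface_prefer_ipv6: false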
+
+# Whether to start the native transport server.
+# Please note that the address on which the native transport is bound is the
+# same as the rpc_address. The port however is different and specified below.
+start_native_transport: true
+# port for the CQL native transport to listen for clients on
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+native_transport_port: 9042
+# The maximum threads for handling requests when the native transport is used.
+# This is similar to rpc_max_threads though the default differs slightly (and
+# there is no native_transport_min_threads, idle threads will always be stopped
+# after 30 seconds).
+# native_transport_max_threads: 128
+#
+# The maximum size of allowed frame. Frame (requests) larger than this will
+# be rejected as invalid. The default is 256MB.
+# native_transport_max_frame_size_in_mb: 256
+
+# The maximum number of concurrent client connections.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections: -1
+
+# The maximum number of concurrent client connections per source ip.
+# The default is -1, which means unlimited.
+# native_transport_max_concurrent_connections_per_ip: -1
+
+# Whether to start the thrift rpc server.
+start_rpc: true
+
+# The address or interface to bind the Thrift RPC service and native transport
+# server to.
+#
+# Set rpc_address OR rpc_interface, not both. Interfaces must correspond
+# to a single address, IP aliasing is not supported.
+#
+# Leaving rpc_address blank has the same effect as on listen_address
+# (i.e. it will be based on the configured hostname of the node).
+#
+# Note that unlike listen_address, you can specify 0.0.0.0, but you must also
+# set broadcast_rpc_address to a value other than 0.0.0.0.
+#
+# For security reasons, you should not expose this port to the internet. Firewall it if needed.
+#
+# If you choose to specify the interface by name and the interface has an ipv4 and an ipv6 address
+# you can specify which should be chosen using rpc_interface_prefer_ipv6. If false the first ipv4
+# address will be used. If true the first ipv6 address will be used. Defaults to false preferring
+# ipv4. If there is only one address it will be selected regardless of ipv4/ipv6.
+rpc_address:
+# rpc_interface: eth1
+# rpc_interface_prefer_ipv6: false
+
+# port for Thrift to listen for clients on
+rpc_port: 9160
+
+# RPC address to broadcast to drivers and other Cassandra nodes. This cannot
+# be set to 0.0.0.0. If left blank, this will be set to the value of
+# rpc_address. If rpc_address is set to 0.0.0.0, broadcast_rpc_address must
+# be set.
+broadcast_rpc_address:
+
+# enable or disable keepalive on rpc/native connections
+rpc_keepalive: true
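As an illustration of the rule above that a wildcard rpc_address must be paired with an explicit broadcast_rpc_address (10.0.0.5 is a made-up address, not one used by this setup):

rpc_address: 0.0.0.0
broadcast_rpc_address: 10.0.0.5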
+
+# Cassandra provides two out-of-the-box options for the RPC Server:
+#
+# sync -> One thread per thrift connection. For a very large number of clients, memory
+#         will be your limiting factor. On a 64 bit JVM, 180KB is the minimum stack size
+#         per thread, and that will correspond to your use of virtual memory (but physical memory
+#         may be limited depending on use of stack space).
+#
+# hsha -> Stands for "half synchronous, half asynchronous." All thrift clients are handled
+#         asynchronously using a small number of threads that does not vary with the number
+#         of thrift clients (and thus scales well to many clients). The rpc requests are still
+#         synchronous (one thread per active request). If hsha is selected then it is essential
+#         that rpc_max_threads is changed from the default value of unlimited.
+#
+# The default is sync because on Windows hsha is about 30% slower. On Linux,
+# sync/hsha performance is about the same, with hsha of course using less memory.
+#
+# Alternatively, you can provide your own RPC server by providing the fully-qualified class name
+# of an o.a.c.t.TServerFactory that can create an instance of it.
+rpc_server_type: sync
+
+# Uncomment rpc_min|max_thread to set request pool size limits.
+#
+# Regardless of your choice of RPC server (see above), the number of maximum requests in the
+# RPC thread pool dictates how many concurrent requests are possible (but if you are using the sync
+# RPC server, it also dictates the number of clients that can be connected at all).
+#
+# The default is unlimited and thus provides no protection against clients overwhelming the server. You are
+# encouraged to set a maximum that makes sense for you in production, but do keep in mind that
+# rpc_max_threads represents the maximum number of client requests this server may execute concurrently.
+#
+# rpc_min_threads: 16
+# rpc_max_threads: 2048
+
+# uncomment to set socket buffer sizes on rpc connections
+# rpc_send_buff_size_in_bytes:
+# rpc_recv_buff_size_in_bytes:
+
+# Uncomment to set socket buffer size for internode communication
+# Note that when setting this, the buffer size is limited by net.core.wmem_max
+# and when not setting it it is defined by net.ipv4.tcp_wmem
+# See:
+# /proc/sys/net/core/wmem_max
+# /proc/sys/net/core/rmem_max
+# /proc/sys/net/ipv4/tcp_wmem
+# /proc/sys/net/ipv4/tcp_rmem
+# and: man tcp
+# internode_send_buff_size_in_bytes:
+# internode_recv_buff_size_in_bytes:
+
+# Frame size for thrift (maximum message length).
+thrift_framed_transport_size_in_mb: 15
+
+# Set to true to have Cassandra create a hard link to each sstable
+# flushed or streamed locally in a backups/ subdirectory of the
+# keyspace data. Removing these links is the operator's
+# responsibility.
+incremental_backups: false
+
+# Whether or not to take a snapshot before each compaction. Be
+# careful using this option, since Cassandra won't clean up the
+# snapshots for you. Mostly useful if you're paranoid when there
+# is a data format change.
+snapshot_before_compaction: false
+
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you will
+# lose data on truncation or drop.
+auto_snapshot: true
+
+# When executing a scan, within or across a partition, we need to keep the
+# tombstones seen in memory so we can return them to the coordinator, which
+# will use them to make sure other replicas also know about the deleted rows.
+# With workloads that generate a lot of tombstones, this can cause performance
+# problems and even exhaust the server heap.
+# (http://www.datastax.com/dev/blog/cassandra-anti-patterns-queues-and-queue-like-datasets)
+# Adjust the thresholds here if you understand the dangers and want to
+# scan more tombstones anyway. These thresholds may also be adjusted at runtime
+# using the StorageService mbean.
+tombstone_warn_threshold: 1000
+tombstone_failure_threshold: 100000
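If a workload were known to scan large numbers of tombstones and the operator accepted the risks spelled out above, the limits could be raised; the values below are arbitrary illustrations, not recommendations for this test cluster:

tombstone_warn_threshold: 5000
tombstone_failure_threshold: 500000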
+
+# Granularity of the collation index of rows within a partition.
+# Increase if your rows are large, or if you have a very large
+# number of rows per partition. The competing goals are these:
+#   1) a smaller granularity means more index entries are generated
+#      and looking up rows within the partition by collation column
+#      is faster
+#   2) but, Cassandra will keep the collation index in memory for hot
+#      rows (as part of the key cache), so a larger granularity means
+#      you can cache more hot rows
+column_index_size_in_kb: 64
+
+
+# Log WARN on any batch size exceeding this value. 5kb per batch by default.
+# Caution should be taken on increasing the size of this threshold as it can lead to node instability.
+batch_size_warn_threshold_in_kb: 5
+
+# Fail any batch exceeding this value. 50kb (10x warn threshold) by default.
+batch_size_fail_threshold_in_kb: 50
+
+# Number of simultaneous compactions to allow, NOT including
+# validation "compactions" for anti-entropy repair. Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compaction. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
+# compaction_throughput_mb_per_sec first.
+#
+# concurrent_compactors defaults to the smaller of (number of disks,
+# number of cores), with a minimum of 2 and a maximum of 8.
+#
+# If your data directories are backed by SSD, you should increase this
+# to the number of cores.
+#concurrent_compactors: 1
+
+# Throttles compaction to the given total throughput across the entire
+# system. The faster you insert data, the faster you need to compact in
+# order to keep the sstable count down, but in general, setting this to
+# 16 to 32 times the rate you are inserting data is more than sufficient.
+# Setting this to 0 disables throttling. Note that this accounts for all types
+# of compaction, including validation compaction.
+compaction_throughput_mb_per_sec: 16
+
+# Log a warning when compacting partitions larger than this value
+compaction_large_partition_warning_threshold_mb: 100
+
+# When compacting, the replacement sstable(s) can be opened before they
+# are completely written, and used in place of the prior sstables for
+# any range that has been written. This helps to smoothly transfer reads
+# between the sstables, reducing page cache churn and keeping hot rows hot
+sstable_preemptive_open_interval_in_mb: 50
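A worked example of the 16x-32x guideline above: for a node ingesting roughly 2 MB/s of data (a made-up figure), the throttle would land between 32 and 64 MB/s.

# 16 * 2 MB/s ingest = 32 MB/s (lower bound of the guideline)
compaction_throughput_mb_per_sec: 32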
+
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 200 Mbps or 25 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 200
+
+# Throttles all streaming file transfer between the datacenters,
+# this setting allows users to throttle inter dc stream throughput in addition
+# to throttling all network stream traffic as configured with
+# stream_throughput_outbound_megabits_per_sec
+# inter_dc_stream_throughput_outbound_megabits_per_sec:
+
+# How long the coordinator should wait for read operations to complete
+read_request_timeout_in_ms: 50000
+# How long the coordinator should wait for seq or index scans to complete
+range_request_timeout_in_ms: 10000
+# How long the coordinator should wait for writes to complete
+write_request_timeout_in_ms: 20000
+# How long the coordinator should wait for counter writes to complete
+counter_write_request_timeout_in_ms: 5000
+# How long a coordinator should continue to retry a CAS operation
+# that contends with other proposals for the same row
+cas_contention_timeout_in_ms: 1000
+# How long the coordinator should wait for truncates to complete
+# (This can be much longer, because unless auto_snapshot is disabled
+# we need to flush first so we can snapshot before removing the data.)
+truncate_request_timeout_in_ms: 60000
+# The default timeout for other, miscellaneous operations
+request_timeout_in_ms: 10000
+
+# Enable operation timeout information exchange between nodes to accurately
+# measure request timeouts. If disabled, replicas will assume that requests
+# were forwarded to them instantly by the coordinator, which means that
+# under overload conditions we will waste that much extra time processing
+# already-timed-out requests.
+#
+# Warning: before enabling this property make sure ntp is installed
+# and the times are synchronized between the nodes.
+cross_node_timeout: false
+
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This _can_ involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# Default value is 3600000, which means streams timeout after an hour.
+# streaming_socket_timeout_in_ms: 3600000
+
+# phi value that must be reached for a host to be marked down.
+# most users should never need to adjust this.
+# phi_convict_threshold: 8
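Because this template is used for EC2-hosted test clusters, where network jitter tends to be higher than on bare metal, operators sometimes raise the phi threshold slightly to avoid flapping failure detection; this is a commonly cited tuning, not something verified for this setup:

# phi_convict_threshold: 12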
+
+# endpoint_snitch -- Set this to a class that implements
+# IEndpointSnitch. The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks." Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
+# Out of the box, Cassandra provides
+#  - SimpleSnitch:
+#    Treats Strategy order as proximity. This can improve cache
+#    locality when disabling read repair. Only appropriate for
+#    single-datacenter deployments.
+#  - GossipingPropertyFileSnitch
+#    This should be your go-to snitch for production use. The rack
+#    and datacenter for the local node are defined in
+#    cassandra-rackdc.properties and propagated to other nodes via
+#    gossip. If cassandra-topology.properties exists, it is used as a
+#    fallback, allowing migration from the PropertyFileSnitch.
+#  - PropertyFileSnitch:
+#    Proximity is determined by rack and data center, which are
+#    explicitly configured in cassandra-topology.properties.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region. Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity. (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall. (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's IP
+#    address, respectively. Unless this happens to match your
+#    deployment conventions, this is best used as an example of
+#    writing a custom Snitch class and is provided in that spirit.
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: GossipingPropertyFileSnitch
+
+# controls how often to perform the more expensive part of host score
+# calculation
+dynamic_snitch_update_interval_in_ms: 100
+# controls how often to reset all host scores, allowing a bad host to
+# possibly recover
+dynamic_snitch_reset_interval_in_ms: 600000
+# if set greater than zero and read_repair_chance is < 1.0, this will allow
+# 'pinning' of replicas to hosts in order to increase cache capacity.
+# The badness threshold will control how much worse the pinned host has to be
+# before the dynamic snitch will prefer other replicas over it. This is
+# expressed as a double which represents a percentage. Thus, a value of
+# 0.2 means Cassandra would continue to prefer the static snitch values
+# until the pinned host was 20% worse than the fastest.
+dynamic_snitch_badness_threshold: 0.1
+
+# request_scheduler -- Set this to a class that implements
+# RequestScheduler, which will schedule incoming client requests
+# according to the specific policy. This is useful for multi-tenancy
+# with a single Cassandra cluster.
+# NOTE: This is specifically for requests from the client and does
+# not affect inter node communication.
+# org.apache.cassandra.scheduler.NoScheduler - No scheduling takes place
+# org.apache.cassandra.scheduler.RoundRobinScheduler - Round robin of
+# client requests to a node with a separate queue for each
+# request_scheduler_id. The scheduler is further customized by
+# request_scheduler_options as described below.
+request_scheduler: org.apache.cassandra.scheduler.NoScheduler
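Given that the surrounding bootstrap scripts target EC2, the Ec2Snitch described in the list above (Region as datacenter, Availability Zone as rack, single-Region only) would be a plausible alternative to the GossipingPropertyFileSnitch chosen here; a one-line sketch, not what this commit configures:

endpoint_snitch: Ec2Snitch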
+
+# Scheduler Options vary based on the type of scheduler
+# NoScheduler - Has no options
+# RoundRobin
+#  - throttle_limit -- The throttle_limit is the number of in-flight
+#                      requests per client. Requests beyond
+#                      that limit are queued up until
+#                      running requests can complete.
+#                      The value of 80 here is twice the number of
+#                      concurrent_reads + concurrent_writes.
+#  - default_weight -- default_weight is optional and allows for
+#                      overriding the default which is 1.
+#  - weights -- Weights are optional and will default to 1 or the
+#               overridden default_weight. The weight translates into how
+#               many requests are handled during each turn of the
+#               RoundRobin, based on the scheduler id.
+#
+# request_scheduler_options:
+#    throttle_limit: 80
+#    default_weight: 5
+#    weights:
+#      Keyspace1: 1
+#      Keyspace2: 5
+
+# request_scheduler_id -- An identifier based on which to perform
+# the request scheduling. Currently the only valid option is keyspace.
+# request_scheduler_id: keyspace
+
+# Enable or disable inter-node encryption
+# Default settings are TLS v1, RSA 1024-bit keys (it is imperative that
+# users generate their own keys), with TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
+# suite for authentication, key exchange and encryption of the actual data transfers.
+# Use the DHE/ECDHE ciphers if running in FIPS 140 compliant mode.
+# NOTE: No custom encryption options are enabled at the moment
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
+#
+# The passwords used in these options must match the passwords used when generating
+# the keystore and truststore. For instructions on generating these files, see:
+# http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
+server_encryption_options:
+    internode_encryption: none
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    truststore: conf/.truststore
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+    # require_client_auth: false
+
+# enable or disable client/server encryption.
+client_encryption_options:
+    enabled: false
+    keystore: conf/.keystore
+    keystore_password: cassandra
+    # require_client_auth: false
+    # Set truststore and truststore_password if require_client_auth is true
+    # truststore: conf/.truststore
+    # truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: [TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA,TLS_DHE_RSA_WITH_AES_128_CBC_SHA,TLS_DHE_RSA_WITH_AES_256_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA,TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA]
+
+# internode_compression controls whether traffic between nodes is
+# compressed.
+# can be:  all  - all traffic is compressed
+#          dc   - traffic between different datacenters is compressed
+#          none - nothing is compressed.
+internode_compression: all
+
+# Enable or disable tcp_nodelay for inter-dc communication.
+# Disabling it will result in larger (but fewer) network packets being sent,
+# reducing overhead from the TCP protocol itself, at the cost of increasing
+# latency if you block for cross-datacenter responses.
+inter_dc_tcp_nodelay: false
+
+# TTL for different trace types used during logging of the repair process.
+tracetype_query_ttl: 86400
+tracetype_repair_ttl: 604800
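To make the dc option described above concrete, a sketch of encrypting only cross-datacenter traffic; the keystore paths and passwords are simply the placeholder defaults from the block above and would need operator-generated keys before use:

server_encryption_options:
    internode_encryption: dc
    keystore: conf/.keystore
    keystore_password: cassandra
    truststore: conf/.truststore
    truststore_password: cassandra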
+
+# UDFs (user defined functions) are disabled by default.
+# As of Cassandra 2.2, there is no security manager or anything else in place that
+# prevents execution of evil code. CASSANDRA-9402 will fix this issue for Cassandra 3.0.
+# This will inherently be backwards-incompatible with any 2.2 UDFs that perform insecure
+# operations such as opening a socket or writing to the filesystem.
+enable_user_defined_functions: false
+
+# The default Windows kernel timer and scheduling resolution is 15.6ms for power conservation.
+# Lowering this value on Windows can provide much tighter latency and better throughput, however
+# some virtualized environments may see a negative performance impact from changing this setting
+# below their system default. The sysinternals 'clockres' tool can confirm your system's default
+# setting.
+windows_timer_interval: 1