brooklyn-commits mailing list archives

From henev...@apache.org
Subject [28/51] [abbrv] [partial] brooklyn-library git commit: move subdir from incubator up a level as it is promoted to its own repo (first non-incubator commit!)
Date Mon, 01 Feb 2016 17:48:04 GMT
http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeImpl.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeImpl.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeImpl.java
deleted file mode 100644
index f72e2ef..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeImpl.java
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.crate;
-
-import org.apache.brooklyn.core.config.render.RendererHints;
-import org.apache.brooklyn.core.entity.Attributes;
-import org.apache.brooklyn.enricher.stock.Enrichers;
-import org.apache.brooklyn.entity.java.JavaAppUtils;
-import org.apache.brooklyn.entity.software.base.SoftwareProcessImpl;
-import org.apache.brooklyn.feed.http.HttpFeed;
-import org.apache.brooklyn.feed.http.HttpPollConfig;
-import org.apache.brooklyn.feed.http.HttpValueFunctions;
-import org.apache.brooklyn.feed.jmx.JmxFeed;
-import org.apache.brooklyn.util.guava.Functionals;
-
-public class CrateNodeImpl extends SoftwareProcessImpl implements CrateNode{
-
-    private JmxFeed jmxFeed;
-    private HttpFeed httpFeed;
-
-    static {
-        JavaAppUtils.init();
-        RendererHints.register(MANAGEMENT_URL, RendererHints.namedActionWithUrl());
-    }
-
-    @Override
-    public Class getDriverInterface() {
-        return CrateNodeDriver.class;
-    }
-
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        connectServiceUpIsRunning();
-        jmxFeed = JavaAppUtils.connectMXBeanSensors(this);
-        sensors().set(DATASTORE_URL, "crate://" + getAttribute(HOSTNAME) + ":" + getPort());
-        String url = "http://" + getAttribute(HOSTNAME) + ":" + getHttpPort();
-        sensors().set(MANAGEMENT_URL, url);
-
-        httpFeed = HttpFeed.builder()
-                .entity(this)
-                .baseUri(url)
-                .poll(new HttpPollConfig<String>(SERVER_NAME)
-                        .onSuccess(HttpValueFunctions.jsonContents("name", String.class)))
-                .poll(new HttpPollConfig<Integer>(SERVER_STATUS)
-                        .onSuccess(HttpValueFunctions.jsonContents("status", Integer.class)))
-                .poll(new HttpPollConfig<Boolean>(SERVER_OK)
-                        .onSuccess(HttpValueFunctions.jsonContents("ok", Boolean.class)))
-                .poll(new HttpPollConfig<String>(SERVER_BUILD_TIMESTAMP)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[]{"version", "build_timestamp"}, String.class)))
-                .poll(new HttpPollConfig<String>(SERVER_BUILD_HASH)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[]{"version", "build_hash"}, String.class)))
-                .poll(new HttpPollConfig<Boolean>(SERVER_IS_BUILD_SNAPSHOT)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[] {"version", "build_snapshot"}, Boolean.class)))
-                .poll(new HttpPollConfig<String>(SERVER_LUCENE_VERSION)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[] {"version", "lucene_version"}, String.class)))
-                .poll(new HttpPollConfig<String>(SERVER_ES_VERSION)
-                        .onSuccess(HttpValueFunctions.jsonContents(new String[] {"version", "es_version"}, String.class)))
-                .build();
-
-        enrichers().add(Enrichers.builder().updatingMap(Attributes.SERVICE_NOT_UP_INDICATORS)
-                .from(SERVER_OK)
-                .computing(Functionals.ifNotEquals(true).value("Crate server reports it is not ok."))
-                .build());
-    }
-
-    @Override
-    protected void disconnectSensors() {
-        disconnectServiceUpIsRunning();
-        if (jmxFeed != null) jmxFeed.stop();
-        if (httpFeed != null) httpFeed.stop();
-        super.disconnectSensors();
-    }
-
-    public Integer getPort() {
-        return getAttribute(CRATE_PORT);
-    }
-
-    public Integer getHttpPort() {
-        return getAttribute(CRATE_HTTP_PORT);
-    }
-
-}

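The deleted CrateNodeImpl above is a compact example of Brooklyn's HTTP polling pattern: an HttpFeed is built against the node's management URL and JSON fields from Crate's admin endpoint are mapped onto sensors. A minimal sketch of the same pattern follows, reusing only classes already referenced in this commit (HttpFeed, HttpPollConfig, HttpValueFunctions, Sensors); the CLUSTER_NAME sensor and the "cluster_name" JSON field are assumptions for illustration, not part of CrateNode.

    // Hypothetical sensor; CrateNode declares the real ones (SERVER_NAME, SERVER_STATUS, ...).
    AttributeSensor<String> CLUSTER_NAME = Sensors.newStringSensor("crate.cluster.name");

    // Fragment intended for connectSensors() of a SoftwareProcessImpl subclass, mirroring the code above:
    HttpFeed feed = HttpFeed.builder()
            .entity(this)                                    // entity on which the sensor values are published
            .baseUri("http://" + getAttribute(HOSTNAME) + ":" + getHttpPort())
            .poll(new HttpPollConfig<String>(CLUSTER_NAME)
                    .onSuccess(HttpValueFunctions.jsonContents("cluster_name", String.class)))
            .build();
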
http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeSshDriver.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeSshDriver.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeSshDriver.java
deleted file mode 100644
index f972570..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/crate/CrateNodeSshDriver.java
+++ /dev/null
@@ -1,118 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.crate;
-
-import static java.lang.String.format;
-
-import java.util.List;
-
-import org.apache.brooklyn.api.entity.EntityLocal;
-import org.apache.brooklyn.core.entity.Entities;
-import org.apache.brooklyn.entity.java.JavaSoftwareProcessSshDriver;
-
-import com.google.common.collect.ImmutableList;
-
-import org.apache.brooklyn.location.ssh.SshMachineLocation;
-import org.apache.brooklyn.util.collections.MutableMap;
-import org.apache.brooklyn.util.net.Urls;
-import org.apache.brooklyn.util.os.Os;
-import org.apache.brooklyn.util.ssh.BashCommands;
-
-public class CrateNodeSshDriver extends JavaSoftwareProcessSshDriver {
-
-    public CrateNodeSshDriver(EntityLocal entity, SshMachineLocation machine) {
-        super(entity, machine);
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this);
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(),
-                resolver.getUnpackedDirectoryName(format("crate-%s", getVersion()))));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        List<String> commands = ImmutableList.<String>builder()
-                .addAll(BashCommands.commandsToDownloadUrlsAs(urls, saveAs))
-                .add("tar xvfz "+saveAs)
-                .build();
-
-        newScript(INSTALLING)
-                .failOnNonZeroResultCode()
-                .body.append(commands).execute();
-    }
-
-    @Override
-    public void customize() {
-        newScript(CUSTOMIZING)
-                .body.append("mkdir -p " + getDataLocation())
-                .execute();
-        copyTemplate(entity.getConfig(CrateNode.SERVER_CONFIG_URL), getConfigFileLocation());
-    }
-
-    @Override
-    public void launch() {
-        StringBuilder command = new StringBuilder(getExpandedInstallDir())
-                .append("/bin/crate ")
-                .append(" -d")
-                .append(" -p ").append(getPidFileLocation())
-                .append(" -Des.config=").append(getConfigFileLocation());
-        newScript(LAUNCHING)
-                .failOnNonZeroResultCode()
-                .body.append(command).execute();
-
-    }
-
-    @Override
-    public boolean isRunning() {
-        return newScript (MutableMap.of("usePidFile", getPidFileLocation()), CHECK_RUNNING)
-                .execute() == 0;
-    }
-
-    @Override
-    public void stop() {
-        // See https://crate.io/docs/stable/cli.html#signal-handling.
-        newScript(STOPPING)
-                .body.append("kill -USR2 `cat " + getPidFileLocation() + "`")
-                .execute();
-    }
-
-    protected String getConfigFileLocation() {
-        return Urls.mergePaths(getRunDir(), "config.yaml");
-    }
-
-    @Override
-    public String getLogFileLocation() {
-        return Urls.mergePaths(getRunDir(), "crate.log");
-    }
-
-    protected String getPidFileLocation () {
-        return Urls.mergePaths(getRunDir(), "pid.txt");
-    }
-
-    // public for use in template too.
-    public String getDataLocation() {
-        return Urls.mergePaths(getRunDir(), "data");
-    }
-
-}

http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbDriver.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbDriver.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbDriver.java
deleted file mode 100644
index 05c097a..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbDriver.java
+++ /dev/null
@@ -1,30 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mariadb;
-
-import org.apache.brooklyn.entity.software.base.SoftwareProcessDriver;
-import org.apache.brooklyn.util.core.task.system.ProcessTaskWrapper;
-
-/**
- * The {@link SoftwareProcessDriver} for MariaDB.
- */
-public interface MariaDbDriver extends SoftwareProcessDriver {
-    public String getStatusCmd();
-    public ProcessTaskWrapper<Integer> executeScriptAsync(String commands);
-}

http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNode.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNode.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNode.java
deleted file mode 100644
index 3c78d99..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNode.java
+++ /dev/null
@@ -1,98 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mariadb;
-
-import org.apache.brooklyn.api.catalog.Catalog;
-import org.apache.brooklyn.api.entity.ImplementedBy;
-import org.apache.brooklyn.api.objs.HasShortName;
-import org.apache.brooklyn.api.sensor.AttributeSensor;
-import org.apache.brooklyn.config.ConfigKey;
-import org.apache.brooklyn.core.config.ConfigKeys;
-import org.apache.brooklyn.core.config.MapConfigKey;
-import org.apache.brooklyn.core.entity.Attributes;
-import org.apache.brooklyn.core.location.PortRanges;
-import org.apache.brooklyn.core.sensor.BasicAttributeSensorAndConfigKey;
-import org.apache.brooklyn.core.sensor.PortAttributeSensorAndConfigKey;
-import org.apache.brooklyn.core.sensor.Sensors;
-import org.apache.brooklyn.core.sensor.BasicAttributeSensorAndConfigKey.StringAttributeSensorAndConfigKey;
-import org.apache.brooklyn.entity.database.DatabaseNode;
-import org.apache.brooklyn.entity.database.DatastoreMixins.DatastoreCommon;
-import org.apache.brooklyn.entity.software.base.SoftwareProcess;
-import org.apache.brooklyn.util.core.flags.SetFromFlag;
-
-@Catalog(name="MariaDB Node", description="MariaDB is an open source relational database management system (RDBMS)", iconUrl="classpath:///mariadb-logo-180x119.png")
-@ImplementedBy(MariaDbNodeImpl.class)
-public interface MariaDbNode extends SoftwareProcess, DatastoreCommon, HasShortName, DatabaseNode {
-
-    @SetFromFlag("version")
-    public static final ConfigKey<String> SUGGESTED_VERSION =
-        ConfigKeys.newConfigKeyWithDefault(SoftwareProcess.SUGGESTED_VERSION, "5.5.40");
-
-    // https://downloads.mariadb.org/interstitial/mariadb-5.5.33a/kvm-bintar-hardy-amd64/mariadb-5.5.33a-linux-x86_64.tar.gz/from/http://mirrors.coreix.net/mariadb
-    // the URL above redirects to download the artifact from the URLs below.
-    // Use `curl -sL -w "%{http_code} %{url_effective}\n" "http://..." -o target.tar.gz` to find out redirect URL.
-    //     64-bit: http://mirrors.coreix.net/mariadb/mariadb-5.5.40/bintar-linux-x86_64/mariadb-5.5.40-linux-x86_64.tar.gz
-    //     32-bit: http://mirrors.coreix.net/mariadb/mariadb-5.5.40/bintar-linux-x86/mariadb-5.5.40-linux-i686.tar.gz
-
-    @SetFromFlag("downloadUrl")
-    public static final BasicAttributeSensorAndConfigKey<String> DOWNLOAD_URL = new StringAttributeSensorAndConfigKey(
-          Attributes.DOWNLOAD_URL, "${driver.mirrorUrl}/mariadb-${version}/${driver.downloadParentDir}/mariadb-${version}-${driver.osTag}.tar.gz");
-
-    /** download mirror, if desired */
-    @SetFromFlag("mirrorUrl")
-    public static final ConfigKey<String> MIRROR_URL = ConfigKeys.newStringConfigKey("mariadb.install.mirror.url", "URL of mirror",
-        "http://mirrors.coreix.net/mariadb/"
-     );
-
-    @SetFromFlag("port")
-    public static final PortAttributeSensorAndConfigKey MARIADB_PORT =
-        new PortAttributeSensorAndConfigKey("mariadb.port", "MariaDB port", PortRanges.fromString("3306, 13306+"));
-
-    @SetFromFlag("dataDir")
-    public static final ConfigKey<String> DATA_DIR = ConfigKeys.newStringConfigKey(
-        "mariadb.datadir", "Directory for writing data files", null);
-
-    @SetFromFlag("serverConf")
-    public static final MapConfigKey<Object> MARIADB_SERVER_CONF = new MapConfigKey<Object>(
-        Object.class, "mariadb.server.conf", "Configuration options for MariaDB server");
-
-    public static final ConfigKey<Object> MARIADB_SERVER_CONF_LOWER_CASE_TABLE_NAMES =
-        MARIADB_SERVER_CONF.subKey("lower_case_table_names", "See MariaDB (or MySQL!) guide. Set 1 to ignore case in table names (useful for OS portability)");
-
-    @SetFromFlag("password")
-    public static final StringAttributeSensorAndConfigKey PASSWORD = new StringAttributeSensorAndConfigKey(
-        "mariadb.password", "Database admin password (or randomly generated if not set)", null);
-
-    @SetFromFlag("socketUid")
-    public static final StringAttributeSensorAndConfigKey SOCKET_UID = new StringAttributeSensorAndConfigKey(
-        "mariadb.socketUid", "Socket uid, for use in file /tmp/mysql.sock.<uid>.3306 (or randomly generated if not set)", null);
-
-    /** @deprecated since 0.7.0 use DATASTORE_URL */ @Deprecated
-    public static final AttributeSensor<String> MARIADB_URL = DATASTORE_URL;
-
-    @SetFromFlag("configurationTemplateUrl")
-    static final BasicAttributeSensorAndConfigKey<String> TEMPLATE_CONFIGURATION_URL = new StringAttributeSensorAndConfigKey(
-        "mariadb.template.configuration.url", "Template file (in freemarker format) for the my.cnf file",
-        "classpath://org/apache/brooklyn/entity/database/mariadb/my.cnf");
-
-    public static final AttributeSensor<Double> QUERIES_PER_SECOND_FROM_MARIADB =
-        Sensors.newDoubleSensor("mariadb.queries.perSec.fromMariadb");
-
-    public String executeScript(String commands);
-}

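The deleted MariaDbNode interface above shows how a Brooklyn entity declares its configuration surface: config keys and sensor-and-config keys as static constants, with @SetFromFlag exposing short flag names to blueprints. The interface never shows those keys being consumed; below is a hedged sketch of typical consumption when building an entity spec, assuming the standard org.apache.brooklyn.api.entity.EntitySpec API (not imported in the file above) and illustrative values.

    import org.apache.brooklyn.api.entity.EntitySpec;

    // Sketch only: configure a few of the keys declared in MariaDbNode when creating the entity.
    EntitySpec<MariaDbNode> spec = EntitySpec.create(MariaDbNode.class)
            .configure(MariaDbNode.SUGGESTED_VERSION, "5.5.40")                       // @SetFromFlag("version")
            .configure(MariaDbNode.DATA_DIR, "/mnt/mariadb-data")                     // @SetFromFlag("dataDir")
            .configure(MariaDbNode.MIRROR_URL, "http://mirrors.coreix.net/mariadb/"); // @SetFromFlag("mirrorUrl")
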
http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNodeImpl.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNodeImpl.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNodeImpl.java
deleted file mode 100644
index 86a8bfb..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbNodeImpl.java
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mariadb;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.brooklyn.core.effector.EffectorBody;
-import org.apache.brooklyn.core.location.Locations;
-import org.apache.brooklyn.entity.software.base.SoftwareProcessImpl;
-import org.apache.brooklyn.feed.ssh.SshFeed;
-import org.apache.brooklyn.feed.ssh.SshPollConfig;
-import org.apache.brooklyn.feed.ssh.SshPollValue;
-import org.apache.brooklyn.location.ssh.SshMachineLocation;
-import org.apache.brooklyn.util.core.config.ConfigBag;
-import org.apache.brooklyn.util.guava.Maybe;
-import org.apache.brooklyn.util.text.Identifiers;
-import org.apache.brooklyn.util.text.Strings;
-import org.apache.brooklyn.util.time.Duration;
-
-import com.google.common.base.Function;
-
-public class MariaDbNodeImpl extends SoftwareProcessImpl implements MariaDbNode {
-
-    private static final Logger LOG = LoggerFactory.getLogger(MariaDbNodeImpl.class);
-
-    private SshFeed feed;
-
-    @Override
-    public Class<?> getDriverInterface() {
-        return MariaDbDriver.class;
-    }
-
-    @Override
-    public MariaDbDriver getDriver() {
-        return (MariaDbDriver) super.getDriver();
-    }
-
-    @Override
-    public void init() {
-        super.init();
-        getMutableEntityType().addEffector(EXECUTE_SCRIPT, new EffectorBody<String>() {
-            @Override
-            public String call(ConfigBag parameters) {
-                return executeScript((String)parameters.getStringKey("commands"));
-            }
-        });
-    }
-    
-    @Override
-    protected void connectSensors() {
-        super.connectSensors();
-        sensors().set(DATASTORE_URL, String.format("mysql://%s:%s/", getAttribute(HOSTNAME), getAttribute(MARIADB_PORT)));
-
-        /*        
-         * TODO status gives us things like:
-         *   Uptime: 2427  Threads: 1  Questions: 581  Slow queries: 0  Opens: 53  Flush tables: 1  Open tables: 35  Queries per second avg: 0.239
-         * So can extract lots of sensors from that.
-         */
-        Maybe<SshMachineLocation> machine = Locations.findUniqueSshMachineLocation(getLocations());
-
-        if (machine.isPresent()) {
-            String cmd = getDriver().getStatusCmd();
-            feed = SshFeed.builder()
-                    .entity(this)
-                    .period(Duration.FIVE_SECONDS)
-                    .machine(machine.get())
-                    .poll(new SshPollConfig<Boolean>(SERVICE_UP)
-                            .command(cmd)
-                            .setOnSuccess(true)
-                            .setOnFailureOrException(false))
-                    .poll(new SshPollConfig<Double>(QUERIES_PER_SECOND_FROM_MARIADB)
-                            .command(cmd)
-                            .onSuccess(new Function<SshPollValue, Double>() {
-                                public Double apply(SshPollValue input) {
-                                    String q = Strings.getFirstWordAfter(input.getStdout(), "Queries per second avg:");
-                                    return (q == null) ? null : Double.parseDouble(q);
-                                }})
-                            .setOnFailureOrException(null) )
-                    .build();
-        } else {
-            LOG.warn("Location(s) {} not an ssh-machine location, so not polling for status; setting serviceUp immediately", getLocations());
-            sensors().set(SERVICE_UP, true);
-        }
-    }
-
-    @Override
-    protected void disconnectSensors() {
-        if (feed != null) feed.stop();
-        super.disconnectSensors();
-    }
-
-    public int getPort() {
-        return getAttribute(MARIADB_PORT);
-    }
-
-    public String getSocketUid() {
-        String result = getAttribute(MariaDbNode.SOCKET_UID);
-        if (Strings.isBlank(result))
-            sensors().set(MariaDbNode.SOCKET_UID, (result = Identifiers.makeRandomId(6)));
-        return result;
-    }
-
-    public String getPassword() {
-        String result = getAttribute(MariaDbNode.PASSWORD);
-        if (Strings.isBlank(result))
-            sensors().set(MariaDbNode.PASSWORD, (result = Identifiers.makeRandomId(6)));
-        return result;
-    }
-
-    @Override
-    public String getShortName() {
-        return "MariaDB";
-    }
-
-    @Override
-    public String executeScript(String commands) {
-        return getDriver().executeScriptAsync(commands).block().getStdout();
-    }
-
-}

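The TODO in connectSensors() above notes that the `mysqladmin status` output (e.g. "Uptime: 2427  Threads: 1  ...  Queries per second avg: 0.239") carries several values beyond the single queries-per-second sensor published today. Below is a hedged sketch of one extra poll that could be chained onto the same SshFeed builder, using the same SshPollConfig/Strings helpers the deleted file already uses; Sensors is imported elsewhere in this commit, and the THREADS sensor is an assumption for illustration, not declared by MariaDbNode.

    // Hypothetical sensor for illustration.
    AttributeSensor<Integer> THREADS = Sensors.newIntegerSensor("mariadb.threads");

    // Extra poll to add to the SshFeed.builder() chain shown above, before .build():
    .poll(new SshPollConfig<Integer>(THREADS)
            .command(cmd)                               // same getDriver().getStatusCmd() output
            .onSuccess(new Function<SshPollValue, Integer>() {
                public Integer apply(SshPollValue input) {
                    String t = Strings.getFirstWordAfter(input.getStdout(), "Threads:");
                    return (t == null) ? null : Integer.parseInt(t);
                }})
            .setOnFailureOrException(null))
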
http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbSshDriver.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbSshDriver.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbSshDriver.java
deleted file mode 100644
index dedba55..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mariadb/MariaDbSshDriver.java
+++ /dev/null
@@ -1,256 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mariadb;
-
-import static java.lang.String.format;
-import static org.apache.brooklyn.util.JavaGroovyEquivalents.groovyTruth;
-import static org.apache.brooklyn.util.ssh.BashCommands.commandsToDownloadUrlsAs;
-import static org.apache.brooklyn.util.ssh.BashCommands.installPackage;
-import static org.apache.brooklyn.util.ssh.BashCommands.ok;
-
-import java.io.InputStream;
-import java.io.Reader;
-import java.io.StringReader;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Map;
-
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-import org.apache.brooklyn.entity.database.DatastoreMixins;
-import org.apache.brooklyn.entity.software.base.AbstractSoftwareProcessSshDriver;
-import org.apache.brooklyn.api.location.OsDetails;
-import org.apache.brooklyn.core.effector.ssh.SshEffectorTasks;
-import org.apache.brooklyn.core.entity.Attributes;
-import org.apache.brooklyn.core.entity.Entities;
-import org.apache.brooklyn.location.ssh.SshMachineLocation;
-import org.apache.brooklyn.util.collections.MutableMap;
-import org.apache.brooklyn.util.core.task.DynamicTasks;
-import org.apache.brooklyn.util.core.task.system.ProcessTaskWrapper;
-import org.apache.brooklyn.util.net.Urls;
-import org.apache.brooklyn.util.os.Os;
-import org.apache.brooklyn.util.ssh.BashCommands;
-import org.apache.brooklyn.util.text.Identifiers;
-import org.apache.brooklyn.util.text.Strings;
-import org.apache.brooklyn.util.time.CountdownTimer;
-import org.apache.brooklyn.util.time.Duration;
-
-import com.google.common.collect.ImmutableMap;
-
-/**
- * The SSH implementation of the {@link MariaDbDriver}.
- */
-public class MariaDbSshDriver extends AbstractSoftwareProcessSshDriver implements MariaDbDriver {
-
-    public static final Logger log = LoggerFactory.getLogger(MariaDbSshDriver.class);
-
-    public MariaDbSshDriver(MariaDbNodeImpl entity, SshMachineLocation machine) {
-        super(entity, machine);
-
-        entity.sensors().set(Attributes.LOG_FILE_LOCATION, getLogFile());
-    }
-
-    public String getOsTag() {
-        OsDetails os = getLocation().getOsDetails();
-        // NOTE: cannot rely on OsDetails.isLinux() to return true for all linux flavours, so
-        // explicitly test for unsupported OSes, otherwise assume generic linux.
-        if (os == null) return "linux-i686";
-        if (os.isWindows() || os.isMac())
-            throw new UnsupportedOperationException("only support linux versions just now; OS details: " + os);
-        return (os.is64bit() ? "linux-x86_64" : "linux-i686");
-    }
-
-    public String getDownloadParentDir() {
-        // NOTE: cannot rely on OsDetails.isLinux() to return true for all linux flavours, so
-        // explicitly test for unsupported OSes, otherwise assume generic linux.
-        OsDetails os = getLocation().getOsDetails();
-        if (os == null) return "bintar-linux-x86";
-        if (os.isWindows() || os.isMac())
-            throw new UnsupportedOperationException("only support linux versions just now; OS details: " + os);
-        return (os.is64bit() ? "bintar-linux-x86_64" : "bintar-linux-x86");
-    }
-
-    public String getMirrorUrl() {
-        return entity.getConfig(MariaDbNode.MIRROR_URL);
-    }
-
-    public String getBaseDir() { return getExpandedInstallDir(); }
-
-    public String getDataDir() {
-        String result = entity.getConfig(MariaDbNode.DATA_DIR);
-        return (result == null) ? "." : result;
-    }
-
-    public String getLogFile() {
-        return Urls.mergePaths(getRunDir(), "console.log");
-    }
-
-    public String getConfigFile() {
-        return "my.cnf";
-    }
-
-    public String getInstallFilename() {
-        return String.format("mariadb-%s-%s.tar.gz", getVersion(), getOsTag());
-    }
-
-    @Override
-    public void preInstall() {
-        resolver = Entities.newDownloader(this, ImmutableMap.of("filename", getInstallFilename()));
-        setExpandedInstallDir(Os.mergePaths(getInstallDir(), resolver.getUnpackedDirectoryName(format("mariadb-%s-%s", getVersion(), getOsTag()))));
-    }
-
-    @Override
-    public void install() {
-        List<String> urls = resolver.getTargets();
-        String saveAs = resolver.getFilename();
-
-        List<String> commands = new LinkedList<String>();
-        commands.add(BashCommands.INSTALL_TAR);
-        commands.add(BashCommands.INSTALL_CURL);
-
-        commands.add("echo installing extra packages");
-        commands.add(installPackage(ImmutableMap.of("yum", "libgcc_s.so.1"), null));
-        commands.add(installPackage(ImmutableMap.of("yum", "libaio.so.1 libncurses.so.5", "apt", "libaio1 libaio-dev"), null));
-
-        // these deps are needed on some OS versions but others don't need them so ignore failures (ok(...))
-        commands.add(ok(installPackage(ImmutableMap.of("yum", "libaio", "apt", "ia32-libs"), null)));
-        commands.add("echo finished installing extra packages");
-
-        commands.addAll(commandsToDownloadUrlsAs(urls, saveAs));
-        commands.add(format("tar xfvz %s", saveAs));
-
-        newScript(INSTALLING).body.append(commands).execute();
-    }
-
-    public MariaDbNodeImpl getEntity() { return (MariaDbNodeImpl) super.getEntity(); }
-    public int getPort() { return getEntity().getPort(); }
-    public String getSocketUid() { return getEntity().getSocketUid(); }
-    public String getPassword() { return getEntity().getPassword(); }
-
-    @Override
-    public void customize() {
-        copyDatabaseConfigScript();
-
-        newScript(CUSTOMIZING)
-            .updateTaskAndFailOnNonZeroResultCode()
-            .body.append(
-                "chmod 600 "+getConfigFile(),
-                getBaseDir()+"/scripts/mysql_install_db "+
-                    "--basedir="+getBaseDir()+" --datadir="+getDataDir()+" "+
-                    "--defaults-file="+getConfigFile())
-            .execute();
-
-        // launch, then we will configure it
-        launch();
-
-        CountdownTimer timer = Duration.seconds(20).countdownTimer();
-        boolean hasCreationScript = copyDatabaseCreationScript();
-        timer.waitForExpiryUnchecked();
-
-        DynamicTasks.queue(
-            SshEffectorTasks.ssh(
-                "cd "+getRunDir(),
-                getBaseDir()+"/bin/mysqladmin --defaults-file="+getConfigFile()+" --password= password "+getPassword()
-            ).summary("setting password"));
-
-        if (hasCreationScript)
-            executeScriptFromInstalledFileAsync("creation-script.sql");
-
-        // not sure it's necessary to stop and then launch again, but it seems safest
-        // (if skipping, use a flag in launch to indicate we've just launched it)
-        stop();
-    }
-
-    private void copyDatabaseConfigScript() {
-        newScript(CUSTOMIZING).execute();  //create the directory
-
-        String configScriptContents = processTemplate(entity.getAttribute(MariaDbNode.TEMPLATE_CONFIGURATION_URL));
-        Reader configContents = new StringReader(configScriptContents);
-
-        getMachine().copyTo(configContents, Urls.mergePaths(getRunDir(), getConfigFile()));
-    }
-
-    private boolean copyDatabaseCreationScript() {
-        InputStream creationScript = DatastoreMixins.getDatabaseCreationScript(entity);
-        if (creationScript==null) return false;
-        getMachine().copyTo(creationScript, getRunDir() + "/creation-script.sql");
-        return true;
-    }
-
-    public String getMariaDbServerOptionsString() {
-        Map<String, Object> options = entity.getConfig(MariaDbNode.MARIADB_SERVER_CONF);
-        StringBuilder result = new StringBuilder();
-        if (groovyTruth(options)) {
-            for (Map.Entry<String, Object> entry : options.entrySet()) {
-                result.append(entry.getKey());
-                String value = entry.getValue().toString();
-                if (!Strings.isEmpty(value)) {
-                    result.append(" = ").append(value);
-                }
-                result.append('\n');
-            }
-        }
-        return result.toString();
-    }
-
-    @Override
-    public void launch() {
-        newScript(MutableMap.of("usePidFile", true), LAUNCHING)
-            .updateTaskAndFailOnNonZeroResultCode()
-            .body.append(format("nohup %s/bin/mysqld --defaults-file=%s --user=`whoami` > %s 2>&1 < /dev/null &", getBaseDir(), getConfigFile(), getLogFile()))
-            .execute();
-    }
-
-    @Override
-    public boolean isRunning() {
-        return newScript(MutableMap.of("usePidFile", false), CHECK_RUNNING)
-            .body.append(getStatusCmd())
-            .execute() == 0;
-    }
-
-    @Override
-    public void stop() {
-        newScript(MutableMap.of("usePidFile", true), STOPPING).execute();
-    }
-
-    @Override
-    public void kill() {
-        newScript(MutableMap.of("usePidFile", true), KILLING).execute();
-    }
-
-    @Override
-    public String getStatusCmd() {
-        return format("%s/bin/mysqladmin --defaults-file=%s status", getExpandedInstallDir(), Urls.mergePaths(getRunDir(), getConfigFile()));
-    }
-
-    public ProcessTaskWrapper<Integer> executeScriptAsync(String commands) {
-        String filename = "mariadb-commands-"+Identifiers.makeRandomId(8);
-        DynamicTasks.queue(SshEffectorTasks.put(Urls.mergePaths(getRunDir(), filename)).contents(commands).summary("copying datastore script to execute "+filename));
-        return executeScriptFromInstalledFileAsync(filename);
-    }
-
-    public ProcessTaskWrapper<Integer> executeScriptFromInstalledFileAsync(String filenameAlreadyInstalledAtServer) {
-        return DynamicTasks.queue(
-                SshEffectorTasks.ssh(
-                                "cd "+getRunDir(),
-                                getBaseDir()+"/bin/mysql --defaults-file="+getConfigFile()+" < "+filenameAlreadyInstalledAtServer)
-                        .summary("executing datastore script "+filenameAlreadyInstalledAtServer));
-    }
-
-}

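MariaDbNodeImpl.init() earlier in this commit registers an EXECUTE_SCRIPT effector whose body delegates to the executeScriptAsync(...) implementation deleted above. Below is a hedged sketch of calling that effector from client code, assuming the standard Entity.invoke(...) API and that EXECUTE_SCRIPT is the effector constant inherited by MariaDbNode; the helper method and its argument are illustrative only.

    import com.google.common.collect.ImmutableMap;
    import org.apache.brooklyn.entity.database.mariadb.MariaDbNode;

    // Sketch only: run an ad-hoc SQL script through the effector wired up in MariaDbNodeImpl.init().
    static String showDatabases(MariaDbNode node) {
        // invoke(...) returns a Task<String>; getUnchecked() blocks on the SSH task queued by executeScriptAsync().
        return node.invoke(MariaDbNode.EXECUTE_SCRIPT,
                ImmutableMap.<String, Object>of("commands", "SHOW DATABASES;")).getUnchecked();
    }
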
http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/InitSlaveTaskBody.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/InitSlaveTaskBody.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/InitSlaveTaskBody.java
deleted file mode 100644
index 70e29d7..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/InitSlaveTaskBody.java
+++ /dev/null
@@ -1,426 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mysql;
-
-import java.text.SimpleDateFormat;
-import java.util.Collection;
-import java.util.Date;
-import java.util.List;
-import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
-import java.util.concurrent.Future;
-import java.util.concurrent.Semaphore;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.brooklyn.api.entity.Entity;
-import org.apache.brooklyn.api.mgmt.Task;
-import org.apache.brooklyn.api.sensor.AttributeSensor;
-import org.apache.brooklyn.core.effector.EffectorTasks;
-import org.apache.brooklyn.core.effector.Effectors;
-import org.apache.brooklyn.core.effector.ssh.SshEffectorTasks;
-import org.apache.brooklyn.core.entity.EntityPredicates;
-import org.apache.brooklyn.core.sensor.DependentConfiguration;
-import org.apache.brooklyn.core.sensor.Sensors;
-import org.apache.brooklyn.entity.database.mysql.MySqlNode.ExportDumpEffector;
-import org.apache.brooklyn.entity.software.base.SoftwareProcess;
-import org.apache.brooklyn.location.ssh.SshMachineLocation;
-import org.apache.brooklyn.util.core.task.DynamicTasks;
-import org.apache.brooklyn.util.core.task.TaskTags;
-import org.apache.brooklyn.util.core.task.ssh.SshTasks;
-import org.apache.brooklyn.util.core.task.system.ProcessTaskFactory;
-import org.apache.brooklyn.util.core.task.system.ProcessTaskWrapper;
-import org.apache.brooklyn.util.exceptions.Exceptions;
-import org.apache.brooklyn.util.os.Os;
-import org.apache.brooklyn.util.ssh.BashCommands;
-import org.apache.brooklyn.util.text.Identifiers;
-import org.apache.brooklyn.util.text.StringEscapes.BashStringEscapes;
-import org.apache.commons.io.FilenameUtils;
-import org.apache.commons.lang3.concurrent.ConcurrentUtils;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import com.google.common.base.Joiner;
-import com.google.common.base.Optional;
-import com.google.common.base.Predicates;
-import com.google.common.base.Strings;
-import com.google.common.collect.FluentIterable;
-import com.google.common.collect.ImmutableList;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-
-public class InitSlaveTaskBody implements Runnable {
-    private static final String SNAPSHOT_DUMP_OPTIONS = "--skip-lock-tables --single-transaction --flush-logs --hex-blob";
-
-    private static final Logger log = LoggerFactory.getLogger(InitSlaveTaskBody.class);
-
-    private final MySqlCluster cluster;
-    private final MySqlNode slave;
-    private Semaphore lock;
-
-    public InitSlaveTaskBody(MySqlCluster cluster, MySqlNode slave, Semaphore lock) {
-        this.cluster = cluster;
-        this.slave = slave;
-        this.lock = lock;
-    }
-
-    @Override
-    public void run() {
-        // Replication init state consists of:
-        //   * initial dump (optional)
-        //   * location of initial dump (could be on any of the members, optional)
-        //   * bin log file name
-        //   * bin log position
-        // 1. Check replication state:
-        //   * Does the dump exist (and the machine where it is located)
-        //   * Does the bin log exist on the master
-        // 2. If the replication state is not valid create a new one
-        //   * Select a slave to dump, master if no slaves
-        //   * If it's a slave do 'STOP SLAVE SQL_THREAD;'
-        //   * Call mysqldump to create the snapshot
-        //   * When done if a slave do 'START SLAVE SQL_THREAD;'
-        //   * Get master state from the dump - grep "MASTER_LOG_POS" dump.sql.
-        //     If slave get state from 'SHOW SLAVE STATUS'
-        //   * Save new init info in cluster - bin log name, position, dump
-        // 3. Init Slave
-        //   * transfer dump to new slave (if dump exists)
-        //   * import - mysql < ~/dump.sql
-        //   * change master to and start slave
-        //!!! Caveat: if dumping from the master and MyISAM tables are used, the dump may be inconsistent.
-        //   * Only way around it is to lock the database while dumping (or taking a snapshot through LVM which is quicker)
-        bootstrapSlaveAsync(getValidReplicationInfo(), slave);
-        cluster.getAttribute(MySqlClusterImpl.SLAVE_ID_ADDRESS_MAPPING).put(slave.getId(), slave.getAttribute(MySqlNode.SUBNET_ADDRESS));
-    }
-
-    private MySqlNode getMaster() {
-        return (MySqlNode) Iterables.find(cluster.getMembers(), MySqlClusterUtils.IS_MASTER);
-    }
-
-    private void bootstrapSlaveAsync(final Future<ReplicationSnapshot> replicationInfoFuture, final MySqlNode slave) {
-        DynamicTasks.queue("bootstrap slave replication", new Runnable() {
-            @Override
-            public void run() {
-                ReplicationSnapshot replicationSnapshot;
-                try {
-                    replicationSnapshot = replicationInfoFuture.get();
-                } catch (InterruptedException | ExecutionException e) {
-                    throw Exceptions.propagate(e);
-                }
-
-                MySqlNode master = getMaster();
-                String masterAddress = MySqlClusterUtils.validateSqlParam(master.getAttribute(MySqlNode.SUBNET_ADDRESS));
-                Integer masterPort = master.getAttribute(MySqlNode.MYSQL_PORT);
-                String slaveAddress = MySqlClusterUtils.validateSqlParam(slave.getAttribute(MySqlNode.SUBNET_ADDRESS));
-                String username = MySqlClusterUtils.validateSqlParam(cluster.getConfig(MySqlCluster.SLAVE_USERNAME));
-                String password = MySqlClusterUtils.validateSqlParam(cluster.getAttribute(MySqlCluster.SLAVE_PASSWORD));
-
-                if (replicationSnapshot.getEntityId() != null) {
-                    Entity sourceEntity = Iterables.find(cluster.getMembers(), EntityPredicates.idEqualTo(replicationSnapshot.getEntityId()));
-                    String dumpId = FilenameUtils.removeExtension(replicationSnapshot.getSnapshotPath());
-                    copyDumpAsync(sourceEntity, slave, replicationSnapshot.getSnapshotPath(), dumpId);
-                    DynamicTasks.queue(Effectors.invocation(slave, MySqlNode.IMPORT_DUMP, ImmutableMap.of("path", replicationSnapshot.getSnapshotPath())));
-                    //The dump resets the password to whatever is on the source instance, so reset it back.
-                    //We are still able to log in because privileges are not flushed, so we just set the password to the same value.
-                    DynamicTasks.queue(Effectors.invocation(slave, MySqlNode.CHANGE_PASSWORD, ImmutableMap.of("password", slave.getAttribute(MySqlNode.PASSWORD))));
-                    //Flush privileges to load new users coming from the dump
-                    MySqlClusterUtils.executeSqlOnNodeAsync(slave, "FLUSH PRIVILEGES;");
-                }
-
-                MySqlClusterUtils.executeSqlOnNodeAsync(master, String.format(
-                        "CREATE USER '%s'@'%s' IDENTIFIED BY '%s';\n" +
-                        "GRANT REPLICATION SLAVE ON *.* TO '%s'@'%s';\n",
-                        username, slaveAddress, password, username, slaveAddress));
-
-                // Executing this will unblock SERVICE_UP wait in the start effector
-                String slaveCmd = String.format(
-                        "CHANGE MASTER TO " +
-                            "MASTER_HOST='%s', " +
-                            "MASTER_PORT=%d, " +
-                            "MASTER_USER='%s', " +
-                            "MASTER_PASSWORD='%s', " +
-                            "MASTER_LOG_FILE='%s', " +
-                            "MASTER_LOG_POS=%d;\n" +
-                        "START SLAVE;\n",
-                        masterAddress, masterPort,
-                        username, password,
-                        replicationSnapshot.getBinLogName(),
-                        replicationSnapshot.getBinLogPosition());
-                MySqlClusterUtils.executeSqlOnNodeAsync(slave, slaveCmd);
-            }
-        });
-    }
-
-    private void copyDumpAsync(Entity source, Entity dest, String sourceDumpPath, String dumpId) {
-        final SshMachineLocation sourceMachine = EffectorTasks.getSshMachine(source);
-        final SshMachineLocation destMachine = EffectorTasks.getSshMachine(dest);
-
-        String sourceRunDir = source.getAttribute(MySqlNode.RUN_DIR);
-        String privateKeyFile = dumpId + ".id_rsa";
-        final Task<String> tempKeyTask = DynamicTasks.queue(SshEffectorTasks.ssh(
-                "cd $RUN_DIR",
-                "PRIVATE_KEY=" + privateKeyFile,
-                "ssh-keygen -t rsa -N '' -f $PRIVATE_KEY -C " + dumpId + " > /dev/null",
-                "cat $PRIVATE_KEY.pub")
-                .environmentVariable("RUN_DIR", sourceRunDir)
-                .machine(sourceMachine)
-                .summary("generate private key for slave access")
-                .requiringZeroAndReturningStdout())
-                .asTask();
-
-        DynamicTasks.queue("add key to authorized_keys", new Runnable() {
-            @Override
-            public void run() {
-                String publicKey = tempKeyTask.getUnchecked();
-                DynamicTasks.queue(SshEffectorTasks.ssh(String.format(
-                        "cat >> ~/.ssh/authorized_keys <<EOF\n%s\nEOF", 
-                        publicKey))
-                    .machine(destMachine)
-                    .summary("Add key to authorized_keys")
-                    .requiringExitCodeZero());
-            }
-        });
-
-        final ProcessTaskWrapper<Integer> copyTask = SshEffectorTasks.ssh(
-                "cd $RUN_DIR",
-                String.format(
-                    "scp -o 'BatchMode yes' -o 'StrictHostKeyChecking no' -i '%s' '%s' '%s@%s:%s/%s.sql'",
-                    privateKeyFile,
-                    sourceDumpPath,
-                    destMachine.getUser(),
-                    dest.getAttribute(MySqlNode.SUBNET_ADDRESS),
-                    dest.getAttribute(MySqlNode.RUN_DIR),
-                    dumpId))
-                .environmentVariable("RUN_DIR", sourceRunDir)
-                .machine(sourceMachine)
-                .summary("copy database dump to slave")
-                .newTask();
-        // Let next couple of tasks complete even if this one fails so that we can clean up.
-        TaskTags.markInessential(copyTask);
-        DynamicTasks.queue(copyTask);
-
-        // Delete private key
-        DynamicTasks.queue(SshEffectorTasks.ssh(
-                "cd $RUN_DIR",
-                "rm " + privateKeyFile)
-            .environmentVariable("RUN_DIR", sourceRunDir)
-            .machine(sourceMachine)
-            .summary("remove private key"));
-
-        DynamicTasks.queue(SshEffectorTasks.ssh(String.format(
-                "sed -i'' -e '/%s/d' ~/.ssh/authorized_keys",
-                dumpId))
-            .machine(destMachine)
-            .summary("remove private key from authorized_keys")).asTask();
-
-        // The task will fail if copyTask fails, but only after the private key is deleted.
-        DynamicTasks.queue("check for successful copy", new Runnable() {
-            @Override
-            public void run() {
-                copyTask.asTask().getUnchecked();
-            }
-        });
-    }
-
-    private Future<ReplicationSnapshot> getValidReplicationInfo() {
-        try {
-            try {
-                lock.acquire();
-            } catch (InterruptedException e) {
-                throw Exceptions.propagate(e);
-            }
-            ReplicationSnapshot replicationSnapshot = getReplicationInfoMasterConfig();
-            if (replicationSnapshot == null) {
-                replicationSnapshot = getAttributeBlocking(cluster, MySqlCluster.REPLICATION_LAST_SLAVE_SNAPSHOT);
-            }
-            if (!isReplicationInfoValid(replicationSnapshot)) {
-                final MySqlNode snapshotNode = getSnapshotNode();
-                final String dumpName = getDumpUniqueId() + ".sql";
-                if (MySqlClusterUtils.IS_MASTER.apply(snapshotNode)) {
-                    return createMasterReplicationSnapshot(snapshotNode, dumpName);
-                } else {
-                    return createSlaveReplicationSnapshot(snapshotNode, dumpName);
-                }
-            }
-            return ConcurrentUtils.constantFuture(replicationSnapshot);
-        } finally {
-            lock.release();
-        }
-    }
-
-    /**
-     * Rebind backwards compatibility
-     * @deprecated since 0.9.0
-     */
-    @Deprecated
-    private ReplicationSnapshot getReplicationInfoMasterConfig() {
-        Entity master = getMaster();
-        AttributeSensor<String> MASTER_LOG_FILE = Sensors.newStringSensor(
-                "mysql.master.log_file", "The binary log file master is writing to");
-        AttributeSensor<Integer> MASTER_LOG_POSITION = Sensors.newIntegerSensor(
-                "mysql.master.log_position", "The position in the log file to start replication");
-
-        String logFile = master.sensors().get(MASTER_LOG_FILE);
-        Integer logPos = master.sensors().get(MASTER_LOG_POSITION);
-        if(logFile != null && logPos != null) {
-            ReplicationSnapshot replicationSnapshot = new ReplicationSnapshot(null, null, logFile, logPos);
-            cluster.sensors().set(MySqlCluster.REPLICATION_LAST_SLAVE_SNAPSHOT, replicationSnapshot);
-            master.sensors().set(MASTER_LOG_FILE, null);
-            master.sensors().set(MASTER_LOG_POSITION, null);
-            return replicationSnapshot;
-        }
-        return null;
-    }
-
-    private Future<ReplicationSnapshot> createMasterReplicationSnapshot(final MySqlNode master, final String dumpName) {
-        log.info("MySql cluster " + cluster + ": generating new replication snapshot on master node " + master + " with name " + dumpName);
-        String dumpOptions = SNAPSHOT_DUMP_OPTIONS + " --master-data=2" + getDumpDatabases(master);
-        ImmutableMap<String, String> params = ImmutableMap.of(
-                ExportDumpEffector.PATH.getName(), dumpName,
-                ExportDumpEffector.ADDITIONAL_OPTIONS.getName(), dumpOptions);
-        DynamicTasks.queue(Effectors.invocation(master, MySqlNode.EXPORT_DUMP, params));
-        return DynamicTasks.queue("get master log info from dump", new Callable<ReplicationSnapshot>() {
-            @Override
-            public ReplicationSnapshot call() throws Exception {
-                Pattern masterInfoPattern = Pattern.compile("CHANGE MASTER TO.*MASTER_LOG_FILE\\s*=\\s*'([^']+)'.*MASTER_LOG_POS\\s*=\\s*(\\d+)");
-                String masterInfo = DynamicTasks.queue(execSshTask(master, "grep -m1 'CHANGE MASTER TO' " + dumpName, "Extract master replication status from dump")
-                        .requiringZeroAndReturningStdout()).asTask().getUnchecked();
-                Matcher masterInfoMatcher = masterInfoPattern.matcher(masterInfo);
-                if (!masterInfoMatcher.find() || masterInfoMatcher.groupCount() != 2) {
-                    throw new IllegalStateException("Master dump doesn't contain replication info: " + masterInfo);
-                }
-                String masterLogFile = masterInfoMatcher.group(1);
-                int masterLogPosition = Integer.parseInt(masterInfoMatcher.group(2));
-                ReplicationSnapshot replicationSnapshot = new ReplicationSnapshot(master.getId(), dumpName, masterLogFile, masterLogPosition);
-                cluster.sensors().set(MySqlCluster.REPLICATION_LAST_SLAVE_SNAPSHOT, replicationSnapshot);
-                return replicationSnapshot;
-            }
-        });
-    }
-
-    private String getDumpDatabases(MySqlNode node) {
-        // The config will be inherited from the cluster
-        Collection<String> dumpDbs = node.config().get(MySqlCluster.SLAVE_REPLICATE_DUMP_DB);
-        if (dumpDbs != null && !dumpDbs.isEmpty()) {
-            return " --databases " + Joiner.on(' ').join(Iterables.transform(dumpDbs, BashStringEscapes.wrapBash()));
-        } else {
-            return " --all-databases";
-        }
-    }
-
-    private Future<ReplicationSnapshot> createSlaveReplicationSnapshot(final MySqlNode slave, final String dumpName) {
-        MySqlClusterUtils.executeSqlOnNodeAsync(slave, "STOP SLAVE SQL_THREAD;");
-        try {
-            log.info("MySql cluster " + cluster + ": generating new replication snapshot on slave node " + slave + " with name " + dumpName);
-            String dumpOptions = SNAPSHOT_DUMP_OPTIONS + getDumpDatabases(slave);
-            ImmutableMap<String, String> params = ImmutableMap.of(
-                    ExportDumpEffector.PATH.getName(), dumpName,
-                    ExportDumpEffector.ADDITIONAL_OPTIONS.getName(), dumpOptions);
-            DynamicTasks.queue(Effectors.invocation(slave, MySqlNode.EXPORT_DUMP, params));
-            return DynamicTasks.queue("get master log info from slave", new Callable<ReplicationSnapshot>() {
-                @Override
-                public ReplicationSnapshot call() throws Exception {
-                    String slaveStatusRow = slave.executeScript("SHOW SLAVE STATUS \\G");
-                    Map<String, String> slaveStatus = MySqlRowParser.parseSingle(slaveStatusRow);
-                    String masterLogFile = slaveStatus.get("Relay_Master_Log_File");
-                    int masterLogPosition = Integer.parseInt(slaveStatus.get("Exec_Master_Log_Pos"));
-                    ReplicationSnapshot replicationSnapshot = new ReplicationSnapshot(slave.getId(), dumpName, masterLogFile, masterLogPosition);
-                    cluster.sensors().set(MySqlCluster.REPLICATION_LAST_SLAVE_SNAPSHOT, replicationSnapshot);
-                    return replicationSnapshot;
-                }
-            });
-        } finally {
-            MySqlClusterUtils.executeSqlOnNodeAsync(slave, "START SLAVE SQL_THREAD;");
-        }
-    }
-
-    private MySqlNode getSnapshotNode() {
-        String snapshotNodeId = cluster.getConfig(MySqlCluster.REPLICATION_PREFERRED_SOURCE);
-        if (snapshotNodeId != null) {
-            Optional<Entity> preferredNode = Iterables.tryFind(cluster.getMembers(), EntityPredicates.idEqualTo(snapshotNodeId));
-            if (preferredNode.isPresent()) {
-                return (MySqlNode) preferredNode.get();
-            } else {
-                log.warn("MySql cluster " + this + " configured with preferred snapshot node " + snapshotNodeId + " but it's not a member. Defaulting to a random slave.");
-            }
-        }
-        return getRandomSlave();
-    }
-
-    private MySqlNode getRandomSlave() {
-        List<MySqlNode> slaves = getHealhtySlaves();
-        if (slaves.size() > 0) {
-            return slaves.get(new Random().nextInt(slaves.size()));
-        } else {
-            return getMaster();
-        }
-    }
-
-    private ImmutableList<MySqlNode> getHealhtySlaves() {
-        return FluentIterable.from(cluster.getMembers())
-                   .filter(Predicates.not(MySqlClusterUtils.IS_MASTER))
-                   .filter(EntityPredicates.attributeEqualTo(MySqlNode.SERVICE_UP, Boolean.TRUE))
-                   .filter(MySqlNode.class)
-                   .toList();
-    }
-
-    private boolean isReplicationInfoValid(ReplicationSnapshot replicationSnapshot) {
-        MySqlNode master = getMaster();
-        String dataDir = Strings.nullToEmpty(master.getConfig(MySqlNode.DATA_DIR));
-        if (!checkFileExistsOnEntity(master, Os.mergePathsUnix(dataDir, replicationSnapshot.getBinLogName()))) {
-            return false;
-        }
-        if (replicationSnapshot.getEntityId() != null) {
-            Optional<Entity> snapshotSlave = Iterables.tryFind(cluster.getChildren(), EntityPredicates.idEqualTo(replicationSnapshot.getEntityId()));
-            if (!snapshotSlave.isPresent()) {
-                log.info("MySql cluster " + cluster + " missing node " + replicationSnapshot.getEntityId() + " with last snapshot " + replicationSnapshot.getSnapshotPath() + ". Will generate new snapshot.");
-                return false;
-            }
-            if (!checkFileExistsOnEntity(snapshotSlave.get(), replicationSnapshot.getSnapshotPath())) {
-                log.info("MySql cluster " + cluster + ", node " + snapshotSlave.get() + " missing replication snapshot " + replicationSnapshot.getSnapshotPath() + ". Will generate new snapshot.");
-                return false;
-            }
-        }
-        return true;
-    }
-
-    private boolean checkFileExistsOnEntity(Entity entity, String path) {
-        String cmd = BashCommands.chain(
-                BashCommands.requireTest(String.format("-f \"%s\"", path), "File " + path + " doesn't exist."));
-        String summary = "Check if file " + path + " exists";
-        return DynamicTasks.queue(execSshTask(entity, cmd, summary).allowingNonZeroExitCode()).asTask().getUnchecked() == 0;
-    }
-
-    private ProcessTaskFactory<Integer> execSshTask(Entity entity, String cmd, String summary) {
-        SshMachineLocation machine = EffectorTasks.getSshMachine(entity);
-        return SshTasks.newSshExecTaskFactory(machine, "cd $RUN_DIR\n" + cmd)
-            .allowingNonZeroExitCode()
-            .environmentVariable("RUN_DIR", entity.getAttribute(SoftwareProcess.RUN_DIR))
-            .summary(summary);
-    }
-
-    private <T> T getAttributeBlocking(Entity masterNode, AttributeSensor<T> att) {
-        return DynamicTasks.queue(DependentConfiguration.attributeWhenReady(masterNode, att)).getUnchecked();
-    }
-
-    private String getDumpUniqueId() {
-        return "replication-dump-" + Identifiers.makeRandomId(8) + "-" + new SimpleDateFormat("yyyy-MM-dd--HH-mm-ss").format(new Date());
-    }
-}
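
The snapshot logic above relies on MySqlRowParser.parseSingle (a separate class in this package, not shown in this message) to turn the vertical "\G" output of SHOW SLAVE STATUS into a key/value map. For orientation only, here is a minimal sketch of such a parser, assuming the plain "Key: Value" line format of the mysql client; the class name is made up and the real parser may differ:

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of a "\G"-style row parser; the real MySqlRowParser may differ,
    // e.g. in how it handles NULL values or multi-row output.
    public class VerticalRowParserSketch {
        public static Map<String, String> parseSingle(String verticalOutput) {
            Map<String, String> row = new HashMap<>();
            for (String line : verticalOutput.split("\n")) {
                int sep = line.indexOf(':');
                if (sep > 0) {
                    // lines without a colon (e.g. the "*** 1. row ***" separator) are skipped
                    row.put(line.substring(0, sep).trim(), line.substring(sep + 1).trim());
                }
            }
            return row;
        }
    }

The values read above, "Relay_Master_Log_File" and "Exec_Master_Log_Pos", identify the point in the master's binary log up to which the dumping slave has already applied changes, which is the position a newly seeded slave must resume replication from.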

http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlCluster.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlCluster.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlCluster.java
deleted file mode 100644
index 67c1e80..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlCluster.java
+++ /dev/null
@@ -1,77 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mysql;
-
-import java.util.Collection;
-import java.util.List;
-
-import org.apache.brooklyn.api.catalog.Catalog;
-import org.apache.brooklyn.api.entity.ImplementedBy;
-import org.apache.brooklyn.api.sensor.AttributeSensor;
-import org.apache.brooklyn.config.ConfigKey;
-import org.apache.brooklyn.core.config.ConfigKeys;
-import org.apache.brooklyn.core.sensor.BasicAttributeSensorAndConfigKey.StringAttributeSensorAndConfigKey;
-import org.apache.brooklyn.core.sensor.Sensors;
-import org.apache.brooklyn.entity.database.DatastoreMixins.HasDatastoreUrl;
-import org.apache.brooklyn.entity.group.DynamicCluster;
-import org.apache.brooklyn.util.GenericTypes;
-
-import com.google.common.reflect.TypeToken;
-
-@ImplementedBy(MySqlClusterImpl.class)
-@Catalog(name="MySql Master-Slave cluster", description="Sets up a cluster of MySQL nodes using master-slave relation and binary logging", iconUrl="classpath:///mysql-logo-110x57.png")
-public interface MySqlCluster extends DynamicCluster, HasDatastoreUrl {
-    interface MySqlMaster {
-        ConfigKey<String> MASTER_CREATION_SCRIPT_CONTENTS = ConfigKeys.newStringConfigKey(
-                "datastore.master.creation.script.contents", "Contents of creation script to initialize the master node after initializing replication");
-
-        ConfigKey<String> MASTER_CREATION_SCRIPT_URL = ConfigKeys.newStringConfigKey(
-                "datastore.master.creation.script.url", "URL of creation script to use to initialize the master node after initializing replication (ignored if creationScriptContents is specified)");
-    }
-    interface MySqlSlave {
-        AttributeSensor<Boolean> SLAVE_HEALTHY = Sensors.newBooleanSensor("mysql.slave.healthy", "Indicates that the replication state of the slave is healthy");
-        AttributeSensor<Integer> SLAVE_SECONDS_BEHIND_MASTER = Sensors.newIntegerSensor("mysql.slave.seconds_behind_master", "How many seconds the slave's replication state is behind the master");
-    }
-
-    AttributeSensor<ReplicationSnapshot> REPLICATION_LAST_SLAVE_SNAPSHOT = Sensors.newSensor(ReplicationSnapshot.class, "mysql.replication.last_slave_snapshot", "Last valid state to init slaves with");
-    ConfigKey<String> REPLICATION_PREFERRED_SOURCE = ConfigKeys.newStringConfigKey("mysql.replication.preferred_source", "ID of the node to get the replication snapshot from. If not set, a random slave is used, falling back to the master if there are no slaves.");
-
-    ConfigKey<String> SLAVE_USERNAME = ConfigKeys.newStringConfigKey(
-            "mysql.slave.username", "The user name slaves will use to connect to the master", "slave");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_DO_DB = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_do_db", "Replicate only listed DBs. Use together with 'mysql.slave.replicate_dump_db'.");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_IGNORE_DB = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_ignore_db", "Don't replicate listed DBs. Use together with 'mysql.slave.replicate_dump_db'.");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_DO_TABLE = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_do_table", "Replicate only listed tables. Use together with 'mysql.slave.replicate_dump_db'.");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_IGNORE_TABLE = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_ignore_table", "Don't replicate listed tables. Use together with 'mysql.slave.replicate_dump_db'.");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_WILD_DO_TABLE = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_wild_do_table", "Replicate only listed tables, wildcards acepted. Use together with 'mysql.slave.replicate_dump_db'.");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_WILD_IGNORE_TABLE = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_wild_ignore_table", "Don't replicate listed tables, wildcards acepted. Use together with 'mysql.slave.replicate_dump_db'.");
-    ConfigKey<Collection<String>> SLAVE_REPLICATE_DUMP_DB = ConfigKeys.newConfigKey(GenericTypes.COLLECTION_STRING,
-            "mysql.slave.replicate_dump_db", "Databases to pass to the mysqldump command, used for slave initialization");
-    StringAttributeSensorAndConfigKey SLAVE_PASSWORD = new StringAttributeSensorAndConfigKey(
-            "mysql.slave.password", "The password slaves will use to connect to the master. Will be auto-generated by default.");
-    @SuppressWarnings("serial")
-    AttributeSensor<List<String>> SLAVE_DATASTORE_URL_LIST = Sensors.newSensor(new TypeToken<List<String>>() {},
-            "mysql.slave.datastore.url", "List of all slave's DATASTORE_URL sensors");
-    AttributeSensor<Double> QUERIES_PER_SECOND_FROM_MYSQL_PER_NODE = Sensors.newDoubleSensor("mysql.queries.perSec.fromMysql.perNode");
-}
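
To give a sense of how these keys are consumed, here is a hedged wiring sketch, not part of this commit: the "shop" schema, the "repl" user name, the member ID, and the example class name are all illustrative values, not defaults. A caller could pin the replicated databases and the snapshot source when building the cluster spec:

    import org.apache.brooklyn.api.entity.EntitySpec;

    import com.google.common.collect.ImmutableList;

    // Hypothetical example: replicate only one schema and prefer a specific member
    // as the replication-snapshot source.
    public class MySqlClusterSpecExample {
        public static EntitySpec<MySqlCluster> clusterSpec() {
            return EntitySpec.create(MySqlCluster.class)
                    .configure(MySqlCluster.SLAVE_REPLICATE_DO_DB, ImmutableList.<String>of("shop"))
                    .configure(MySqlCluster.SLAVE_REPLICATE_DUMP_DB, ImmutableList.<String>of("shop"))
                    .configure(MySqlCluster.SLAVE_USERNAME, "repl")
                    .configure(MySqlCluster.REPLICATION_PREFERRED_SOURCE, "someMemberId");
        }
    }

Note that SLAVE_REPLICATE_DO_DB only filters what slaves apply, while SLAVE_REPLICATE_DUMP_DB additionally restricts what mysqldump exports when seeding a new slave, which is why the key descriptions recommend setting them together.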

http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterImpl.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterImpl.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterImpl.java
deleted file mode 100644
index d356fc5..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterImpl.java
+++ /dev/null
@@ -1,375 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mysql;
-
-import java.util.Collection;
-import java.util.List;
-import java.util.Map;
-import java.util.concurrent.Callable;
-import java.util.concurrent.ConcurrentHashMap;
-import java.util.concurrent.Semaphore;
-import java.util.concurrent.atomic.AtomicInteger;
-
-import javax.annotation.Nullable;
-
-import org.apache.brooklyn.api.entity.Entity;
-import org.apache.brooklyn.api.entity.EntityLocal;
-import org.apache.brooklyn.api.entity.EntitySpec;
-import org.apache.brooklyn.api.location.Location;
-import org.apache.brooklyn.api.sensor.AttributeSensor;
-import org.apache.brooklyn.api.sensor.SensorEvent;
-import org.apache.brooklyn.api.sensor.SensorEventListener;
-import org.apache.brooklyn.config.ConfigKey;
-import org.apache.brooklyn.core.entity.Attributes;
-import org.apache.brooklyn.core.entity.lifecycle.ServiceStateLogic.ServiceNotUpLogic;
-import org.apache.brooklyn.core.mgmt.BrooklynTaskTags;
-import org.apache.brooklyn.core.sensor.Sensors;
-import org.apache.brooklyn.enricher.stock.Enrichers;
-import org.apache.brooklyn.entity.group.DynamicClusterImpl;
-import org.apache.brooklyn.feed.function.FunctionFeed;
-import org.apache.brooklyn.feed.function.FunctionPollConfig;
-import org.apache.brooklyn.util.collections.CollectionFunctionals;
-import org.apache.brooklyn.util.core.ResourceUtils;
-import org.apache.brooklyn.util.core.task.DynamicTasks;
-import org.apache.brooklyn.util.core.task.TaskBuilder;
-import org.apache.brooklyn.util.guava.Functionals;
-import org.apache.brooklyn.util.guava.IfFunctions;
-import org.apache.brooklyn.util.text.Identifiers;
-import org.apache.brooklyn.util.text.StringPredicates;
-import org.apache.brooklyn.util.text.Strings;
-import org.apache.brooklyn.util.time.Duration;
-
-import com.google.common.base.Function;
-import com.google.common.base.Functions;
-import com.google.common.base.Predicates;
-import com.google.common.base.Supplier;
-import com.google.common.base.Suppliers;
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Iterables;
-import com.google.common.reflect.TypeToken;
-
-// https://dev.mysql.com/doc/refman/5.7/en/replication-howto.html
-
-// TODO SSL connection between master and slave
-// TODO Promote slave to master
-public class MySqlClusterImpl extends DynamicClusterImpl implements MySqlCluster {
-    private static final AttributeSensor<Boolean> NODE_REPLICATION_INITIALIZED = Sensors.newBooleanSensor("mysql.replication_initialized");
-
-    private static final String MASTER_CONFIG_URL = "classpath:///org/apache/brooklyn/entity/database/mysql/mysql_master.conf";
-    private static final String SLAVE_CONFIG_URL = "classpath:///org/apache/brooklyn/entity/database/mysql/mysql_slave.conf";
-    protected static final int MASTER_SERVER_ID = 1;
-
-    @SuppressWarnings("serial")
-    private static final AttributeSensor<Supplier<Integer>> SLAVE_NEXT_SERVER_ID = Sensors.newSensor(new TypeToken<Supplier<Integer>>() {},
-            "mysql.slave.next_server_id", "Returns the ID of the next slave server");
-    @SuppressWarnings("serial")
-    protected static final AttributeSensor<Map<String, String>> SLAVE_ID_ADDRESS_MAPPING = Sensors.newSensor(new TypeToken<Map<String, String>>() {},
-            "mysql.slave.id_address_mapping", "Maps slave entity IDs to SUBNET_ADDRESS, so the address is known at member remove time.");
-
-    @Override
-    public void init() {
-        super.init();
-        // Set the ID supplier in an attribute so that it is serialized
-        sensors().set(SLAVE_NEXT_SERVER_ID, new NextServerIdSupplier());
-        sensors().set(SLAVE_ID_ADDRESS_MAPPING, new ConcurrentHashMap<String, String>());
-        if (getConfig(SLAVE_PASSWORD) == null) {
-            sensors().set(SLAVE_PASSWORD, Identifiers.makeRandomId(8));
-        } else {
-            sensors().set(SLAVE_PASSWORD, getConfig(SLAVE_PASSWORD));
-        }
-        initSubscriptions();
-    }
-
-    @Override
-    public void rebind() {
-        super.rebind();
-        initSubscriptions();
-    }
-
-    private void initSubscriptions() {
-        subscriptions().subscribeToMembers(this, MySqlNode.SERVICE_PROCESS_IS_RUNNING, new NodeRunningListener(this));
-        subscriptions().subscribe(this, MEMBER_REMOVED, new MemberRemovedListener());
-    }
-
-    @SuppressWarnings({ "unchecked", "rawtypes" })
-    @Override
-    protected void initEnrichers() {
-        super.initEnrichers();
-        propagateMasterAttribute(MySqlNode.HOSTNAME);
-        propagateMasterAttribute(MySqlNode.ADDRESS);
-        propagateMasterAttribute(MySqlNode.SUBNET_HOSTNAME);
-        propagateMasterAttribute(MySqlNode.SUBNET_ADDRESS);
-        propagateMasterAttribute(MySqlNode.MYSQL_PORT);
-        propagateMasterAttribute(MySqlNode.DATASTORE_URL);
-
-        enrichers().add(Enrichers.builder()
-                .aggregating(MySqlNode.DATASTORE_URL)
-                .publishing(SLAVE_DATASTORE_URL_LIST)
-                .computing((Function<Collection<String>, List<String>>)(Function)Functions.identity())
-                .entityFilter(Predicates.not(MySqlClusterUtils.IS_MASTER))
-                .fromMembers()
-                .build());
-
-        enrichers().add(Enrichers.builder()
-                .aggregating(MySqlNode.QUERIES_PER_SECOND_FROM_MYSQL)
-                .publishing(QUERIES_PER_SECOND_FROM_MYSQL_PER_NODE)
-                .fromMembers()
-                .computingAverage()
-                .defaultValueForUnreportedSensors(0d)
-                .build());
-    }
-
-    private void propagateMasterAttribute(AttributeSensor<?> att) {
-        enrichers().add(Enrichers.builder()
-                .aggregating(att)
-                .publishing(att)
-                .computing(IfFunctions.ifPredicate(CollectionFunctionals.notEmpty())
-                        .apply(CollectionFunctionals.firstElement())
-                        .defaultValue(null))
-                .entityFilter(MySqlClusterUtils.IS_MASTER)
-                .build());
-    }
-
-    @Override
-    protected EntitySpec<?> getFirstMemberSpec() {
-        final EntitySpec<?> firstMemberSpec = super.getFirstMemberSpec();
-        if (firstMemberSpec != null) {
-            return applyDefaults(firstMemberSpec, Suppliers.ofInstance(MASTER_SERVER_ID), MASTER_CONFIG_URL);
-        }
-
-        final EntitySpec<?> memberSpec = super.getMemberSpec();
-        if (memberSpec != null) {
-            return applyDefaults(memberSpec, Suppliers.ofInstance(MASTER_SERVER_ID), MASTER_CONFIG_URL);
-        }
-
-        return EntitySpec.create(MySqlNode.class)
-                .displayName("MySql Master")
-                .configure(MySqlNode.MYSQL_SERVER_ID, MASTER_SERVER_ID)
-                .configure(MySqlNode.TEMPLATE_CONFIGURATION_URL, MASTER_CONFIG_URL);
-    }
-
-    @Override
-    protected EntitySpec<?> getMemberSpec() {
-        Supplier<Integer> serverIdSupplier = getAttribute(SLAVE_NEXT_SERVER_ID);
-
-        EntitySpec<?> spec = super.getMemberSpec();
-        if (spec != null) {
-            return applyDefaults(spec, serverIdSupplier, SLAVE_CONFIG_URL);
-        }
-
-        return EntitySpec.create(MySqlNode.class)
-                .displayName("MySql Slave")
-                // Slave server IDs will not be linear because getMemberSpec does not always result in createNode (the result may be discarded)
-                .configure(MySqlNode.MYSQL_SERVER_ID, serverIdSupplier.get())
-                .configure(MySqlNode.TEMPLATE_CONFIGURATION_URL, SLAVE_CONFIG_URL);
-    }
-
-    private EntitySpec<?> applyDefaults(EntitySpec<?> spec, Supplier<Integer> serverId, String configUrl) {
-        boolean needsServerId = !isKeyConfigured(spec, MySqlNode.MYSQL_SERVER_ID);
-        boolean needsConfigUrl = !isKeyConfigured(spec, MySqlNode.TEMPLATE_CONFIGURATION_URL.getConfigKey());
-        if (needsServerId || needsConfigUrl) {
-            EntitySpec<?> clonedSpec = EntitySpec.create(spec);
-            if (needsServerId) {
-                clonedSpec.configure(MySqlNode.MYSQL_SERVER_ID, serverId.get());
-            }
-            if (needsConfigUrl) {
-                clonedSpec.configure(MySqlNode.TEMPLATE_CONFIGURATION_URL, configUrl);
-            }
-            return clonedSpec;
-        } else {
-            return spec;
-        }
-    }
-
-    private boolean isKeyConfigured(EntitySpec<?> spec, ConfigKey<?> key) {
-        return spec.getConfig().containsKey(key) || spec.getFlags().containsKey(key.getName());
-    }
-
-    @Override
-    protected Entity createNode(Location loc, Map<?, ?> flags) {
-        MySqlNode node = (MySqlNode) super.createNode(loc, flags);
-        if (!MySqlClusterUtils.IS_MASTER.apply(node)) {
-            EntityLocal localNode = (EntityLocal) node;
-            ServiceNotUpLogic.updateNotUpIndicator(localNode, MySqlSlave.SLAVE_HEALTHY, "Replication not started");
-
-            addFeed(FunctionFeed.builder()
-                .entity(localNode)
-                .period(Duration.FIVE_SECONDS)
-                .poll(FunctionPollConfig.forSensor(MySqlSlave.SLAVE_HEALTHY)
-                        .callable(new SlaveStateCallable(node))
-                        .checkSuccess(StringPredicates.isNonBlank())
-                        .onSuccess(new SlaveStateParser(node))
-                        .setOnFailure(false)
-                        .description("Polls SHOW SLAVE STATUS"))
-                .build());
-
-            node.enrichers().add(Enrichers.builder().updatingMap(Attributes.SERVICE_NOT_UP_INDICATORS)
-                    .from(MySqlSlave.SLAVE_HEALTHY)
-                    .computing(Functionals.ifNotEquals(true).value("Slave replication status is not healthy"))
-                    .build());
-        }
-        return node;
-    }
-
-    public static class SlaveStateCallable implements Callable<String> {
-        private MySqlNode slave;
-        public SlaveStateCallable(MySqlNode slave) {
-            this.slave = slave;
-        }
-
-        @Override
-        public String call() throws Exception {
-            if (Boolean.TRUE.equals(slave.getAttribute(MySqlNode.SERVICE_PROCESS_IS_RUNNING))) {
-                return MySqlClusterUtils.executeSqlOnNode(slave, "SHOW SLAVE STATUS \\G");
-            } else {
-                return null;
-            }
-        }
-
-    }
-
-    public static class SlaveStateParser implements Function<String, Boolean> {
-        private Entity slave;
-
-        public SlaveStateParser(Entity slave) {
-            this.slave = slave;
-        }
-
-        @Override
-        public Boolean apply(String result) {
-            Map<String, String> status = MySqlRowParser.parseSingle(result);
-            String secondsBehindMaster = status.get("Seconds_Behind_Master");
-            if (secondsBehindMaster != null && !"NULL".equals(secondsBehindMaster)) {
-                slave.sensors().set(MySqlSlave.SLAVE_SECONDS_BEHIND_MASTER, Integer.valueOf(secondsBehindMaster));
-            }
-            return "Yes".equals(status.get("Slave_IO_Running")) && "Yes".equals(status.get("Slave_SQL_Running"));
-        }
-
-    }
-
-    private static class NextServerIdSupplier implements Supplier<Integer> {
-        private AtomicInteger nextId = new AtomicInteger(MASTER_SERVER_ID+1);
-
-        @Override
-        public Integer get() {
-            return nextId.getAndIncrement();
-        }
-    }
-
-    // ============= Member Init =============
-
-    // The task is executed separately from the start effector, so a failure here
-    // will not fail the start effector itself; instead the effector will eventually
-    // time out because replication never starts.
-    // Would be nice to be able to plug in to the entity lifecycle!
-
-    private static final class NodeRunningListener implements SensorEventListener<Boolean> {
-        private MySqlCluster cluster;
-        private Semaphore lock = new Semaphore(1);
-
-        public NodeRunningListener(MySqlCluster cluster) {
-            this.cluster = cluster;
-        }
-
-        @Override
-        public void onEvent(SensorEvent<Boolean> event) {
-            final MySqlNode node = (MySqlNode) event.getSource();
-            if (Boolean.TRUE.equals(event.getValue()) &&
-                    // We are interested in SERVICE_PROCESS_IS_RUNNING only while the node hasn't come online yet.
-                    // We will probably get several updates while replication is being initialized, so an
-                    // additional check is needed to see whether we have already handled this node.
-                    Boolean.FALSE.equals(node.getAttribute(MySqlNode.SERVICE_UP)) &&
-                    !Boolean.TRUE.equals(node.getAttribute(NODE_REPLICATION_INITIALIZED))) {
-
-                // Events executed sequentially so no need to synchronize here.
-                node.sensors().set(NODE_REPLICATION_INITIALIZED, Boolean.TRUE);
-
-                final Runnable nodeInitTaskBody;
-                if (MySqlClusterUtils.IS_MASTER.apply(node)) {
-                    nodeInitTaskBody = new InitMasterTaskBody(cluster, node);
-                } else {
-                    nodeInitTaskBody = new InitSlaveTaskBody(cluster, node, lock);
-                }
-
-                DynamicTasks.submitTopLevelTask(TaskBuilder.builder()
-                        .displayName("setup master-slave replication")
-                        .body(nodeInitTaskBody)
-                        .tag(BrooklynTaskTags.tagForContextEntity(node))
-                        .tag(BrooklynTaskTags.NON_TRANSIENT_TASK_TAG)
-                        .build(),
-                        node);
-            }
-        }
-
-    }
-    
-    private static class InitMasterTaskBody implements Runnable {
-        private MySqlNode master;
-        private MySqlCluster cluster;
-        public InitMasterTaskBody(MySqlCluster cluster, MySqlNode master) {
-            this.cluster = cluster;
-            this.master = master;
-        }
-
-        @Override
-        public void run() {
-            String binLogInfo = MySqlClusterUtils.executeSqlOnNode(master, "FLUSH TABLES WITH READ LOCK;SHOW MASTER STATUS \\G UNLOCK TABLES;");
-            Map<String, String> status = MySqlRowParser.parseSingle(binLogInfo);
-            String file = status.get("File");
-            String position = status.get("Position");
-            if (file != null && position != null) {
-                cluster.sensors().set(MySqlCluster.REPLICATION_LAST_SLAVE_SNAPSHOT, new ReplicationSnapshot(null, null, file, Integer.parseInt(position)));
-            }
-
-            // NOTE: Will be executed on each start, analogously to the standard CREATION_SCRIPT config
-            String creationScript = getDatabaseCreationScriptAsString(master);
-            if (creationScript != null) {
-                master.invoke(MySqlNode.EXECUTE_SCRIPT, ImmutableMap.of("commands", creationScript));
-            }
-        }
-
-        @Nullable private static String getDatabaseCreationScriptAsString(Entity entity) {
-            String url = entity.getConfig(MySqlMaster.MASTER_CREATION_SCRIPT_URL);
-            if (!Strings.isBlank(url))
-                return new ResourceUtils(entity).getResourceAsString(url);
-            String contents = entity.getConfig(MySqlMaster.MASTER_CREATION_SCRIPT_CONTENTS);
-            if (!Strings.isBlank(contents))
-                return contents;
-            return null;
-        }
-    }
-
-    // ============= Member Remove =============
-
-    public class MemberRemovedListener implements SensorEventListener<Entity> {
-        @Override
-        public void onEvent(SensorEvent<Entity> event) {
-            MySqlCluster cluster = (MySqlCluster) event.getSource();
-            Entity node = event.getValue();
-            String slaveAddress = cluster.getAttribute(SLAVE_ID_ADDRESS_MAPPING).remove(node.getId());
-            if (slaveAddress != null) {
-                // Could already be gone if stopping the entire app - let it throw an exception
-                MySqlNode master = (MySqlNode) Iterables.find(cluster.getMembers(), MySqlClusterUtils.IS_MASTER);
-                String username = MySqlClusterUtils.validateSqlParam(cluster.getConfig(SLAVE_USERNAME));
-                MySqlClusterUtils.executeSqlOnNodeAsync(master, String.format("DROP USER '%s'@'%s';", username, slaveAddress));
-            }
-        }
-    }
-
-}
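
As a quick illustration of the health poll above, here is a hedged fragment that is not part of this commit: it assumes "slaveNode" is a managed MySqlNode in scope and that the hand-written sample text matches what MySqlRowParser.parseSingle expects ("Key: Value" per line).

    // Illustrative only: deriving SLAVE_HEALTHY from one SHOW SLAVE STATUS \G result.
    String sample = "Slave_IO_Running: Yes\n"
            + "Slave_SQL_Running: Yes\n"
            + "Seconds_Behind_Master: 0\n";
    Boolean healthy = new MySqlClusterImpl.SlaveStateParser(slaveNode).apply(sample);
    // healthy is true only when both replication threads report "Yes"; as a side effect
    // the parser publishes SLAVE_SECONDS_BEHIND_MASTER (0 for the sample above).

In the feed itself, checkSuccess(StringPredicates.isNonBlank()) together with setOnFailure(false) means that a slave whose status query fails or returns nothing is reported unhealthy rather than left with a stale value.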

http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterUtils.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterUtils.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterUtils.java
deleted file mode 100644
index 9f8dc6d..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlClusterUtils.java
+++ /dev/null
@@ -1,52 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mysql;
-
-import org.apache.brooklyn.api.entity.Entity;
-import org.apache.brooklyn.api.mgmt.Task;
-import org.apache.brooklyn.core.effector.Effectors;
-import org.apache.brooklyn.core.entity.EntityPredicates;
-import org.apache.brooklyn.entity.database.DatastoreMixins.CanExecuteScript;
-import org.apache.brooklyn.util.core.task.DynamicTasks;
-
-import com.google.common.base.Predicate;
-import com.google.common.collect.ImmutableMap;
-
-public class MySqlClusterUtils {
-    protected static final Predicate<Entity> IS_MASTER = EntityPredicates.configEqualTo(MySqlNode.MYSQL_SERVER_ID, MySqlClusterImpl.MASTER_SERVER_ID);
-
-    protected static String executeSqlOnNode(MySqlNode node, String commands) {
-        return executeSqlOnNodeAsync(node, commands).getUnchecked();
-    }
-
-    // Can't call node.executeScript directly; we need to change the execution context, so use an effector task instead
-    protected static Task<String> executeSqlOnNodeAsync(MySqlNode node, String commands) {
-        return DynamicTasks.queue(Effectors.invocation(node, MySqlNode.EXECUTE_SCRIPT, ImmutableMap.of(CanExecuteScript.COMMANDS.getName(), commands))).asTask();
-    }
-
-    protected static String validateSqlParam(String config) {
-        // Don't go into escape madness, just deny any suspicious strings.
-        // Would be nice to use prepared statements, but not worth pulling in the extra dependencies.
-        if (config.contains("'") || config.contains("\\")) {
-            throw new IllegalStateException("User-provided string contains illegal SQL characters: " + config);
-        }
-        return config;
-    }
-
-}
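
For context, a sketch under assumptions rather than code from the files shown here: the DROP USER issued by MemberRemovedListener in MySqlClusterImpl undoes a per-slave grant that slave initialization creates on the master. Assuming standard MySQL grant syntax, same-package access to the helpers above, and that "cluster", "master", and "slaveAddress" (the new slave's SUBNET_ADDRESS) are in scope, that statement has roughly this shape:

    // Illustrative only; the actual statement lives in the slave-initialization task body,
    // which is not shown in full in this message.
    String username = MySqlClusterUtils.validateSqlParam(cluster.getConfig(MySqlCluster.SLAVE_USERNAME));
    String password = MySqlClusterUtils.validateSqlParam(cluster.getAttribute(MySqlCluster.SLAVE_PASSWORD));
    MySqlClusterUtils.executeSqlOnNodeAsync(master, String.format(
            "CREATE USER '%s'@'%s' IDENTIFIED BY '%s'; GRANT REPLICATION SLAVE ON *.* TO '%s'@'%s';",
            username, slaveAddress, password, username, slaveAddress));

This also explains why validateSqlParam above rejects quotes and backslashes: the user name and address are interpolated straight into SQL rather than bound as prepared-statement parameters.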

http://git-wip-us.apache.org/repos/asf/brooklyn-library/blob/02abbab0/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlDriver.java
----------------------------------------------------------------------
diff --git a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlDriver.java b/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlDriver.java
deleted file mode 100644
index b4da5f9..0000000
--- a/brooklyn-library/software/database/src/main/java/org/apache/brooklyn/entity/database/mysql/MySqlDriver.java
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.brooklyn.entity.database.mysql;
-
-import org.apache.brooklyn.entity.software.base.SoftwareProcessDriver;
-import org.apache.brooklyn.util.core.task.system.ProcessTaskWrapper;
-
-/**
- * The {@link SoftwareProcessDriver} for MySQL.
- */
-public interface MySqlDriver extends SoftwareProcessDriver {
-    String getStatusCmd();
-    ProcessTaskWrapper<Integer> executeScriptAsync(String commands);
-    ProcessTaskWrapper<Integer> executeScriptFromInstalledFileAsync(String filenameAlreadyInstalledAtServer);
-    ProcessTaskWrapper<Integer> dumpDatabase(String additionalOptions, String dumpDestination);
-    void changePassword(String oldPass, String newPass);
-}
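
A hedged caller-side sketch, not from this commit (the dump options and path below are made up, and "driver" is assumed to be the MySqlDriver of a started node): the asynchronous methods return ProcessTaskWrapper handles, which callers block on when they need the result.

    // Illustrative usage of the async driver API.
    ProcessTaskWrapper<Integer> dump = driver.dumpDatabase(" --all-databases", "/tmp/replication-dump.sql");
    dump.block();  // wait for the remote mysqldump to finish
    if (dump.getExitCode() != 0) {
        throw new IllegalStateException("mysqldump failed: " + dump.getStderr());
    }

Returning task wrappers rather than plain results lets callers such as the cluster replication code queue these operations inside larger Brooklyn task trees instead of blocking immediately.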

