hawq-commits mailing list archives

From r...@apache.org
Subject [4/8] incubator-hawq git commit: HAWQ-121. Remove legacy command line tools.
Date Thu, 05 Nov 2015 03:10:00 GMT
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/9932786b/tools/bin/gprecoverseg
----------------------------------------------------------------------
diff --git a/tools/bin/gprecoverseg b/tools/bin/gprecoverseg
deleted file mode 100755
index 6a4a447..0000000
--- a/tools/bin/gprecoverseg
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/usr/bin/env python
-"""
-gprecoverseg
-
-Recovers Greenplum segment instances that are marked as invalid, 
-if mirroring is configured and operational.
-"""
-
-#
-# THIS IMPORT MUST COME FIRST
-#
-# import mainUtils FIRST to get python version check
-from gppylib.mainUtils                  import simple_main
-from gppylib.programs.clsRecoverSegment import GpRecoverSegmentProgram
-
-if __name__ == '__main__':
-    simple_main( GpRecoverSegmentProgram.createParser, 
-                 GpRecoverSegmentProgram.createProgram, 
-                 GpRecoverSegmentProgram.mainOptions() )
-

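gprecoverseg is representative of the removed wrappers: each is a thin shim around gppylib.mainUtils.simple_main, which wraps option parsing, logging setup, and error handling around a program class. A minimal sketch of that pattern, assuming a hypothetical MyTool class and assuming the helper names resolve from gppylib.mainUtils the way the wildcard imports in the files below suggest (this sketch is not part of the commit):

#!/usr/bin/env python
# Hypothetical sketch of the entry-point pattern shared by the removed tools.
from gppylib.mainUtils import simple_main, addStandardLoggingAndHelpOptions
from gppylib.gpparseopts import OptParser, OptChecker

class MyTool:
    def __init__(self, options):
        self.options = options

    def run(self):
        # a real tool does its work here and returns a shell exit code
        return 0

    def cleanup(self):
        # release worker pools, connections, etc.
        pass

    @staticmethod
    def createParser():
        parser = OptParser(option_class=OptChecker, description="Example tool.")
        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
        return parser

    @staticmethod
    def createProgram(options, args):
        return MyTool(options)

if __name__ == '__main__':
    simple_main(MyTool.createParser, MyTool.createProgram)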
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/9932786b/tools/bin/gpstart
----------------------------------------------------------------------
diff --git a/tools/bin/gpstart b/tools/bin/gpstart
deleted file mode 100755
index bff03d8..0000000
--- a/tools/bin/gpstart
+++ /dev/null
@@ -1,853 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) Greenplum Inc 2008. All Rights Reserved. 
-#
-#
-# THIS IMPORT MUST COME FIRST
-#
-# import mainUtils FIRST to get python version check
-from gppylib.mainUtils import *
-
-import os, sys, copy, time
-
-import signal
-from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE, SUPPRESS_HELP
-
-try:
-    import pickle
-    
-    from gppylib.db import dbconn
-    from gppylib.gpparseopts import OptParser, OptChecker
-    from gppylib.gparray import *
-    from gppylib.gplog import *
-    from gppylib import gphostcache
-    from gppylib import userinput
-    from gppylib.db import catalog
-    from gppylib.commands import unix
-    from gppylib.commands import gp
-    from gppylib.commands.gp import SEGMENT_TIMEOUT_DEFAULT
-    from gppylib.commands import base
-    from gppylib.commands import pg
-    from gppylib.commands import dca
-    from gppylib import pgconf
-    from gppylib.operations.startSegments import *
-    from gppylib.gpcoverage import GpCoverage
-    from gppylib.utils import TableLogger
-    from gppylib.gp_dbid import GpDbidFile
-    from gppylib.gp_contentnum import GpContentnumFile
-    from gppylib.gp_era import GpEraFile
-except ImportError, e:
-    sys.exit('Cannot import modules.  Please check that you have sourced greenplum_path.sh.  Detail: ' + str(e))
-
-DEFAULT_NUM_WORKERS=64
-logger = get_default_logger()
-
-#---------------------------------------------------------------
-class GpStart:
-
-    ######
-    def __init__(self, specialMode, restricted, start_standby, master_datadir,
-                 wrapper,
-                 wrapper_args,
-                 parallel=DEFAULT_NUM_WORKERS,
-                 quiet=False,
-                 masteronly=False,
-                 interactive=False,
-                 timeout=SEGMENT_TIMEOUT_DEFAULT
-                 ):
-        assert(specialMode in [None, 'upgrade', 'maintenance'])
-        self.specialMode=specialMode
-        self.restricted=restricted
-        self.start_standby=start_standby
-        self.pool = None
-        self.parallel=parallel
-        self.attempt_standby_start=False
-        self.quiet=quiet
-        self.masteronly=masteronly
-        self.master_datadir=master_datadir
-        self.interactive=interactive
-        self.timeout=timeout
-        self.wrapper=wrapper
-        self.wrapper_args=wrapper_args
-        
-        #
-        # Some variables that are set during execution
-        #
-        self.dbidfile=None
-        self.era=None
-        self.gpversion=None
-        self.gparray=None
-        self.port=None
-        self.gphome=None
-        self.dburl=None
-        self.lc_collate=None
-        self.lc_monetary=None
-        self.lc_numeric=None
-        self.max_connections = None
-        self.gp_external_grant_privileges=None
-        logger.debug("Setting level of parallelism to: %d" % self.parallel)
-
-    ######
-    def run(self):
-        self._prepare()
-
-        # MPP-13700
-        if self.masteronly:
-            if os.getenv('GPSTART_INTERNAL_MASTER_ONLY'):
-                logger.info('Master-only start requested for management utilities.')
-            else:
-                if self.dbidfile.standby_dbid is not None:
-                    logger.warning("****************************************************************************")
-                    logger.warning("Master-only start requested in a configuration with a standby master.")
-                    logger.warning("This is advisable only under the direct supervision of Greenplum support. ")
-                    logger.warning("This mode of operation is not supported in a production environment and ")
-                    logger.warning("may lead to a split-brain condition and possible unrecoverable data loss.")
-                    logger.warning("****************************************************************************")
-                else:
-                    logger.info('Master-only start requested in configuration without a standby master.')
-                if not userinput.ask_yesno(None, "\nContinue with master-only startup", 'N'):
-                    raise UserAbortedException()
-
-        try:
-            # Disable Ctrl-C
-            signal.signal(signal.SIGINT,signal.SIG_IGN)
-
-            self._startMaster()
-            logger.info("Master Started...")
-
-            if self.masteronly:
-                return 0
-
-            # Do we have an even number of recovered segments?
-            if len(self.gparray.recoveredSegmentDbids) > 0 and len(self.gparray.recoveredSegmentDbids) % 2 == 0:
-                logger.info("Master start noticed recovered segments, updating configuration to rebalance.")
-                self.gparray.updateRoleForRecoveredSegs(self.dburl)
-                logger.info("Re-obtaining updated segment details from master...")
-                self.gparray = GpArray.initFromCatalog(self.dburl, utility=True)
-
-            self._check_standby_activated()
-
-            logger.info("Shutting down master")
-            cmd=gp.GpStop("Shutting down master", masterOnly=True,
-                          fast=True, quiet=logging_is_quiet(),
-                          verbose=logging_is_verbose(),
-                          datadir=self.master_datadir)
-            cmd.run()
-            logger.debug("results of forcing master shutdown: %s" % cmd)
-            #TODO: check results of command.
-    
-        finally:
-            # Reenable Ctrl-C
-            signal.signal(signal.SIGINT,signal.default_int_handler)
-
-        (segmentsToStart, invalidSegments, inactiveSegments) = self._prepare_segment_start()
-
-        if self.interactive:
-            self._summarize_actions(segmentsToStart)
-            if not userinput.ask_yesno(None, "\nContinue with Greenplum instance startup", 'N'):
-                raise UserAbortedException()
-
-        try:
-            # Disable Ctrl-C
-            signal.signal(signal.SIGINT,signal.SIG_IGN)
-
-            success=self._start(segmentsToStart, invalidSegments, inactiveSegments)
-        finally:
-            # Reenable Ctrl-C
-            signal.signal(signal.SIGINT,signal.default_int_handler)
-
-        if dca.is_dca_appliance():
-            logger.info("Initializing DCA settings")
-            dca.DcaGpdbInitialized.local()
-            logger.info("DCA settings initialized")
-
-        return 0 if success else 1
-
-    ######
-    def cleanup(self):
-        if self.pool:
-            self.pool.haltWork()
-
-#------------------------------- Internal Helper --------------------------------
-
-    ######
-    def _prepare(self):
-        logger.info("Gathering information and validating the environment...")
-        self._basic_setup()
-
-        if not self.masteronly:
-            gp.standby_check(self.master_datadir)
-
-        self._check_version()
-        self._check_master_running()
-
-        if not os.path.exists(self.master_datadir + '/pg_log'):
-            os.mkdir(self.master_datadir + '/pg_log')
-
-    ######
-    def _basic_setup(self):
-        self.gphome=gp.get_gphome()
-        if self.master_datadir is None:
-            self.master_datadir=gp.get_masterdatadir()
-        self.user=gp.get_user()
-        gp.check_permissions(self.user)
-        self._read_postgresqlconf()
-        self.dbidfile = GpDbidFile(self.master_datadir, do_read=True, logger=get_logger_if_verbose())
-
-    ######
-    def _read_postgresqlconf(self):
-        logger.debug("Obtaining master's port from master data directory")
-        pgconf_dict = pgconf.readfile(self.master_datadir + "/postgresql.conf")
-        self.port = pgconf_dict.int('port')
-        logger.debug("Read from postgresql.conf port=%s" % self.port)
-        self.max_connections = pgconf_dict.int('max_connections')
-        logger.debug("Read from postgresql.conf max_connections=%s" % self.max_connections)
-        self.gp_external_grant_privileges = pgconf_dict.str('gp_external_grant_privileges')
-        logger.debug("gp_external_grant_privileges is %s" % self.gp_external_grant_privileges)
-
-    ######
-    def _check_version(self):
-        self.gpversion=gp.GpVersion.local('local GP software version check',self.gphome)
-        logger.info("Greenplum Binary Version: '%s'" % self.gpversion)
-        
-        # It would be nice to work out the catalog version => greenplum version
-        # calculation so that we can print out nicer error messages when
-        # version doesn't match.  
-        bin_catversion = gp.GpCatVersion.local('local GP software catalog version check', self.gphome)
-        logger.info("Greenplum Catalog Version: '%s'" % bin_catversion)
-
-        dir_catversion = gp.GpCatVersionDirectory.local('local GP directory catalog version check', self.master_datadir)
-        
-        # If it's in upgrade mode, we don't need to check the catalog version because gpmigrator has
-        # already checked it.
-        if (self.specialMode != 'upgrade' and bin_catversion != dir_catversion):
-            logger.info("MASTER_DIRECTORY Catalog Version: '%s'" % dir_catversion)
-            logger.info("Catalog Version of master directory incompatible with binaries")
-            raise ExceptionNoStackTraceNeeded("Catalog Versions are incompatible")
-            
-
-    ######
-    def _check_master_running(self):
-        logger.debug("Check if Master is already running...")
-        if os.path.exists(self.master_datadir + '/postmaster.pid'):
-            logger.warning("postmaster.pid file exists on Master, checking if recovery startup required")
-            self._recovery_startup()
-
-        self._remove_postmaster_tmpfile(self.port)
-
-    def _check_standby_activated(self):
-        logger.debug("Checking if standby has been activated...")
-
-        if self.gparray.standbyMaster:
-            
-            syncmaster_pid = gp.getSyncmasterPID(self.gparray.standbyMaster.getSegmentHostName(),
-                                                 self.gparray.standbyMaster.getSegmentDataDirectory())
-            if syncmaster_pid <= 0:
-                # we don't have a syncmaster running, so now check for postmaster.pid
-                cmd = pg.DbStatus('check standby postmaster', 
-                                  self.gparray.standbyMaster,
-                                  ctxt=base.REMOTE, 
-                                  remoteHost=self.gparray.standbyMaster.getSegmentHostName())
-                cmd.run(validateAfter=False)
-                if cmd.is_running():
-                    # stop the master we've started up.
-                    cmd=gp.GpStop("Shutting down master", masterOnly=True, 
-                                  fast=True, quiet=logging_is_quiet(),
-                                  verbose=logging_is_verbose(),
-                                  datadir=self.master_datadir)
-                    cmd.run(validateAfter=True)
-                    raise ExceptionNoStackTraceNeeded("Standby activated")
-
-    ######
-    def _recovery_startup(self):
-        logger.info("Commencing recovery startup checks")
-
-        lockfile="/tmp/.s.PGSQL.%s" % self.port
-        tmpfile_exists = os.path.exists(lockfile)
-
-        netstat_port_active = unix.PgPortIsActive.local('check netstat for master port',
-                                                        lockfile, self.port)
-        if tmpfile_exists and netstat_port_active:
-            logger.info("Have lock file %s and a process running on port %s" % (lockfile,self.port))
-            raise ExceptionNoStackTraceNeeded("Master instance process running")
-        elif tmpfile_exists and not netstat_port_active:
-            logger.info("Have lock file %s but no process running on port %s" % (lockfile,self.port))
-        elif not tmpfile_exists and netstat_port_active:
-            logger.info("No lock file %s but a process running on port %s" % (lockfile,self.port))
-            raise ExceptionNoStackTraceNeeded("Port %s is already in use" % self.port)
-        elif not tmpfile_exists and not netstat_port_active:
-            logger.info("No socket connection or lock file in /tmp found for port=%s" % self.port)
-
-        logger.info("No Master instance process, entering recovery startup mode")
-
-        if tmpfile_exists:
-            logger.info("Clearing Master instance lock files")
-            os.remove(lockfile)
-
-        postmaster_pid_file = "%s/postmaster.pid" % self.master_datadir
-        if os.path.exists(postmaster_pid_file):
-            logger.info("Clearing Master instance pid file")
-            os.remove("%s/postmaster.pid" % self.master_datadir)
-
-        self._startMaster()
-
-        logger.info("Commencing forced instance shutdown")
-
-        gp.GpStop.local("forcing master shutdown", masterOnly=True,
-                        verbose=logging_is_verbose,
-                        quiet=self.quiet, fast=False,
-                        force=True, datadir=self.master_datadir)
-
-    ######
-    def _remove_postmaster_tmpfile(self,port):
-        lockfile="/tmp/.s.PGSQL.%s" % port
-        tmpfile_exists = os.path.exists(lockfile)
-
-        if tmpfile_exists:
-            logger.info("Clearing Master instance lock files")
-            os.remove(lockfile)
-        pass
-
-    ######
-    def _summarize_actions(self, segmentsToStart):
-        logger.info("--------------------------")
-        logger.info("Master instance parameters")
-        logger.info("--------------------------")
-        logger.info("Database                 = %s" % self.dburl.pgdb )
-        logger.info("Master Port              = %s" % self.port )
-        logger.info("Master directory         = %s" % self.master_datadir )
-        logger.info("Timeout                  = %d seconds" % self.timeout)
-        if self.gparray.standbyMaster:
-            if self.start_standby:
-                logger.info("Master standby start     = On")
-            else:
-                logger.info("Master standby start     = Off")
-        else:
-            logger.info("Master standby           = Off ")
-
-        logger.info("--------------------------------------")
-        logger.info("Segment instances that will be started")
-        logger.info("--------------------------------------")
-
-        isFileReplication = self.gparray.getFaultStrategy() == FAULT_STRATEGY_FILE_REPLICATION
-
-        tabLog = TableLogger().setWarnWithArrows(True)
-        header = ["Host","Datadir","Port"]
-        if isFileReplication:
-            header.append("Role")
-        tabLog.info(header)
-        for db in segmentsToStart:
-            line = [db.getSegmentHostName(), db.getSegmentDataDirectory(), str(db.getSegmentPort())]
-            if isFileReplication:
-                line.append("Primary" if db.isSegmentPrimary(True) else "Mirror")
-            tabLog.info(line)
-        tabLog.outputTable()
-        # show a warning if gp_external_grant_privileges is on
-        if self.gp_external_grant_privileges and "on" in self.gp_external_grant_privileges:
-            logger.warning("--------------------------------------")
-            logger.warning("Using gp_external_grant_privileges is not recommended and will soon be deprecated.")
-            logger.warning("--------------------------------------")
-
-    ######
-    def _get_format_string(self):
-        host_len=0
-        dir_len=0
-        port_len=0
-        for db in self.gparray.getSegDbList():
-            if len(db.hostname) > host_len:
-                host_len=len(db.hostname)
-            if len(db.datadir) > dir_len:
-                dir_len=len(db.datadir)
-            if len(str(db.port)) > port_len:
-                port_len=len(str(db.port))
-
-        return "%-" + str(host_len) + "s  %-" + str(dir_len) + "s  %-" + str(port_len) + "s  %s"
-
-    ######
-    def _startMaster(self):
-        logger.info("Starting Master instance in admin mode")
-
-        d = GpDbidFile(self.master_datadir)
-        d.read_gp_dbid()
-        cn = GpContentnumFile(self.master_datadir)
-        cn.read_gp_contentnum()
-
-        numContentsInCluster = cn.contentnum
-        cmd=gp.MasterStart('master in utility mode', self.master_datadir,
-                           self.port, d.dbid, d.standby_dbid or 0,
-                           numContentsInCluster, self.era,
-                           wrapper=self.wrapper, wrapper_args=self.wrapper_args, 
-                           specialMode=self.specialMode, timeout=self.timeout, utilityMode=True
-                           );
-        cmd.run()
-
-        if cmd.get_results().rc != 0:
-            logger.fatal("Failed to start Master instance in admin mode")
-            cmd.validate()
-
-        logger.info("Obtaining Greenplum Master catalog information")
-
-        logger.info("Obtaining Segment details from master...")
-        self.dburl = dbconn.DbURL(port=self.port,dbname='template0')
-        self.gparray = GpArray.initFromCatalog(self.dburl, utility=True)
-
-        logger.debug("Gathering collation settings...")
-        self.conn = dbconn.connect(self.dburl, utility=True)
-        (self.lc_collate,self.lc_monetary,self.lc_numeric)=catalog.getCollationSettings(self.conn)
-
-        logger.info("Setting new master era")
-        e = GpEraFile(self.master_datadir, logger=get_logger_if_verbose())
-        e.new_era(self.gparray.master.hostname, self.port, time.strftime('%y%m%d%H%M%S'))
-        self.era = e.era
-
-        self.conn.close()
-
-    ######
-    def _start(self, segmentsToStart, invalidSegments, inactiveSegments):
-        """ starts the standby master, all of the segments, and the master
-            
-            returns whether all segments that should be started were started successfully
-
-            note that the parameters do not list master/standby, they only list data segments
-        """
-        workers=min(len(self.gparray.get_hostlist()),self.parallel)
-        self.pool = base.WorkerPool(numWorkers=workers)
-
-        if os.path.exists(self.master_datadir + "/gpexpand.status") and not self.restricted:
-            raise ExceptionNoStackTraceNeeded("Found a System Expansion Setup in progress. Please run 'gpexpand --rollback'")
-
-        self.standby_was_started = self._start_standby()
-
-        strategy = self.gparray.getFaultStrategy()
-        logger.debug("Strategy in gparray is %s" % strategy)
-
-        # The SAN ('s') fault strategy leaves the mirrors shut down until they're needed,
-        # so we don't want to start them -- a SAN start works just like a mirrorless start.
-        if strategy == FAULT_STRATEGY_FILE_REPLICATION:
-            startMode = START_AS_PRIMARY_OR_MIRROR
-        else:
-            startMode = START_AS_MIRRORLESS
-
-        localeData = ":".join([self.lc_collate,self.lc_monetary,self.lc_numeric])
-        segmentStartOp = StartSegmentsOperation(self.pool,self.quiet, localeData, self.gpversion,
-                                                self.gphome, self.master_datadir, self.timeout, 
-                                                self.specialMode, self.wrapper, self.wrapper_args)
-        segmentStartResult = segmentStartOp.startSegments(self.gparray, segmentsToStart, startMode, self.era)
-
-        # see if we have at least one segment per content
-        willShutdownSegments = not self._verify_enough_segments(segmentStartResult,self.gparray)
-
-        # process the result of segment startup
-        self._print_segment_start(segmentStartResult, invalidSegments, inactiveSegments, willShutdownSegments )
-
-        if willShutdownSegments :
-            # go through and remove any segments that we did start so that we keep everything
-            # shutdown cleanly
-            self._shutdown_segments(segmentStartResult)
-            raise ExceptionNoStackTraceNeeded("Do not have enough valid segments to start the array.")
-
-        failedToStart = segmentStartResult.getFailedSegmentObjs()
-        final_result  = self._start_final_master(failedToStart, invalidSegments)
-        return final_result
-
-
-    ######
-    def _prepare_segment_start(self):
-        segs = self.gparray.get_valid_segdbs()
-
-        logger.debug("gp_segment_configuration indicates following valid segments")
-        for seg in segs:
-            logger.debug("SegDB: %s" % seg)
-
-        # segments marked down
-        invalid_segs=self.gparray.get_invalid_segdbs()
-
-        # mirrors that should not be started
-        inactive_mirrors=self.gparray.get_inactive_mirrors_segdbs()
-        if len(inactive_mirrors) > 0:
-            logger.debug("Found %d inactive mirrors, skipping" % len(inactive_mirrors))
-        inactiveDbIds = {}
-        for segment in inactive_mirrors:
-            inactiveDbIds[segment.getSegmentDbId()] = True
-        invalid_segs = [segment for segment in invalid_segs if inactiveDbIds.get(segment.getSegmentDbId()) is None]
-
-        # now produce the list of segments to actually start
-        dbIdsToNotStart = {}
-        for segment in invalid_segs:
-            dbIdsToNotStart[segment.getSegmentDbId()] = True
-        for segment in inactive_mirrors:
-            dbIdsToNotStart[segment.getSegmentDbId()] = True
-
-        for seg in invalid_segs:
-            logger.warning("Skipping startup of segment marked down in configuration: on %s directory %s <<<<<" % \
-                    (seg.getSegmentHostName(), seg.getSegmentDataDirectory()))
-        for seg in inactive_mirrors:
-            logger.warning("Skipping startup of segment marked INACTIVE in configuration: %s directory %s <<<<<" % \
-                    (seg.getSegmentHostName(), seg.getSegmentDataDirectory()))
-        logger.debug("dbIdsToNotStart has %d entries" % len(dbIdsToNotStart))
-
-        # we intend to dedup invalid_segs and inactive_mirrors
-        assert len(dbIdsToNotStart) == len(invalid_segs) + len(inactive_mirrors)
-
-        toStart = [seg for seg in segs if dbIdsToNotStart.get(seg.getSegmentDbId()) is None]
-        return (toStart, invalid_segs, inactive_mirrors)
-
-    ####    
-    def _verify_enough_segments(self,startResult,gparray):
-        successfulSegments = startResult.getSuccessfulSegments()
-        mirroringFailures = [f.getSegment() for f in startResult.getFailedSegmentObjs() \
-                    if f.getReasonCode() == gp.SEGSTART_ERROR_MIRRORING_FAILURE]
-
-        allSegmentsByContent = GpArray.getSegmentsByContentId(gparray.getSegDbList())
-        successfulSegmentsByDbId = GpArray.getSegmentsGroupedByValue(successfulSegments, GpDB.getSegmentDbId )
-        mirroringFailuresByDbId = GpArray.getSegmentsGroupedByValue(mirroringFailures, GpDB.getSegmentDbId )
-
-        #
-        # look at each content, see if there is a segment available (or one which can be made
-        #   available by the fault prober by switching to changetracking)
-        #
-        has_least_one_segment = False
-        for primary in gparray.getSegDbList():
-
-            if not primary.isSegmentPrimary(current_role=True):
-                continue
-
-            # find the mirror
-            segs = allSegmentsByContent[primary.getSegmentContentId()]
-            mirror = None
-            if len(segs) > 1:
-                mirror = [s for s in segs if s.isSegmentMirror(current_role=True)][0]
-
-            if primary.getSegmentDbId() in successfulSegmentsByDbId:
-                # good, we can continue!
-                has_least_one_segment = True
-                continue
-
-            if primary.getSegmentDbId() in mirroringFailuresByDbId \
-                    and not primary.isSegmentModeInChangeLogging():
-                #
-                # we have a mirroring failure so the fault prober can probably convert us to changetracking
-                #
-                #   note that if the primary is already in changetracking then a mirroring failure is
-                #   considered fatal here
-                #
-                continue
-
-            if  mirror is not None \
-                    and (mirror.getSegmentDbId() in successfulSegmentsByDbId or mirror.getSegmentDbId() in mirroringFailuresByDbId) \
-                    and primary.isSegmentModeSynchronized():
-                #
-                # we could fail over to that mirror, so it's okay to start up like this
-                #
-                continue
-
-            logger.warning("No segment started for content: %d." % primary.getSegmentContentId())
-            logger.info("dumping success segments: %s" % [s.__str__() for s in startResult.getSuccessfulSegments()])
-
-        if not has_least_one_segment:
-            logger.error("No alive segment started.")
-            return False
-
-        return True
-
-    ######  
-    def _shutdown_segments(self,segmentStartResult):
-
-        logger.info("Commencing parallel segment instance shutdown, please wait...")
-
-        #
-        # Note that a future optimization would be to only stop the segments that we actually started.
-        #    This requires knowing which ones are left in a partially up state
-        #
-        #
-        # gather the list of those that we actually tried to start
-        toStop = []
-        toStop.extend(segmentStartResult.getSuccessfulSegments())
-        toStop.extend([f.getSegment() for f in segmentStartResult.getFailedSegmentObjs()])
-
-        segmentsByHost = GpArray.getSegmentsByHostName(toStop)
-
-        #
-        # stop them, stopping primaries before mirrors
-        #
-        for type in ["primary","mirror"]:
-            dispatch_count=0
-            for hostName, segments in segmentsByHost.iteritems():
-
-                if type == "primary":
-                    segments = [seg for seg in segments if seg.isSegmentPrimary(current_role=True)]
-                else: segments = [seg for seg in segments if seg.isSegmentMirror(current_role=True)]
-
-                if len(segments) > 0:
-                    logger.debug("Dispatching command to shutdown %s segments on host: %s" % (type, hostName))
-                    cmd=gp.GpSegStopCmd("remote segment stop on host '%s'" % hostName,
-                                        self.gphome, self.gpversion,
-                                        mode='immediate', dbs=segments,
-                                        verbose=logging_is_verbose(),
-                                        ctxt=base.REMOTE, remoteHost=hostName)
-                    self.pool.addCommand(cmd)
-                    dispatch_count += 1
-
-            if dispatch_count > 0:
-                self.pool.wait_and_printdots(dispatch_count,self.quiet)
-        pass
-
-    ######
-    def _print_segment_start(self, segmentStartResult, invalidSegments, inactiveSegments, willShutdownSegments):
-        """
-        Print the results of segment startup
-
-        segmentStartResult is the StartSegmentsResult from the actual start
-        invalidSegments are those that we didn't even try to start because they are marked as down or should otherwise
-           not be started
-        """
-        mirroringFailures = [f for f in segmentStartResult.getFailedSegmentObjs() \
-                        if f.getReasonCode() == gp.SEGSTART_ERROR_MIRRORING_FAILURE]
-        nonMirroringFailures = [f for f in segmentStartResult.getFailedSegmentObjs() \
-                        if f.getReasonCode() != gp.SEGSTART_ERROR_MIRRORING_FAILURE]
-
-        started = len(segmentStartResult.getSuccessfulSegments())
-        failedFromMirroring = len(mirroringFailures)
-        failedNotFromMirroring = len(nonMirroringFailures)
-        totalTriedToStart = started + failedFromMirroring + failedNotFromMirroring 
-
-        if failedFromMirroring > 0 or failedNotFromMirroring > 0 or logging_is_verbose():
-            logger.info("----------------------------------------------------")
-            for failure in segmentStartResult.getFailedSegmentObjs():
-                segment = failure.getSegment()
-                logger.info("DBID:%d  FAILED  host:'%s' datadir:'%s' with reason:'%s'" % (
-                            segment.getSegmentDbId(), segment.getSegmentHostName(),
-                            segment.getSegmentDataDirectory(), failure.getReason()))
-            for segment in segmentStartResult.getSuccessfulSegments():
-                logger.debug("DBID:%d  STARTED" % segment.getSegmentDbId())
-            logger.info("----------------------------------------------------\n\n")
-
-        tableLog = TableLogger().setWarnWithArrows(True)
-
-        tableLog.addSeparator()
-        tableLog.info(["Successful segment starts", "= %d" % started])
-
-        if self.gparray.getFaultStrategy() == FAULT_STRATEGY_FILE_REPLICATION and \
-            failedFromMirroring > 0:
-            tableLog.warn(["Failed segment starts, from mirroring connection between primary and mirror", "= %d" % failedFromMirroring])
-            tableLog.infoOrWarn(failedNotFromMirroring > 0, ["Other failed segment starts", "= %d" % failedNotFromMirroring])
-        else:
-            assert failedFromMirroring == 0
-            tableLog.infoOrWarn(failedNotFromMirroring > 0, ["Failed segment starts", "= %d" % failedNotFromMirroring])
-
-        tableLog.infoOrWarn(len(invalidSegments ) > 0, \
-                        ["Skipped segment starts (segments are marked down in configuration)", "= %d" % len(invalidSegments)])
-        tableLog.addSeparator()
-        tableLog.outputTable()
-
-        attentionFlag = "<<<<<<<<" if started != totalTriedToStart else ""
-        logger.info("")
-        if len(invalidSegments) > 0 :
-            skippedMsg = ", skipped %s other segments" % len(invalidSegments)
-        else:
-            skippedMsg = ""
-        logger.info("Successfully started %d of %d segment instances%s %s" %
-                (started, totalTriedToStart, skippedMsg, attentionFlag))
-        logger.info("----------------------------------------------------")
-
-        if failedNotFromMirroring > 0 or failedFromMirroring > 0:
-            logger.warning("Segment instance startup failures reported")
-            logger.warning("Failed start %d of %d segment instances %s" % \
-                            (failedNotFromMirroring + failedFromMirroring, totalTriedToStart, attentionFlag))
-            logger.warning("Review %s" % get_logfile())
-            if not willShutdownSegments:
-                logger.warning("For more details on segment startup failure(s)")
-                logger.warning("Run  gpstate -s  to review current segment instance status")
-
-            logger.info("----------------------------------------------------")
-
-        # MPP-14014
-        # don't confuse the user with warning about intentionally invalid segments during upgrade
-        if self.specialMode == 'upgrade':
-            return
-
-        if len(invalidSegments) > 0:
-            logger.warning("****************************************************************************")
-            logger.warning("There are %d segment(s) marked down in the database" % len(invalidSegments))
-            logger.warning("To recover from this current state, review usage of the gprecoverseg")
-            logger.warning("management utility which will recover failed segment instance databases.")
-            logger.warning("****************************************************************************")
-
-
-    ######
-    def _start_final_master(self, failedToStart, invalidSegments):
-        ''' The last step in the startup sequence is to start the master.
-
-            After starting the master we connect to it.  This is done both as a check that the
-            system has actually started and because certain backend processes are not kicked off
-            until the first connection.  The DTM is an example of this, and connecting here lets
-            those initialization messages end up in the gpstart log rather than the user's psql session.
-        '''
-        restrict_txt = ""
-        if self.restricted:
-            restrict_txt = "in RESTRICTED mode"
-
-        is_upgrade_mode = False
-        if self.specialMode == 'upgrade':
-            is_upgrade_mode = True
-
-        standby_dbid = 0
-        if self.gparray.standbyMaster:
-            standby_dbid = self.gparray.standbyMaster.getSegmentDbId()
-
-        numContentsInCluster = self.gparray.getNumSegmentContents()
-
-        logger.info("Starting Master instance %s directory %s %s" % (self.gparray.master.hostname, self.master_datadir, restrict_txt))
-
-        # attempt to start master
-        gp.MasterStart.local("Starting Master instance",
-                             self.master_datadir, self.port, self.gparray.master.dbid, standby_dbid,
-                             numContentsInCluster, self.era,
-                             wrapper=self.wrapper, wrapper_args=self.wrapper_args,
-                             specialMode=self.specialMode, restrictedMode=self.restricted, timeout=self.timeout, 
-                             max_connections=self.max_connections, disableMasterMirror=(not self.start_standby)
-                             )
-
-        # check that master is running now
-        if not pg.DbStatus.local('master instance',self.gparray.master):
-            logger.warning("Command pg_ctl reports Master %s on port %d not running" % (self.gparray.master.datadir,self.gparray.master.port))
-            logger.warning("Master could not be started")
-            return False
-
-        logger.info("Command pg_ctl reports Master %s instance active" % self.gparray.master.hostname)
-
-        # verify we can connect to the master
-        # MPP-15604 - use bootstrap user during upgrade
-        msg = None
-        testurl = self.dburl
-        if is_upgrade_mode:
-            testurl = copy.copy(testurl)
-            testurl.pguser = gp.get_user()
-            logger.info("Testing connection as user %s" % testurl.pguser)
-
-        try:
-            masterconn = dbconn.connect(testurl, upgrade=is_upgrade_mode)
-            masterconn.close()
-
-        except Exception, e:
-            # MPP-14016.  While certain fts scenarios will trigger initial connection failures
-            # we still need to watch for PANIC events, especially during upgrades.
-            msg = str(e)
-            if 'PANIC' in msg:
-                logger.critical(msg)
-                return False
-            logger.warning(msg)
-
-        standby_failure = not self.standby_was_started and self.attempt_standby_start
-        if standby_failure:
-            logger.warning("Standby Master could not be started")
-        if len(failedToStart) > 0:
-            logger.warning("Number of segments which failed to start:  %d", len(failedToStart))
-
-        if standby_failure:
-            return False
-
-        if len(invalidSegments) > 0:
-            logger.warning("Number of segments not attempted to start: %d", len(invalidSegments))
-            
-        if len(failedToStart) > 0 or len(invalidSegments) > 0 or msg is not None:
-            logger.info("Check status of database with gpstate utility")
-        else:
-            logger.info("Database successfully started")
-
-        if len(failedToStart) > 0:
-            logger.warning("Some segments failed please try to recovery failed segment with gprecoverseg utility")
-        
-        # set the era we used when starting the segments
-        e = GpEraFile(self.master_datadir, logger=get_logger_if_verbose())
-        e.set_era(self.era)
-
-        return True
-
-
-
-    ######
-    def _start_standby(self):
-        ''' Used to start the standby master if necessary.
-
-            returns whether the standby master was started or not
-        '''
-        if self.start_standby and self.gparray.standbyMaster is not None:
-            try:
-                self.attempt_standby_start=True
-                host=self.gparray.standbyMaster.hostname
-                datadir=self.gparray.standbyMaster.datadir
-                port=self.gparray.standbyMaster.port
-                dbid=self.gparray.standbyMaster.dbid
-                ncontents = self.gparray.getNumSegmentContents()
-                return gp.start_standbymaster(host,datadir,port,dbid,ncontents)
-            except base.ExecutionError, e:
-                logger.warning("Error occured while starting the standby master: %s" % e)
-                return False
-        else:
-            logger.info("No standby master configured.  skipping...")
-            return False
-
-    #----------------------- Command line option parser ----------------------
-    @staticmethod
-    def createParser():
-        parser = OptParser(option_class=OptChecker,
-                    description="Starts a GPDB Array.",
-                    version='%prog version $Revision$')
-        parser.setHelp([])
-
-        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True)
-
-        addTo = OptionGroup(parser, 'Connection options')
-        parser.add_option_group(addTo)
-        addMasterDirectoryOptionForSingleClusterProgram(addTo)
-
-        addTo = OptionGroup(parser, 'Database startup options: ')
-        parser.add_option_group(addTo)
-        addTo.add_option('-U', '--specialMode', type='choice', choices=['upgrade', 'maintenance'],
-                           metavar='upgrade|maintenance', action='store', default=None,
-                           help=SUPPRESS_HELP)
-        addTo.add_option('-m', '--master_only', action='store_true',
-                            help='start master instance only in maintenance mode')
-        addTo.add_option('-y', '--no_standby', dest="start_standby", action='store_false',default=True,
-                            help='do not start master standby server')
-        addTo.add_option('-B', '--parallel', type="int", default=DEFAULT_NUM_WORKERS, metavar="<parallel_processes>",
-                            help='number of segment hosts to run in parallel. Default is %d' % DEFAULT_NUM_WORKERS)
-        addTo.add_option('-R', '--restricted', action='store_true',
-                            help='start in restricted mode. Only users with superuser privilege are allowed to connect.')
-        addTo.add_option('-t', '--timeout', dest='timeout', default=SEGMENT_TIMEOUT_DEFAULT, type='int',
-                           help='time to wait for segment startup (in seconds)')
-        addTo.add_option('', '--wrapper', dest="wrapper", default=None, type='string');
-        addTo.add_option('', '--wrapper-args', dest="wrapper_args", default=None, type='string');
-        
-        parser.set_defaults(verbose=False, filters=[], slice=(None, None))
-
-        return parser
-
-    @staticmethod
-    def createProgram(options, args):
-        proccount=os.environ.get('GP_MGMT_PROCESS_COUNT')
-        if options.parallel == 64 and proccount is not None:
-            options.parallel = int(proccount)
-
-        #-n sanity check
-        if options.parallel > 128 or options.parallel < 1:
-            raise ProgramArgumentValidationException("Invalid value for parallel degree: %s" % options.parallel )
-
-        if args:
-            raise ProgramArgumentValidationException("Argument %s is invalid.  Is an option missing a parameter?" % args[-1])
-
-        return GpStart(options.specialMode, options.restricted,
-                          options.start_standby,
-                          master_datadir=options.masterDataDirectory,
-                          parallel=options.parallel,
-                          quiet=options.quiet,
-                          masteronly=options.master_only,
-                          interactive=options.interactive,
-                          timeout=options.timeout,
-                          wrapper=options.wrapper,
-                          wrapper_args=options.wrapper_args);
-
-if __name__ == '__main__':
-    simple_main( GpStart.createParser, GpStart.createProgram)
-

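gpstart's per-host work (segment startup via StartSegmentsOperation and the emergency shutdown in _shutdown_segments) is fanned out through gppylib's WorkerPool, and gpstop below reuses the same dispatch-and-wait pattern. A condensed sketch of that pattern, using only the calls that appear in the deleted code; segments_by_host and the fixed options here are placeholders, not part of the commit:

# Sketch of the per-host parallel dispatch used by the removed tools.
# segments_by_host maps hostname -> list of segment objects (placeholder input).
from gppylib.commands import base, gp

def stop_segments(gphome, gpversion, segments_by_host, parallel=64, quiet=False):
    pool = base.WorkerPool(numWorkers=min(len(segments_by_host), parallel))
    try:
        dispatch_count = 0
        for hostname, dbs in segments_by_host.iteritems():
            cmd = gp.GpSegStopCmd("remote segment stop on host '%s'" % hostname,
                                  gphome, gpversion,
                                  mode='immediate', dbs=dbs,
                                  verbose=False,
                                  ctxt=base.REMOTE, remoteHost=hostname)
            pool.addCommand(cmd)
            dispatch_count += 1
        if dispatch_count > 0:
            pool.wait_and_printdots(dispatch_count, quiet)
    finally:
        pool.haltWork()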
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/9932786b/tools/bin/gpstate
----------------------------------------------------------------------
diff --git a/tools/bin/gpstate b/tools/bin/gpstate
deleted file mode 100755
index bbbf8c4..0000000
--- a/tools/bin/gpstate
+++ /dev/null
@@ -1,16 +0,0 @@
-#!/usr/bin/env python
-#
-# Displays system status information
-#
-
-
-#
-# THIS IMPORT MUST COME FIRST
-#
-# import mainUtils FIRST to get python version check
-from gppylib.mainUtils import *
-from gppylib.programs.clsSystemState import *
-
-#-------------------------------------------------------------------------
-if __name__ == '__main__':
-    simple_main( GpSystemStateProgram.createParser, GpSystemStateProgram.createProgram)

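gpstate is the thinnest of the wrappers; the larger tools (gpstart above, gpstop below) additionally open with an import guard so a missing or unsourced environment fails fast with a hint instead of a raw traceback. Reduced to its essentials (the module list is trimmed here purely for illustration), the guard looks like:

#!/usr/bin/env python
# Condensed form of the import guard used by the larger removed tools;
# the imported module list is trimmed for illustration.
import sys

try:
    from gppylib.gplog import get_default_logger
    from gppylib.commands import gp
except ImportError, e:
    sys.exit('Cannot import modules.  Please check that you have sourced greenplum_path.sh.  Detail: ' + str(e))

logger = get_default_logger()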
http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/9932786b/tools/bin/gpstop
----------------------------------------------------------------------
diff --git a/tools/bin/gpstop b/tools/bin/gpstop
deleted file mode 100755
index 03e05e1..0000000
--- a/tools/bin/gpstop
+++ /dev/null
@@ -1,674 +0,0 @@
-#!/usr/bin/env python
-#
-# Copyright (c) Greenplum Inc 2008. All Rights Reserved. 
-#
-#
-# THIS IMPORT MUST COME FIRST
-#
-# import mainUtils FIRST to get python version check
-from gppylib.mainUtils import *
-
-import os, sys
-import signal
-import time
-from optparse import Option, OptionGroup, OptionParser, OptionValueError, SUPPRESS_USAGE
-
-try:
-    from gppylib.gpparseopts import OptParser, OptChecker
-    from gppylib.gplog import *
-    from gppylib.db import dbconn
-    from gppylib.db import catalog
-    from gppylib.gparray import *
-    from gppylib import gphostcache
-    from gppylib import userinput
-    from gppylib import pgconf
-    from gppylib.commands import unix
-    from gppylib.commands import gp
-    from gppylib.commands.gp import SEGMENT_TIMEOUT_DEFAULT
-    from gppylib.commands import base
-    from gppylib.commands import pg
-    from gppylib.commands import dca
-    from gppylib.gpcoverage import GpCoverage
-    from gppylib.utils import TableLogger
-    from gppylib.gp_era import GpEraFile
-except ImportError, e:    
-    sys.exit('ERROR: Cannot import modules.  Please check that you have sourced greenplum_path.sh.  Detail: ' + str(e))
-
-DEFAULT_NUM_WORKERS=64
-logger = get_default_logger()
-
-#---------------------------------------------------------------
-class SegStopStatus:
-    """Tracks result of trying to stop an individual segment database"""
-    def __init__(self,db,stopped=False,reason=None,failedCmd=None,timedOut=False):
-        self.db=db
-        self.stopped=stopped
-        self.reason=reason
-        self.failedCmd=failedCmd
-        self.timedOut=timedOut
-        
-    
-    def __str__(self):
-        if self.stopped:
-            return "DBID:%d  STOPPED" % self.db.dbid
-        else:
-            return "DBID:%d  FAILED  host:'%s' datadir:'%s' with reason:'%s'" % (self.db.dbid,self.db.hostname,self.db.datadir,self.reason)
-        
-#---------------------------------------------------------------
-class GpStop:
-    
-    ######
-    def __init__(self,mode,master_datadir=None,
-                 parallel=DEFAULT_NUM_WORKERS,quiet=False,masteronly=False,sighup=False,
-                 interactive=False,stopstandby=False,restart=False,
-                 timeout=SEGMENT_TIMEOUT_DEFAULT):
-        self.mode=mode
-        self.master_datadir=master_datadir
-        self.pool = None
-        self.parallel=parallel
-        self.quiet=quiet
-        self.pid=0
-        self.masteronly=masteronly
-        self.sighup=sighup
-        self.interactive=interactive
-        self.stopstandby=stopstandby
-        self.restart=restart
-        self.hadFailures = False
-        self.timeout=timeout
-
-        # some variables that will be assigned during run() 
-        self.gphome = None
-        self.port = None
-        self.dburl = None
-        self.conn = None
-        self.gparray = None
-        self.hostcache = None
-        self.gpversion = None
-        
-        logger.debug("Setting level of parallelism to: %d" % self.parallel)
-        pass       
-        
-        
-    #####
-    def run(self):
-        """
-        Run and return the exitCode for the program
-        """
-        self._prepare()
-        
-        if self.masteronly:
-            self._stop_master()                                 
-        else:
-            if self.sighup:
-                return self._sighup_cluster()
-            else:
-                if self.interactive:
-                    self._summarize_actions()
-                    if not userinput.ask_yesno(None, "\nContinue with Greenplum instance shutdown", 'N'):
-                        raise UserAbortedException()
-
-                try:
-                    # Disable Ctrl-C
-                    signal.signal(signal.SIGINT,signal.SIG_IGN)
-
-                    self._stop_master()                                
-                    self._stop_standby()            
-                    self._stop_segments()                                
-                    self.cleanup()
-                finally:
-                    # Reenable Ctrl-C
-                    signal.signal(signal.SIGINT,signal.default_int_handler)
-                
-                if self.restart:                    
-                    logger.info("Restarting System...")                    
-                    gp.NewGpStart.local('restarting system',verbose=logging_is_verbose(),nostandby=not self.stopstandby, masterDirectory=self.master_datadir)                    
-                else:
-                    if dca.is_dca_appliance():
-                        logger.info("Unregistering with DCA")
-                        dca.DcaGpdbStopped.local()
-                        logger.info("Unregistered with DCA")
-         
-                if self.hadFailures:
-                    # MPP-15208
-                    return 2 
-        return 0
-    
-    
-    ######
-    def cleanup(self):
-        if self.pool:
-            self.pool.haltWork()    
-        
-    ######
-    def _prepare(self):
-        logger.info("Gathering information and validating the environment...")        
-        self.gphome=gp.get_gphome()
-        if self.master_datadir is None:
-            self.master_datadir=gp.get_masterdatadir()
-        self.user=gp.get_user() 
-        gp.check_permissions(self.user)
-        self._read_postgresqlconf()
-        gp.standby_check(self.master_datadir)   
-        self._check_db_running()
-        self._build_gparray()
-        self._check_version()
-    
-    ######
-    def _check_version(self):            
-        self.gpversion=gp.GpVersion.local('local GP software version check',self.gphome)
-        logger.info("Greenplum Version: '%s'" % self.gpversion)
-  
-    
-    ######
-    def _read_postgresqlconf(self):
-        logger.debug("Obtaining master's port from master data directory")
-        pgconf_dict = pgconf.readfile(self.master_datadir + "/postgresql.conf")        
-        self.port = pgconf_dict.int('port')        
-        logger.debug("Read from postgresql.conf port=%s" % self.port)   
-    
-    
-    ######
-    def _check_db_running(self):
-        if os.path.exists(self.master_datadir + '/postmaster.pid'):
-            self.pid=gp.read_postmaster_pidfile(self.master_datadir)
-            if not unix.check_pid(self.pid):
-                logger.warning("Have a postmaster.pid file but no Master segment process running")
-                logger.info("Clearing postmaster.pid file and /tmp lock files")
-                
-                lockfile="/tmp/.s.PGSQL.%s" % self.port
-                logger.info("Clearing Master instance lock files")        
-                os.remove(lockfile)
-        
-                logger.info("Clearing Master instance pid file")
-                os.remove("%s/postmaster.pid" % self.master_datadir)
-                
-                logger.info("Setting recovery parameters")
-                self.mode='fast'
-                logger.info("Commencing forced shutdown")
-            pass
-        else:
-            raise ExceptionNoStackTraceNeeded('postmaster.pid file does not exist.  Is the Greenplum instance already stopped?')
-    
-    
-    ######
-    def _build_gparray(self):
-        logger.info("Obtaining Greenplum Master catalog information")
-        
-        logger.info("Obtaining Segment details from master...")
-        self.dburl = dbconn.DbURL(port=self.port,dbname='template0')
-        self.gparray = GpArray.initFromCatalog(self.dburl, utility=True)
-    
-        
-    ######
-    def _stop_master(self,masterOnly=False):
-        ''' shuts down the master '''
-        
-        self.conn = dbconn.connect(self.dburl, utility=True)        
-        self._stop_master_checks()
-            
-        self.conn.close()
-    
-        e = GpEraFile(self.master_datadir, logger=get_logger_if_verbose())
-        e.end_era()
-
-        logger.info("Commencing Master instance shutdown with mode=%s" % self.mode)
-        logger.info("Master segment instance directory=%s" % self.master_datadir)
-        
-        cmd=gp.MasterStop("stopping master", self.master_datadir, mode=self.mode, timeout=self.timeout)
-        try:
-            cmd.run(validateAfter=True)
-        except:
-            # Didn't stop in timeout or pg_ctl failed.  So try kill
-            (succeeded,mypid,file_datadir)=pg.ReadPostmasterTempFile.local("Read master tmp file", self.dburl.pgport).getResults()
-            if succeeded and file_datadir == self.master_datadir:
-                if unix.check_pid(mypid):
-                    logger.info("Failed to shutdown master with pg_ctl.")
-                    logger.info("Sending SIGQUIT signal...")
-                    os.kill(mypid,signal.SIGQUIT)
-                    time.sleep(5)
-                    
-                    # Still not gone... try SIGABRT
-                    if unix.check_pid(mypid):
-                        logger.info("Sending SIGABRT signal...")
-                        os.kill(mypid,signal.SIGABRT)                      
-                        time.sleep(5)
-                    
-                    if not unix.check_pid(mypid):
-                        # Clean up files
-                        lockfile="/tmp/.s.PGSQL.%s" % self.dburl.pgport    
-                        if os.path.exists(lockfile):
-                            logger.info("Clearing segment instance lock files")        
-                            os.remove(lockfile)
-            
-        logger.debug("Successfully shutdown the Master instance in admin mode")
-    
-    ######
-    def _stop_master_checks(self):
-        total_connections=len(catalog.getUserPIDs(self.conn))
-        logger.info("There are %d connections to the database" % total_connections)
-        
-        if total_connections > 0 and self.mode=='smart':
-            logger.warning("There are other connections to this instance, shutdown mode smart aborted")
-            logger.warning("Either remove connections, or use 'gpstop -M fast' or 'gpstop -M immediate'")
-            logger.warning("See gpstop -? for all options")
-            raise ExceptionNoStackTraceNeeded("Active connections. Aborting shutdown...")
-        
-        logger.info("Commencing Master instance shutdown with mode='%s'" % self.mode)
-        logger.info("Master host=%s" % self.gparray.master.hostname)
-        
-        if self.mode == 'smart':
-            pass
-        elif self.mode == 'fast':
-            logger.info("Detected %d connections to database" % total_connections)
-            if total_connections > 0:                
-                logger.info("Switching to WAIT mode")
-                logger.info("Will wait for shutdown to complete, this may take some time if")
-                logger.info("there are a large number of active complex transactions, please wait...")
-            else:
-                if self.timeout == SEGMENT_TIMEOUT_DEFAULT:
-                    logger.info("Using standard WAIT mode of %s seconds" % SEGMENT_TIMEOUT_DEFAULT)
-                else:
-                    logger.info("Using WAIT mode of %s seconds" % self.timeout)
-        pass
-    
-    
-    ######
-    def _stop_standby(self):
-        """ assumes prepare() has been called """
-        if not self.stopstandby:
-            return True
-        
-        if self.gparray.standbyMaster:
-            standby=self.gparray.standbyMaster
-            
-            logger.info("Stopping gpsyncmaster on standby host %s mode=fast" % standby.hostname)
-            try:
-                cmd=gp.SegmentStop("stopping gpsyncmaster", standby.datadir,mode='fast',timeout=self.timeout, 
-                                   ctxt=base.REMOTE,remoteHost=standby.hostname)
-                cmd.run(validateAfter=True)                
-            except base.ExecutionError, e:
-                logger.warning("Error occured while stopping the standby master: %s" % e)
-            
-            if not pg.DbStatus.remote('checking status of standby master instance',standby,standby.hostname):
-                logger.info("Successfully shutdown sync process on %s" % standby.hostname)
-                return True
-            else:
-                logger.warning("Process gpsyncmaster still running, will issue fast shutdown with immediate")
-                try:            
-                    cmd=gp.SegmentStop("stopping gpsyncmaster", standby.datadir,mode='immediate', timeout=self.timeout, 
-                                       ctxt=base.REMOTE,remoteHost=standby.hostname)
-                    cmd.run(validateAfter=True)
-                except base.ExecutionError, e:
-                    logger.warning("Error occured while stopping the standby master: %s" % e)
-                    
-                if not pg.DbStatus.remote('checking status of standby master instance',standby,standby.hostname):
-                    logger.info("Successfully shutdown sync process on %s" % standby.hostname)
-                    return True
-                else:
-                    logger.error("Unable to stop gpsyncmaster on host: %s" % standby.hostname)
-                    return False
-        else:
-            logger.info("No standby master host configured")
-            return True
-            
-            
-    ######
-    def _stop_segments(self):        
-        failed_seg_status = []
-        workers=min(len(self.gparray.get_hostlist()),self.parallel)
-        self.pool = base.WorkerPool(numWorkers=workers)
-        
-        segs = self.gparray.getSegDbList()
-
-        #read in the hostcache file and make sure we can ping everybody
-        self.hostcache=gphostcache.GpHostCache(self.gparray, self.pool)
-        failed_pings=self.hostcache.ping_hosts(self.pool)
-        for db in failed_pings:
-            logger.warning("Skipping startup of segdb on %s directory %s Ping Failed <<<<<<" % (db.hostname, db.datadir))
-            failed_seg_status.append(SegStopStatus(db,False,'Failed to Ping on host: %s' % db.hostname))
-        
-        self.hostcache.log_contents()
-         
-        strategy = self.gparray.getFaultStrategy()
-        if strategy == FAULT_STRATEGY_FILE_REPLICATION:
-            # stop primaries
-            logger.info("Commencing parallel primary segment instance shutdown, please wait...")
-            self._stopseg_cmds(True,False)
-            primary_success_seg_status=self._process_segment_stop(failed_seg_status)    
-            
-            # stop mirrors
-            logger.info("Commencing parallel mirror segment instance shutdown, please wait...")
-            self._stopseg_cmds(False,True)
-            mirror_success_seg_status=self._process_segment_stop(failed_seg_status)    
-            
-            success_seg_status = primary_success_seg_status + mirror_success_seg_status
-            self._print_segment_stop(segs,failed_seg_status,success_seg_status)            
-
-        else:
-            logger.info("Commencing parallel segment instance shutdown, please wait...")
-            # There are no active-mirrors
-            self._stopseg_cmds(True, False)
-            success_seg_status = self._process_segment_stop(failed_seg_status)
-
-            self._print_segment_stop(segs,failed_seg_status,success_seg_status)            
-        pass
-    
-    ######
-    def _stopseg_cmds(self, includePrimaries, includeMirrors):
-        dispatch_count=0
-        
-        for gphost in self.hostcache.get_hosts():
-            dbs = []
-            for db in gphost.dbs:
-                role = db.getSegmentRole()
-                if role == 'p' and includePrimaries:
-                    dbs.append(db)
-                elif role != 'p' and includeMirrors:
-                    dbs.append(db)
-            
-            hostname=gphost.dbs[0].hostname                            
-            
-            # If we have no dbs then we have no segments of the requested type
-            # (primary or mirror).  This can occur when an entire host fails
-            # while using group mirroring, because all the mirror segs on the
-            # surviving host will have been marked primary (or vice-versa).
-            if len(dbs) == 0:
-                continue
-                
-            logger.debug("Dispatching command to shutdown %d segments on host: %s" % (len(dbs), hostname))
-            cmd=gp.GpSegStopCmd("remote segment stops on host '%s'" % hostname, self.gphome,self.gpversion,
-                                mode=self.mode, dbs=dbs, timeout=self.timeout,
-                                verbose=logging_is_verbose(),ctxt=base.REMOTE, remoteHost=hostname)
-            self.pool.addCommand(cmd)
-            dispatch_count+=1
-        
-        self.pool.wait_and_printdots(dispatch_count,self.quiet)    
-        pass
-        
-    
-    ######
-    def _process_segment_stop(self,failed_seg_status):
-        '''reviews results of gpsegstop commands '''
-        success_seg_status=[]
-        seg_timed_out=False
-        cmds=self.pool.getCompletedItems()
-        for cmd in cmds:
-            if cmd.get_results().rc == 0 or cmd.get_results().rc == 1:                            
-                cmdout = cmd.get_results().stdout
-                lines=cmdout.split('\n')
-                for line in lines:
-                    if line.startswith("STATUS"):
-                        fields=line.split('--')
-                        dir = fields[1].split(':')[1]
-                        started = fields[2].split(':')[1]
-                        reasonStr = fields[3].split(':')[1]
-                        
-                        if started.lower() == 'false':
-                            success=False
-                        else:
-                            success=True
-                        
-                        for db in cmd.dblist:                            
-                            if db.datadir == dir:
-                                if success:
-                                    success_seg_status.append( SegStopStatus(db,stopped=True,reason=reasonStr,failedCmd=cmd) )
-                                else:
-                                    #dbs that are marked invalid are 'skipped' but we dispatch to them
-                                    #anyway since we want to try and shutdown any runaway pg processes.
-                                    failed_seg_status.append( SegStopStatus(db,stopped=False,reason=reasonStr,failedCmd=cmd) )
-
-                    elif line.strip().startswith('stderr: pg_ctl: server does not shut down'):
-                        failed_seg_status[-1].timedOut=True
-            else:
-                for db in cmd.dblist:
-                    #dbs that are marked invalid are 'skipped' but we dispatch to them
-                    #anyway since we want to try and shutdown any runaway pg processes.
-                    if db.valid:
-                        failed_seg_status.append( SegStopStatus(db,stopped=False,reason=cmd.get_results(),failedCmd=cmd))
-
-        self.pool.empty_completed_items()
-        return success_seg_status        
-    
-    
-    ######
-    def _print_segment_stop(self, segs, failed_seg_status, success_seg_status):
-        stopped = len(segs) - len(failed_seg_status)         
-        failed = len([x for x in failed_seg_status if x.db.valid])
-        invalid = self.gparray.get_invalid_segdbs()
-        inactive = self.gparray.get_inactive_mirrors_segdbs()
-        total_segs = len(self.gparray.getSegDbList())
-        timed_out = len([x for x in failed_seg_status if x.timedOut])
-
-        if failed > 0 or logging_is_verbose():
-            logger.info("------------------------------------------------")
-            if logging_is_verbose():
-                logger.info("Segment Stop Information")
-            else:
-                logger.info("Failed Segment Stop Information ")
-
-            logger.info("------------------------------------------------")
-            if failed > 0:
-                for failure in failed_seg_status:
-                    logger.info(failure)
-            if logging_is_verbose():
-                    for stat in success_seg_status:
-                        logger.debug(stat)              
-             
-        tabLog = TableLogger().setWarnWithArrows(True)
-        tabLog.addSeparator()
-        tabLog.info(["Segments stopped successfully", "= %d" % stopped])
-        tabLog.infoOrWarn(failed > 0, ["Segments with errors during stop", "= %d" % failed])
-        if invalid:
-            tabLog.info([])
-            tabLog.warn(["Segments that are currently marked down in configuration", "= %d" % len(invalid)])
-            tabLog.info(["         (stop was still attempted on these segments)"])
-        tabLog.addSeparator()
-
-        tabLog.outputTable()
-
-        flag = "" if failed == 0 else "<<<<<<<<"
-        logger.info("Successfully shutdown %d of %d segment instances %s" % (stopped,total_segs,flag))
-         
-        if failed > 0:
-            self.hadFailures=True
-            logger.warning("------------------------------------------------")
-            logger.warning("Segment instance shutdown failures reported")
-            logger.warning("Failed to shutdown %d of %d segment instances <<<<<" % (failed,total_segs))
-            if timed_out > 0:
-                logger.warning("%d segments did not complete their shutdown in the allowed" % timed_out)
-                logger.warning("timeout of %d seconds.  These segments are still in the process" % self.timeout)
-                logger.warning("of shutting down.  You will not be able to restart the database")
-                logger.warning("until all processes have terminated.")
-            logger.warning("A total of %d errors were encountered" % failed)
-            logger.warning("Review logfile %s" % get_logfile())
-            logger.warning("For more details on segment shutdown failure(s)")
-            logger.warning("------------------------------------------------")
-        else:
-            self.hadFailures=False
-            logger.info("Database successfully shutdown with no errors reported")
-        pass
-    
-    
-    ######
-    def _sighup_cluster(self):
-        """ assumes prepare() has been called """
-        workers=min(len(self.gparray.get_hostlist()),self.parallel)
-
-        class SighupWorkerPool(base.WorkerPool):
-            """
-            This pool knows all the commands are calls to pg_ctl.
-            The failed list collects segments without a running postmaster.
-            """
-            def __init__(self, numWorkers):        
-                base.WorkerPool.__init__(self, numWorkers)
-                self.failed = []
-            def check_results(self):
-                while not self.completed_queue.empty():
-                    item    = self.completed_queue.get(False)
-                    results = item.get_results()
-                    if results.wasSuccessful():
-                        continue
-                    if "No such process" in results.stderr:
-                        self.failed.append(item.db)
-                        continue
-                    raise ExecutionError("Error Executing Command: ",item)           
-
-        if not self.gparray.allSegmentsAlive():
-            logger.error("Cannot reload postgresql.conf because some segments are down")
-            return 1
-
-        self.pool = SighupWorkerPool(numWorkers = workers)
-        dbList = self.gparray.getDbList()
-        dispatch_count = 0
-        logger.info("Signalling all postmaster processes to reload")
-        for db in dbList:
-            cmd = pg.ReloadDbConf( name = "reload segment number " + str(db.getSegmentDbId())
-                                 , db = db
-                                 , ctxt = REMOTE
-                                 , remoteHost = db.getSegmentHostName()
-                                 )
-            self.pool.addCommand(cmd)
-            dispatch_count = dispatch_count + 1
-        self.pool.wait_and_printdots(dispatch_count,self.quiet)
-        self.pool.check_results()
-        self.pool.empty_completed_items()
-
-        if len(self.pool.failed) < 1:
-            return 0
-
-        logger.info("--------------------------------------------")
-        logger.info("Some segment postmasters were not reloaded")
-        logger.info("--------------------------------------------")
-        tabLog = TableLogger().setWarnWithArrows(True)
-        tabLog.info(["Host","Datadir","Port","Status"])
-        for db in self.pool.failed:
-            tup = [db.getSegmentHostName(), db.getSegmentDataDirectory(), str(db.getSegmentPort()), db.getSegmentStatus()]
-            tabLog.info(tup)
-        tabLog.outputTable()
-        logger.info("--------------------------------------------")
-        return 1
-    
-    
-    ######
-    def _summarize_actions(self):        
-        logger.info("--------------------------------------------")
-        logger.info("Master instance parameters")
-        logger.info("--------------------------------------------")
-
-        tabLog = TableLogger().setWarnWithArrows(True)
-        tabLog.info(["Master Greenplum instance process active PID", "= %s" % self.pid])
-        tabLog.info(["Database", "= %s" % self.dburl.pgdb ])
-        tabLog.info(["Master port", "= %s" % self.port ])
-        tabLog.info(["Master directory", "= %s" % self.master_datadir ])
-        tabLog.info(["Shutdown mode", "= %s" % self.mode])
-        tabLog.info(["Timeout", "= %s" % self.timeout])
-
-        standbyMsg = "On" if self.gparray.standbyMaster and self.stopstandby else "Off"
-        tabLog.info(["Shutdown Master standby host", "= %s" % standbyMsg])
-
-        tabLog.outputTable()
-
-        logger.info("--------------------------------------------")
-        logger.info("Segment instances that will be shutdown:")
-        logger.info("--------------------------------------------")
-
-        tabLog = TableLogger().setWarnWithArrows(True)
-        tabLog.info(["Host","Datadir","Port","Status"])
-        for db in self.gparray.getSegDbList():
-            tabLog.info([db.getSegmentHostName(), db.getSegmentDataDirectory(),
-                            str(db.getSegmentPort()), db.getSegmentStatus()])
-        tabLog.outputTable()
-
-#----------------------- Command line option parser ----------------------
-    @staticmethod
-    def createParser():
-        parser = OptParser(option_class=OptChecker,
-                    description="Stops a GPDB Array.",
-                    version='%prog version $Revision$')
-        parser.setHelp([])
-
-        addStandardLoggingAndHelpOptions(parser, includeNonInteractiveOption=True, includeUsageOption=True)
-
-        addTo = OptionGroup(parser, 'Connection options')
-        parser.add_option_group(addTo)
-        addMasterDirectoryOptionForSingleClusterProgram(addTo)
-
-        addTo = OptionGroup(parser, 'Instance shutdown options: ')
-        parser.add_option_group(addTo)
-        addTo.add_option('-f', '--fast', action='store_true', default=False,
-                            help="<deprecated> Fast shutdown: active transactions are interrupted and rolled back.")
-        addTo.add_option('-i', '--immediate', action='store_true',default=False,
-                            help="<deprecated> Immediate shutdown: active transactions are aborted.")
-        addTo.add_option('-s', '--smart', action='store_true',
-                            help="<deprecated> Smart shutdown: wait for active transactions to complete. [default]")
-        addTo.add_option('-z', '--force', action='store_true',default=False,
-                            help="<deprecated> Force shutdown of segment instances marked as invalid. Kill postmaster PID, "\
-                                 "delete /tmp lock files and remove segment instance postmaster.pid file.")
-        addTo.add_option('-M', '--mode', type='choice', choices=['fast', 'immediate', 'smart'],
-                           metavar='fast|immediate|smart', action='store', default='smart',
-                           help='set the method of shutdown')
-
-        addTo.add_option('-r', '--restart', action='store_true',
-                            help='Restart Greenplum Database instance after successful gpstop.')
-        addTo.add_option('-m', '--master_only', action='store_true',
-                            help='stop master instance started in maintenance mode')
-        addTo.add_option('-y', dest="stop_standby", action='store_false',default=True,
-                            help='do not stop the standby master process')
-        addTo.add_option('-u', dest="request_sighup", action='store_true',
-                            help="upload new master postgresql.conf settings, does not stop Greenplum array, "\
-                                 "issues a signal to the master segment postmaster process to reload")
-
-        addTo.add_option('-B', '--parallel', type="int", default=DEFAULT_NUM_WORKERS, metavar="<parallel_processes>",
-                            help='number of segment hosts to run in parallel. Default is %d' % DEFAULT_NUM_WORKERS)
-        addTo.add_option('-t', '--timeout', dest='timeout', default=SEGMENT_TIMEOUT_DEFAULT, type='int',
-                           help='time to wait for segment stop (in seconds)')
-
-        parser.set_defaults(verbose=False, filters=[], slice=(None, None))
-        return parser
-
-    @staticmethod
-    def createProgram(options, args):
-        if options.mode != 'smart':
-            if options.fast or options.immediate:
-                raise ProgramArgumentValidationException("Cannot mix --mode options with older deprecated '-f,-i,-s'")
-
-        if options.fast:
-            options.mode="fast"
-        if options.immediate:
-            options.mode="immediate"
-        if options.smart:
-            options.mode="smart"
-
-        #deprecating force option.  it no longer kills -9 things.
-        # just make it stop fast instead.
-        if options.force:
-            options.mode="fast"
-
-        proccount=os.environ.get('GP_MGMT_PROCESS_COUNT')
-        if options.parallel == 64 and proccount is not None:
-            options.parallel = int(proccount)
-
-        #-n sanity check
-        if options.parallel > 128 or options.parallel < 1:
-            raise ProgramArgumentValidationException("Invalid value for parallel degree: %s" % options.parallel )
-
-        # Don't allow them to go below default
-        if options.timeout < SEGMENT_TIMEOUT_DEFAULT:
-            raise ProgramArgumentValidationException("Invalid timeout value.  Must be at least %s seconds." % SEGMENT_TIMEOUT_DEFAULT)
-
-        if args:
-            raise ProgramArgumentValidationException("Argument %s is invalid.  Is an option missing a parameter?" % args[-1])
-
-        return GpStop(options.mode,
-                        master_datadir=options.masterDataDirectory,
-                        parallel=options.parallel,
-                        quiet=options.quiet,
-                        masteronly=options.master_only,
-                        sighup=options.request_sighup,
-                        interactive=options.interactive,
-                        stopstandby=options.stop_standby,
-                        restart=options.restart,
-                        timeout=options.timeout)
-
-if __name__ == '__main__':
-    simple_main( GpStop.createParser, GpStop.createProgram)
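
The removed _process_segment_stop above parses per-directory results by splitting each gpsegstop STATUS line on '--' and ':'. Below is a minimal, self-contained sketch of that parsing convention; the sample line and the DIR/STOPPED/REASON labels are assumptions inferred from the splits in the deleted code, not captured gpsegstop output.

    # Sketch of the STATUS-line parsing convention used by the removed code.
    # The sample line and field labels are assumed, not real gpsegstop output.
    def parse_status_line(line):
        """Return (datadir, stopped, reason) from one gpsegstop STATUS line."""
        fields = line.split('--')
        datadir = fields[1].split(':')[1]
        stopped = fields[2].split(':')[1].lower() != 'false'
        reason = fields[3].split(':')[1]
        return datadir, stopped, reason

    if __name__ == '__main__':
        sample = "STATUS--DIR:/data/gpseg0--STOPPED:true--REASON:Shutdown Succeeded"
        print(parse_status_line(sample))

Segments reporting a false stop are collected as failures but are still dispatched to, so that any runaway postgres processes get a shutdown attempt, as the comments in the deleted code note.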

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/9932786b/tools/doc/gpconfig_help
----------------------------------------------------------------------
diff --git a/tools/doc/gpconfig_help b/tools/doc/gpconfig_help
deleted file mode 100644
index 1997e9f..0000000
--- a/tools/doc/gpconfig_help
+++ /dev/null
@@ -1,158 +0,0 @@
-COMMAND NAME: gpconfig
-
-Sets server configuration parameters on all segments within 
-a HAWQ system.
-
-*****************************************************
-SYNOPSIS
-*****************************************************
-
-gpconfig -c <param_name> -v <value> [-m <master_value> | --masteronly]
-       | -r <param_name> [--masteronly]
-       | -l 
-   [--skipvalidation] [--verbose] [--debug]
-
-gpconfig -s <param_name> [--verbose] [--debug]
-
-gpconfig --help
-
-
-*****************************************************
-DESCRIPTION
-*****************************************************
-
-The gpconfig utility allows you to set, unset, or view configuration
-parameters from the postgresql.conf files of all instances (master, 
-segments, and mirrors) in your HAWQ system. When setting a parameter, 
-you can also specify a different value for the master if necessary. 
-For example, parameters such as max_connections require a different 
-setting on the master than what is used for the segments. If you want
- to set or unset a global or master only parameter, use the 
---masteronly option.
-
-gpconfig can not change the configuration parameters if there are 
-failed segments in the system.
-
-gpconfig can only be used to manage certain parameters. 
-For example, you cannot use it to set parameters such as port, 
-which is required to be distinct for every segment instance. Use 
-the -l (list) option to see a complete list of configuration parameters
- supported by gpconfig.
-
-When gpconfig sets a configuration parameter in a segment 
-postgresql.conf file, the new parameter setting always displays 
-at the bottom of the file. When you use gpconfig to remove a configuration
- parameter setting, gpconfig comments out the parameter in all segment 
-postgresql.conf files, thereby restoring the system default setting. For 
-example, if you use gpconfig to remove (comment out) a parameter and later 
-add it back (set a new value), there will be two instances of the parameter; 
-one that is commented out, and one that is enabled and inserted at the 
-bottom of the postgresql.conf file.
-
-After setting a parameter, you must restart your HAWQ system or 
-reload the postgresql.conf files for the change to take effect. 
-Whether you require a restart or a reload depends on the parameter. 
-
-To show the currently set values for a parameter across the system, 
-use the -s option.
-
-gpconfig uses the following environment variables to connect to 
-the HAWQ master instance and obtain system configuration information: 
-
-PGHOST
-PGPORT
-PGUSER
-PGPASSWORD
-PGDATABASE
-
-
-*****************************************************
-OPTIONS
-*****************************************************
-
--c | --change param_name
-
-  Changes a configuration parameter setting by adding the new setting
-  to the bottom of the postgresql.conf files.
-
--v | --value value
-
-  The value to use for the configuration parameter you specified 
-  with the -c option. By default, this value is applied to all segments,
-  their mirrors, the master, and the standby master.
-
--m | --mastervalue master_value
-
-  The master value to use for the configuration parameter you 
-  specified with the -c option. If specified, this value only applies 
-  to the master and standby master. This option can only be used with -v. 
-
---masteronly
-
-  When specified, gpconfig will only edit the master postgresql.conf file.
-
--r | --remove param_name
-
-  Removes a configuration parameter setting by commenting out the entry 
-  in the postgresql.conf files.
-
--l | --list
-
-  Lists all configuration parameters supported by the gpconfig utility.
-
--s | --show param_name
-
-  Shows the value for a specified configuration parameter used on 
-  all instances (master and segments) of the HAWQ system. If there is 
-  a discrepancy in a parameter value between instances, the gpconfig 
-  utility displays an error message. The gpconfig utility reads parameter 
-  values directly from the database, and not the postgresql.conf file. 
-  If you are using gpconfig to set configuration parameters across all 
-  segments, then running gpconfig -s to verify the changes, you might still 
-  see the previous (old) values. You must reload the configuration files 
-  (gpstop -u) or restart the system (gpstop -r) for changes to take effect.
-
---skipvalidation
-
-  Overrides the system validation checks of gpconfig and allows you to 
-  operate on any server configuration parameter, including hidden 
-  parameters and restricted parameters that cannot be changed by 
-  gpconfig. When used with the -l option (list), it shows the list of 
-  restricted parameters. This option should only be used to set 
-  parameters when directed by HAWQ Customer Support.
-
---verbose 
-  
-  Displays additional log information during gpconfig command execution.
-  
---debug
-
-  Sets logging output to debug level. 
-
--? | -h | --help
-  
-  Displays the online help.
-
-
-*****************************************************
-EXAMPLES
-*****************************************************
-
-Set the work_mem parameter to 120MB in the master host file only:
-  gpconfig -c work_mem -v 120MB --masteronly
-
-Set the max_connections setting to 100 on all segments and 10 on the 
-master:
-  gpconfig -c max_connections -v 100 -m 10
-
-Comment out all instances of the default_statistics_target configuration 
-parameter, and restore the system default:
-  gpconfig -r default_statistics_target
-
-List all configuration parameters supported by gpconfig:
-  gpconfig -l
-
-Show the values of a particular configuration parameter across the system:
-  gpconfig -s max_connections
-
-*****************************************************
-SEE ALSO
-*****************************************************
-
-gpstop
-
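
As the description above says, gpconfig appends a new setting at the bottom of each postgresql.conf and comments existing entries out when removing one, which is why a remove-then-set cycle leaves both a commented-out line and a fresh active line. A minimal sketch of that file-editing behaviour follows; it is purely illustrative, not the gpconfig source, and the path argument is a placeholder.

    # Sketch of the postgresql.conf behaviour described in the help text:
    # "set" appends at the bottom of the file, "remove" comments entries out.
    # Illustrative only; this is not the gpconfig implementation.
    import re

    def set_param(conf_path, name, value):
        """Append the new setting at the bottom of postgresql.conf."""
        with open(conf_path, 'a') as f:
            f.write("%s=%s\n" % (name, value))

    def remove_param(conf_path, name):
        """Comment out every active occurrence of the setting."""
        pattern = re.compile(r'^\s*%s\s*=' % re.escape(name))
        with open(conf_path) as f:
            lines = f.readlines()
        with open(conf_path, 'w') as f:
            for line in lines:
                f.write('#' + line if pattern.match(line) else line)

Per the help text, a reload (gpstop -u) or restart (gpstop -r) is still required before gpconfig -s reports the new values.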

http://git-wip-us.apache.org/repos/asf/incubator-hawq/blob/9932786b/tools/doc/gpexpand_help
----------------------------------------------------------------------
diff --git a/tools/doc/gpexpand_help b/tools/doc/gpexpand_help
deleted file mode 100644
index 00f2d72..0000000
--- a/tools/doc/gpexpand_help
+++ /dev/null
@@ -1,196 +0,0 @@
-COMMAND NAME:  gpexpand
-
-Expands an existing HAWQ cluster across new hosts in the array.
-
-******************************************************
-SYNOPSIS
-******************************************************
-
-gpexpand 
-      [-f <hosts_file>]
-      | -i <input_file> [-B <batch_size>] [-V] 
-      | {-d <hh:mm:ss> | -e '<YYYY-MM-DD hh:mm:ss>'} 
-        [-a] [-n <parallel_processes>]
-      | --rollback
-      | --clean
-[-D <database_name>][--verbose] [--silent]
-
-gpexpand -? | -h | --help 
-
-gpexpand --version
-
-
-******************************************************
-PREREQUISITES
-******************************************************
-
-* You are logged in as the HAWQ superuser (gpadmin).
-
-* The new segment hosts have been installed and configured as per 
-  the existing segment hosts. This involves:
-
-  * Configuring the hardware and OS
-  * Installing the HAWQ software
-  * Creating the gpadmin user account
-  * Exchanging SSH keys. 
-
-* Enough disk space on your segment hosts to temporarily hold a 
-  copy of your largest table. 
-
-******************************************************
-DESCRIPTION
-******************************************************
-
-The gpexpand utility performs system expansion in two phases: segment 
-initialization and then table redistribution.
- 
-In the initialization phase, gpexpand runs with an input file that 
-specifies data directories, dbid values, and other characteristics 
-of the new segments. You can create the input file manually, or by 
-following the prompts in an interactive interview.
-
-If you choose to create the input file using the interactive interview, 
-you can optionally specify a file containing a list of expansion hosts. 
-If your platform or command shell limits the length of the list of hostnames 
-that you can type when prompted in the interview, specifying the hosts 
-with -f may be mandatory. 
-
-In addition to initializing the segments, the initialization phase 
-performs these actions:
-* Creates an expansion schema to store the status of the expansion 
-  operation, including detailed status for tables.
-* Changes the distribution policy for all tables to DISTRIBUTED RANDOMLY. 
-  The original distribution policies are later restored in the 
-  redistribution phase.
-
-To begin the redistribution phase, you must run gpexpand with either 
-the -d (duration) or -e (end time) options. Until the specified end 
-time or duration is reached, the utility will redistribute tables in 
-the expansion schema. Each table is reorganized using ALTER TABLE 
-commands to rebalance the tables across new segments, and to set 
-tables to their original distribution policy. If gpexpand completes 
-the reorganization of all tables before the specified duration, 
-it displays a success message and ends. 
-
-
-******************************************************
-OPTIONS
-******************************************************
-
--a | --analyze
- Run ANALYZE to update the table statistics after expansion. 
- The default is to not run ANALYZE.
-
-
--B <batch_size>
- Batch size of remote commands to send to a given host before 
- making a one-second pause. Default is 16. Valid values are 1-128.
- The gpexpand utility issues a number of setup commands that may exceed 
- the host's maximum threshold for authenticated connections as defined 
- by MaxStartups in the SSH daemon configuration. The one-second pause 
- allows authentications to be completed before gpexpand issues any 
- more commands. The default value does not normally need to be changed. 
- However, it may be necessary to reduce the maximum number of commands 
- if gpexpand fails with connection errors such as 
- 'ssh_exchange_identification: Connection closed by remote host.'
-
-
--c | --clean
- Remove the expansion schema.
-
-
--d | --duration <hh:mm:ss>
- Duration of the expansion session from beginning to end.
-
-
--D <database_name>
- Specifies the database in which to create the expansion schema 
- and tables. If this option is not given, the setting for the 
- environment variable PGDATABASE is used. The database templates 
- template1 and template0 cannot be used.
-
-
--e | --end '<YYYY-MM-DD hh:mm:ss>'
- Ending date and time for the expansion session.
-
-
--f | --hosts-file <filename>
- Specifies the name of a file that contains a list of new hosts for 
- system expansion. Each line of the file must contain a single 
- host name. This file can contain hostnames with or without network 
- interfaces specified. The gpexpand utility handles either case, 
- adding interface numbers to the end of the hostname if the original nodes 
- are configured with multiple network interfaces.
-
-
--i | --input <input_file>
- Specifies the name of the expansion configuration file, which contains 
- one line for each segment to be added in the format of:
-
-  <hostname>:<address>:<port>:<fselocation>:<dbid>:<content>:<preferred_role>:<replication_port>
-
- If your system has filespaces, gpexpand will expect a filespace configuration 
- file (<input_file_name>.fs) to exist in the same directory as your 
- expansion configuration file. The filespace configuration file is 
- in the format of:
-
-  filespaceOrder=<filespace1_name>:<filespace2_name>: ...
-  dbid|</path/for/filespace1>|</path/for/filespace2>| ...
-  dbid|</path/for/filespace1>|</path/for/filespace2>| ...
-  ...
-
-
--n <parallel_processes>
- The number of tables to redistribute simultaneously. Valid values 
- are 1 - 16. Each table redistribution process requires two database 
- connections: one to alter the table, and another to update the table's 
- status in the expansion schema. Before increasing -n, check the current 
- value of the server configuration parameter max_connections and make 
- sure the maximum connection limit is not exceeded.
-
-
--r | --rollback
- Roll back a failed expansion setup operation. If the rollback command 
- fails, attempt again using the -D option to specify the database that 
- contains the expansion schema for the operation that you want to roll back.
-
-
--s | --silent
- Runs in silent mode. Does not prompt for confirmation to proceed 
- on warnings.
-
-
--v | --verbose
- Verbose debugging output. With this option, the utility will output 
- all DDL and DML used to expand the database.
-
-
---version
- Display the utility's version number and exit.
-
-
--? | -h | --help
- Displays the online help.
-
-
-******************************************************
-EXAMPLES
-******************************************************
-
-Run gpexpand with an input file to initialize new segments and 
-create the expansion schema in the default database:
-
-  $ gpexpand -i input_file
-
-
-Run gpexpand for sixty hours maximum duration to redistribute 
-tables to new segments:
-
-  $ gpexpand -d 60:00:00
-
-******************************************************
-SEE ALSO
-******************************************************
-
-gpssh-exkeys
-
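
The -i expansion configuration file described above holds one colon-separated record per new segment. Below is a minimal sketch of parsing such a line; the field order follows the help text, and the sample values are invented for illustration.

    # Sketch: parse one line of a gpexpand expansion configuration file.
    # Field order follows the help text; the sample values are invented.
    from collections import namedtuple

    ExpansionSeg = namedtuple('ExpansionSeg',
        'hostname address port fselocation dbid content preferred_role replication_port')

    def parse_expansion_line(line):
        parts = line.strip().split(':')
        if len(parts) != 8:
            raise ValueError("expected 8 colon-separated fields, got %d" % len(parts))
        return ExpansionSeg(*parts)

    print(parse_expansion_line("sdw3:sdw3-1:40000:/data/primary/gpseg2:5:2:p:41000"))

If filespaces are in use, a companion <input_file_name>.fs file with the filespaceOrder header and one dbid-prefixed path line per segment is expected alongside, as noted above.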

